author     Brendan Abolivier <babolivier@matrix.org>  2020-08-06 10:52:50 +0100
committer  Brendan Abolivier <babolivier@matrix.org>  2020-08-06 10:52:50 +0100
commit     118a9eafb31fbd51d23ef8abbaac481579b7f9e5 (patch)
tree       ba84cd874fbe43c4981abaa3d8aa2c9913d5097b /synapse
parent     Incorporate review (diff)
parent     Fixup worker doc (again) (#8000) (diff)
download   synapse-118a9eafb31fbd51d23ef8abbaac481579b7f9e5.tar.xz

Merge branch 'develop' of github.com:matrix-org/synapse into babolivier/new_push_rules
Diffstat (limited to 'synapse')
-rw-r--r--  synapse/app/_base.py | 18
-rw-r--r--  synapse/app/generic_worker.py | 14
-rw-r--r--  synapse/app/homeserver.py | 19
-rw-r--r--  synapse/appservice/api.py | 2
-rw-r--r--  synapse/config/database.py | 5
-rw-r--r--  synapse/config/ratelimiting.py | 21
-rw-r--r--  synapse/crypto/keyring.py | 201
-rw-r--r--  synapse/events/snapshot.py | 2
-rw-r--r--  synapse/handlers/acme.py | 11
-rw-r--r--  synapse/handlers/federation.py | 2
-rw-r--r--  synapse/handlers/identity.py | 34
-rw-r--r--  synapse/handlers/initial_sync.py | 4
-rw-r--r--  synapse/handlers/message.py | 2
-rw-r--r--  synapse/handlers/pagination.py | 2
-rw-r--r--  synapse/handlers/presence.py | 4
-rw-r--r--  synapse/handlers/register.py | 2
-rw-r--r--  synapse/handlers/room.py | 10
-rw-r--r--  synapse/handlers/room_member.py | 37
-rw-r--r--  synapse/handlers/search.py | 2
-rw-r--r--  synapse/handlers/stats.py | 2
-rw-r--r--  synapse/handlers/sync.py | 2
-rw-r--r--  synapse/http/client.py | 55
-rw-r--r--  synapse/http/server.py | 16
-rw-r--r--  synapse/module_api/__init__.py | 2
-rw-r--r--  synapse/notifier.py | 4
-rw-r--r--  synapse/python_dependencies.py | 1
-rw-r--r--  synapse/replication/http/_base.py | 18
-rw-r--r--  synapse/replication/http/devices.py | 2
-rw-r--r--  synapse/replication/http/federation.py | 17
-rw-r--r--  synapse/replication/http/login.py | 2
-rw-r--r--  synapse/replication/http/membership.py | 8
-rw-r--r--  synapse/replication/http/presence.py | 4
-rw-r--r--  synapse/replication/http/register.py | 4
-rw-r--r--  synapse/replication/http/send_event.py | 7
-rw-r--r--  synapse/replication/http/streams.py | 2
-rw-r--r--  synapse/replication/slave/storage/_base.py | 6
-rw-r--r--  synapse/replication/slave/storage/account_data.py | 8
-rw-r--r--  synapse/replication/slave/storage/appservice.py | 2
-rw-r--r--  synapse/replication/slave/storage/client_ips.py | 6
-rw-r--r--  synapse/replication/slave/storage/deviceinbox.py | 6
-rw-r--r--  synapse/replication/slave/storage/devices.py | 8
-rw-r--r--  synapse/replication/slave/storage/directory.py | 2
-rw-r--r--  synapse/replication/slave/storage/events.py | 24
-rw-r--r--  synapse/replication/slave/storage/filtering.py | 6
-rw-r--r--  synapse/replication/slave/storage/groups.py | 6
-rw-r--r--  synapse/replication/slave/storage/keys.py | 2
-rw-r--r--  synapse/replication/slave/storage/presence.py | 6
-rw-r--r--  synapse/replication/slave/storage/profile.py | 2
-rw-r--r--  synapse/replication/slave/storage/push_rule.py | 2
-rw-r--r--  synapse/replication/slave/storage/pushers.py | 6
-rw-r--r--  synapse/replication/slave/storage/receipts.py | 6
-rw-r--r--  synapse/replication/slave/storage/registration.py | 2
-rw-r--r--  synapse/replication/slave/storage/room.py | 6
-rw-r--r--  synapse/replication/slave/storage/transactions.py | 2
-rw-r--r--  synapse/rest/admin/rooms.py | 2
-rw-r--r--  synapse/rest/client/v1/room.py | 2
-rw-r--r--  synapse/rest/media/v1/media_storage.py | 23
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 2
-rw-r--r--  synapse/rest/media/v1/storage_provider.py | 19
-rw-r--r--  synapse/server.py | 4
-rw-r--r--  synapse/server_notices/consent_server_notices.py | 12
-rw-r--r--  synapse/server_notices/resource_limits_server_notices.py | 43
-rw-r--r--  synapse/server_notices/server_notices_manager.py | 29
-rw-r--r--  synapse/server_notices/server_notices_sender.py | 12
-rw-r--r--  synapse/server_notices/worker_server_notices_sender.py | 17
-rw-r--r--  synapse/state/__init__.py | 2
-rw-r--r--  synapse/storage/__init__.py | 15
-rw-r--r--  synapse/storage/_base.py | 6
-rw-r--r--  synapse/storage/background_updates.py | 22
-rw-r--r--  synapse/storage/database.py | 2
-rw-r--r--  synapse/storage/databases/__init__.py (renamed from synapse/storage/data_stores/__init__.py) | 18
-rw-r--r--  synapse/storage/databases/main/__init__.py (renamed from synapse/storage/data_stores/main/__init__.py) | 36
-rw-r--r--  synapse/storage/databases/main/account_data.py (renamed from synapse/storage/data_stores/main/account_data.py) | 34
-rw-r--r--  synapse/storage/databases/main/appservice.py (renamed from synapse/storage/data_stores/main/appservice.py) | 30
-rw-r--r--  synapse/storage/databases/main/cache.py (renamed from synapse/storage/data_stores/main/cache.py) | 10
-rw-r--r--  synapse/storage/databases/main/censor_events.py (renamed from synapse/storage/data_stores/main/censor_events.py) | 26
-rw-r--r--  synapse/storage/databases/main/client_ips.py (renamed from synapse/storage/data_stores/main/client_ips.py) | 66
-rw-r--r--  synapse/storage/databases/main/deviceinbox.py (renamed from synapse/storage/data_stores/main/deviceinbox.py) | 32
-rw-r--r--  synapse/storage/databases/main/devices.py (renamed from synapse/storage/data_stores/main/devices.py) | 122
-rw-r--r--  synapse/storage/databases/main/directory.py (renamed from synapse/storage/data_stores/main/directory.py) | 18
-rw-r--r--  synapse/storage/databases/main/e2e_room_keys.py (renamed from synapse/storage/data_stores/main/e2e_room_keys.py) | 28
-rw-r--r--  synapse/storage/databases/main/end_to_end_keys.py (renamed from synapse/storage/data_stores/main/end_to_end_keys.py) | 48
-rw-r--r--  synapse/storage/databases/main/event_federation.py (renamed from synapse/storage/data_stores/main/event_federation.py) | 54
-rw-r--r--  synapse/storage/databases/main/event_push_actions.py (renamed from synapse/storage/data_stores/main/event_push_actions.py) | 50
-rw-r--r--  synapse/storage/databases/main/events.py (renamed from synapse/storage/data_stores/main/events.py) | 74
-rw-r--r--  synapse/storage/databases/main/events_bg_updates.py (renamed from synapse/storage/data_stores/main/events_bg_updates.py) | 68
-rw-r--r--  synapse/storage/databases/main/events_worker.py (renamed from synapse/storage/data_stores/main/events_worker.py) | 40
-rw-r--r--  synapse/storage/databases/main/filtering.py (renamed from synapse/storage/data_stores/main/filtering.py) | 4
-rw-r--r--  synapse/storage/databases/main/group_server.py (renamed from synapse/storage/data_stores/main/group_server.py) | 160
-rw-r--r--  synapse/storage/databases/main/keys.py (renamed from synapse/storage/data_stores/main/keys.py) | 14
-rw-r--r--  synapse/storage/databases/main/media_repository.py (renamed from synapse/storage/data_stores/main/media_repository.py) | 54
-rw-r--r--  synapse/storage/databases/main/metrics.py (renamed from synapse/storage/data_stores/main/metrics.py) | 16
-rw-r--r--  synapse/storage/databases/main/monthly_active_users.py (renamed from synapse/storage/data_stores/main/monthly_active_users.py) | 24
-rw-r--r--  synapse/storage/databases/main/openid.py (renamed from synapse/storage/data_stores/main/openid.py) | 4
-rw-r--r--  synapse/storage/databases/main/presence.py (renamed from synapse/storage/data_stores/main/presence.py) | 12
-rw-r--r--  synapse/storage/databases/main/profile.py (renamed from synapse/storage/data_stores/main/profile.py) | 30
-rw-r--r--  synapse/storage/databases/main/purge_events.py (renamed from synapse/storage/data_stores/main/purge_events.py) | 6
-rw-r--r--  synapse/storage/databases/main/push_rule.py (renamed from synapse/storage/data_stores/main/push_rule.py) | 50
-rw-r--r--  synapse/storage/databases/main/pusher.py (renamed from synapse/storage/data_stores/main/pusher.py) | 34
-rw-r--r--  synapse/storage/databases/main/receipts.py (renamed from synapse/storage/data_stores/main/receipts.py) | 42
-rw-r--r--  synapse/storage/databases/main/registration.py (renamed from synapse/storage/data_stores/main/registration.py) | 192
-rw-r--r--  synapse/storage/databases/main/rejections.py (renamed from synapse/storage/data_stores/main/rejections.py) | 2
-rw-r--r--  synapse/storage/databases/main/relations.py (renamed from synapse/storage/data_stores/main/relations.py) | 10
-rw-r--r--  synapse/storage/databases/main/room.py (renamed from synapse/storage/data_stores/main/room.py) | 120
-rw-r--r--  synapse/storage/databases/main/roommember.py (renamed from synapse/storage/data_stores/main/roommember.py) | 70
-rw-r--r--  synapse/storage/databases/main/schema/delta/12/v12.sql (renamed from synapse/storage/data_stores/main/schema/delta/12/v12.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/13/v13.sql (renamed from synapse/storage/data_stores/main/schema/delta/13/v13.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/14/v14.sql (renamed from synapse/storage/data_stores/main/schema/delta/14/v14.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/15/appservice_txns.sql (renamed from synapse/storage/data_stores/main/schema/delta/15/appservice_txns.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/15/presence_indices.sql (renamed from synapse/storage/data_stores/main/schema/delta/15/presence_indices.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/15/v15.sql (renamed from synapse/storage/data_stores/main/schema/delta/15/v15.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/16/events_order_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/16/events_order_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/16/remote_media_cache_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/16/remote_media_cache_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/16/remove_duplicates.sql (renamed from synapse/storage/data_stores/main/schema/delta/16/remove_duplicates.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/16/room_alias_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/16/room_alias_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/16/unique_constraints.sql (renamed from synapse/storage/data_stores/main/schema/delta/16/unique_constraints.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/16/users.sql (renamed from synapse/storage/data_stores/main/schema/delta/16/users.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/17/drop_indexes.sql (renamed from synapse/storage/data_stores/main/schema/delta/17/drop_indexes.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/17/server_keys.sql (renamed from synapse/storage/data_stores/main/schema/delta/17/server_keys.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/17/user_threepids.sql (renamed from synapse/storage/data_stores/main/schema/delta/17/user_threepids.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/18/server_keys_bigger_ints.sql (renamed from synapse/storage/data_stores/main/schema/delta/18/server_keys_bigger_ints.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/19/event_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/19/event_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/20/dummy.sql (renamed from synapse/storage/data_stores/main/schema/delta/20/dummy.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/20/pushers.py (renamed from synapse/storage/data_stores/main/schema/delta/20/pushers.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/21/end_to_end_keys.sql (renamed from synapse/storage/data_stores/main/schema/delta/21/end_to_end_keys.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/21/receipts.sql (renamed from synapse/storage/data_stores/main/schema/delta/21/receipts.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/22/receipts_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/22/receipts_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/22/user_threepids_unique.sql (renamed from synapse/storage/data_stores/main/schema/delta/22/user_threepids_unique.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/24/stats_reporting.sql (renamed from synapse/storage/data_stores/main/schema/delta/24/stats_reporting.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/25/fts.py (renamed from synapse/storage/data_stores/main/schema/delta/25/fts.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/25/guest_access.sql (renamed from synapse/storage/data_stores/main/schema/delta/25/guest_access.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/25/history_visibility.sql (renamed from synapse/storage/data_stores/main/schema/delta/25/history_visibility.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/25/tags.sql (renamed from synapse/storage/data_stores/main/schema/delta/25/tags.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/26/account_data.sql (renamed from synapse/storage/data_stores/main/schema/delta/26/account_data.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/27/account_data.sql (renamed from synapse/storage/data_stores/main/schema/delta/27/account_data.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/27/forgotten_memberships.sql (renamed from synapse/storage/data_stores/main/schema/delta/27/forgotten_memberships.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/27/ts.py (renamed from synapse/storage/data_stores/main/schema/delta/27/ts.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/28/event_push_actions.sql (renamed from synapse/storage/data_stores/main/schema/delta/28/event_push_actions.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/28/events_room_stream.sql (renamed from synapse/storage/data_stores/main/schema/delta/28/events_room_stream.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/28/public_roms_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/28/public_roms_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/28/receipts_user_id_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/28/receipts_user_id_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/28/upgrade_times.sql (renamed from synapse/storage/data_stores/main/schema/delta/28/upgrade_times.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/28/users_is_guest.sql (renamed from synapse/storage/data_stores/main/schema/delta/28/users_is_guest.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/29/push_actions.sql (renamed from synapse/storage/data_stores/main/schema/delta/29/push_actions.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/30/alias_creator.sql (renamed from synapse/storage/data_stores/main/schema/delta/30/alias_creator.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/30/as_users.py (renamed from synapse/storage/data_stores/main/schema/delta/30/as_users.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/30/deleted_pushers.sql (renamed from synapse/storage/data_stores/main/schema/delta/30/deleted_pushers.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/30/presence_stream.sql (renamed from synapse/storage/data_stores/main/schema/delta/30/presence_stream.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/30/public_rooms.sql (renamed from synapse/storage/data_stores/main/schema/delta/30/public_rooms.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/30/push_rule_stream.sql (renamed from synapse/storage/data_stores/main/schema/delta/30/push_rule_stream.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/30/threepid_guest_access_tokens.sql (renamed from synapse/storage/data_stores/main/schema/delta/30/threepid_guest_access_tokens.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/31/invites.sql (renamed from synapse/storage/data_stores/main/schema/delta/31/invites.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/31/local_media_repository_url_cache.sql (renamed from synapse/storage/data_stores/main/schema/delta/31/local_media_repository_url_cache.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/31/pushers.py (renamed from synapse/storage/data_stores/main/schema/delta/31/pushers.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/31/pushers_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/31/pushers_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/31/search_update.py (renamed from synapse/storage/data_stores/main/schema/delta/31/search_update.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/32/events.sql (renamed from synapse/storage/data_stores/main/schema/delta/32/events.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/32/openid.sql (renamed from synapse/storage/data_stores/main/schema/delta/32/openid.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/32/pusher_throttle.sql (renamed from synapse/storage/data_stores/main/schema/delta/32/pusher_throttle.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/32/remove_indices.sql (renamed from synapse/storage/data_stores/main/schema/delta/32/remove_indices.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/32/reports.sql (renamed from synapse/storage/data_stores/main/schema/delta/32/reports.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/33/access_tokens_device_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/33/access_tokens_device_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/33/devices.sql (renamed from synapse/storage/data_stores/main/schema/delta/33/devices.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys.sql (renamed from synapse/storage/data_stores/main/schema/delta/33/devices_for_e2e_keys.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql (renamed from synapse/storage/data_stores/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/33/event_fields.py (renamed from synapse/storage/data_stores/main/schema/delta/33/event_fields.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/33/remote_media_ts.py (renamed from synapse/storage/data_stores/main/schema/delta/33/remote_media_ts.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/33/user_ips_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/33/user_ips_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/34/appservice_stream.sql (renamed from synapse/storage/data_stores/main/schema/delta/34/appservice_stream.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/34/cache_stream.py (renamed from synapse/storage/data_stores/main/schema/delta/34/cache_stream.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/34/device_inbox.sql (renamed from synapse/storage/data_stores/main/schema/delta/34/device_inbox.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/34/push_display_name_rename.sql (renamed from synapse/storage/data_stores/main/schema/delta/34/push_display_name_rename.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/34/received_txn_purge.py (renamed from synapse/storage/data_stores/main/schema/delta/34/received_txn_purge.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/35/contains_url.sql (renamed from synapse/storage/data_stores/main/schema/delta/35/contains_url.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/35/device_outbox.sql (renamed from synapse/storage/data_stores/main/schema/delta/35/device_outbox.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/35/device_stream_id.sql (renamed from synapse/storage/data_stores/main/schema/delta/35/device_stream_id.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/35/event_push_actions_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/35/event_push_actions_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/35/public_room_list_change_stream.sql (renamed from synapse/storage/data_stores/main/schema/delta/35/public_room_list_change_stream.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/35/stream_order_to_extrem.sql (renamed from synapse/storage/data_stores/main/schema/delta/35/stream_order_to_extrem.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/36/readd_public_rooms.sql (renamed from synapse/storage/data_stores/main/schema/delta/36/readd_public_rooms.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/37/remove_auth_idx.py (renamed from synapse/storage/data_stores/main/schema/delta/37/remove_auth_idx.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/37/user_threepids.sql (renamed from synapse/storage/data_stores/main/schema/delta/37/user_threepids.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/38/postgres_fts_gist.sql (renamed from synapse/storage/data_stores/main/schema/delta/38/postgres_fts_gist.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/39/appservice_room_list.sql (renamed from synapse/storage/data_stores/main/schema/delta/39/appservice_room_list.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/39/device_federation_stream_idx.sql (renamed from synapse/storage/data_stores/main/schema/delta/39/device_federation_stream_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/39/event_push_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/39/event_push_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/39/federation_out_position.sql (renamed from synapse/storage/data_stores/main/schema/delta/39/federation_out_position.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/39/membership_profile.sql (renamed from synapse/storage/data_stores/main/schema/delta/39/membership_profile.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/40/current_state_idx.sql (renamed from synapse/storage/data_stores/main/schema/delta/40/current_state_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/40/device_inbox.sql (renamed from synapse/storage/data_stores/main/schema/delta/40/device_inbox.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/40/device_list_streams.sql (renamed from synapse/storage/data_stores/main/schema/delta/40/device_list_streams.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/40/event_push_summary.sql (renamed from synapse/storage/data_stores/main/schema/delta/40/event_push_summary.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/40/pushers.sql (renamed from synapse/storage/data_stores/main/schema/delta/40/pushers.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/41/device_list_stream_idx.sql (renamed from synapse/storage/data_stores/main/schema/delta/41/device_list_stream_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/41/device_outbound_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/41/device_outbound_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/41/event_search_event_id_idx.sql (renamed from synapse/storage/data_stores/main/schema/delta/41/event_search_event_id_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/41/ratelimit.sql (renamed from synapse/storage/data_stores/main/schema/delta/41/ratelimit.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/42/current_state_delta.sql (renamed from synapse/storage/data_stores/main/schema/delta/42/current_state_delta.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/42/device_list_last_id.sql (renamed from synapse/storage/data_stores/main/schema/delta/42/device_list_last_id.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/42/event_auth_state_only.sql (renamed from synapse/storage/data_stores/main/schema/delta/42/event_auth_state_only.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/42/user_dir.py (renamed from synapse/storage/data_stores/main/schema/delta/42/user_dir.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/43/blocked_rooms.sql (renamed from synapse/storage/data_stores/main/schema/delta/43/blocked_rooms.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/43/quarantine_media.sql (renamed from synapse/storage/data_stores/main/schema/delta/43/quarantine_media.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/43/url_cache.sql (renamed from synapse/storage/data_stores/main/schema/delta/43/url_cache.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/43/user_share.sql (renamed from synapse/storage/data_stores/main/schema/delta/43/user_share.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/44/expire_url_cache.sql (renamed from synapse/storage/data_stores/main/schema/delta/44/expire_url_cache.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/45/group_server.sql (renamed from synapse/storage/data_stores/main/schema/delta/45/group_server.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/45/profile_cache.sql (renamed from synapse/storage/data_stores/main/schema/delta/45/profile_cache.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/46/drop_refresh_tokens.sql (renamed from synapse/storage/data_stores/main/schema/delta/46/drop_refresh_tokens.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/46/drop_unique_deleted_pushers.sql (renamed from synapse/storage/data_stores/main/schema/delta/46/drop_unique_deleted_pushers.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/46/group_server.sql (renamed from synapse/storage/data_stores/main/schema/delta/46/group_server.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/46/local_media_repository_url_idx.sql (renamed from synapse/storage/data_stores/main/schema/delta/46/local_media_repository_url_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/46/user_dir_null_room_ids.sql (renamed from synapse/storage/data_stores/main/schema/delta/46/user_dir_null_room_ids.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/46/user_dir_typos.sql (renamed from synapse/storage/data_stores/main/schema/delta/46/user_dir_typos.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/47/last_access_media.sql (renamed from synapse/storage/data_stores/main/schema/delta/47/last_access_media.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/47/postgres_fts_gin.sql (renamed from synapse/storage/data_stores/main/schema/delta/47/postgres_fts_gin.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/47/push_actions_staging.sql (renamed from synapse/storage/data_stores/main/schema/delta/47/push_actions_staging.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/48/add_user_consent.sql (renamed from synapse/storage/data_stores/main/schema/delta/48/add_user_consent.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/48/add_user_ips_last_seen_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/48/add_user_ips_last_seen_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/48/deactivated_users.sql (renamed from synapse/storage/data_stores/main/schema/delta/48/deactivated_users.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/48/group_unique_indexes.py (renamed from synapse/storage/data_stores/main/schema/delta/48/group_unique_indexes.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/48/groups_joinable.sql (renamed from synapse/storage/data_stores/main/schema/delta/48/groups_joinable.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/49/add_user_consent_server_notice_sent.sql (renamed from synapse/storage/data_stores/main/schema/delta/49/add_user_consent_server_notice_sent.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/49/add_user_daily_visits.sql (renamed from synapse/storage/data_stores/main/schema/delta/49/add_user_daily_visits.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/49/add_user_ips_last_seen_only_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/49/add_user_ips_last_seen_only_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/50/add_creation_ts_users_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/50/add_creation_ts_users_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/50/erasure_store.sql (renamed from synapse/storage/data_stores/main/schema/delta/50/erasure_store.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py (renamed from synapse/storage/data_stores/main/schema/delta/50/make_event_content_nullable.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/51/e2e_room_keys.sql (renamed from synapse/storage/data_stores/main/schema/delta/51/e2e_room_keys.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/51/monthly_active_users.sql (renamed from synapse/storage/data_stores/main/schema/delta/51/monthly_active_users.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/52/add_event_to_state_group_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/52/add_event_to_state_group_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/52/device_list_streams_unique_idx.sql (renamed from synapse/storage/data_stores/main/schema/delta/52/device_list_streams_unique_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/52/e2e_room_keys.sql (renamed from synapse/storage/data_stores/main/schema/delta/52/e2e_room_keys.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/53/add_user_type_to_users.sql (renamed from synapse/storage/data_stores/main/schema/delta/53/add_user_type_to_users.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/53/drop_sent_transactions.sql (renamed from synapse/storage/data_stores/main/schema/delta/53/drop_sent_transactions.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/53/event_format_version.sql (renamed from synapse/storage/data_stores/main/schema/delta/53/event_format_version.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql (renamed from synapse/storage/data_stores/main/schema/delta/53/user_dir_populate.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/53/user_ips_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/53/user_ips_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/53/user_share.sql (renamed from synapse/storage/data_stores/main/schema/delta/53/user_share.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/53/user_threepid_id.sql (renamed from synapse/storage/data_stores/main/schema/delta/53/user_threepid_id.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/53/users_in_public_rooms.sql (renamed from synapse/storage/data_stores/main/schema/delta/53/users_in_public_rooms.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/54/account_validity_with_renewal.sql (renamed from synapse/storage/data_stores/main/schema/delta/54/account_validity_with_renewal.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/54/add_validity_to_server_keys.sql (renamed from synapse/storage/data_stores/main/schema/delta/54/add_validity_to_server_keys.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/54/delete_forward_extremities.sql (renamed from synapse/storage/data_stores/main/schema/delta/54/delete_forward_extremities.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/54/drop_legacy_tables.sql (renamed from synapse/storage/data_stores/main/schema/delta/54/drop_legacy_tables.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/54/drop_presence_list.sql (renamed from synapse/storage/data_stores/main/schema/delta/54/drop_presence_list.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/54/relations.sql (renamed from synapse/storage/data_stores/main/schema/delta/54/relations.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/54/stats.sql (renamed from synapse/storage/data_stores/main/schema/delta/54/stats.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/54/stats2.sql (renamed from synapse/storage/data_stores/main/schema/delta/54/stats2.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/55/access_token_expiry.sql (renamed from synapse/storage/data_stores/main/schema/delta/55/access_token_expiry.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/55/track_threepid_validations.sql (renamed from synapse/storage/data_stores/main/schema/delta/55/track_threepid_validations.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/55/users_alter_deactivated.sql (renamed from synapse/storage/data_stores/main/schema/delta/55/users_alter_deactivated.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/add_spans_to_device_lists.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/add_spans_to_device_lists.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/current_state_events_membership.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/current_state_events_membership.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/current_state_events_membership_mk2.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/current_state_events_membership_mk2.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/delete_keys_from_deleted_backups.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/delete_keys_from_deleted_backups.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/destinations_failure_ts.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/destinations_failure_ts.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/destinations_retry_interval_type.sql.postgres (renamed from synapse/storage/data_stores/main/schema/delta/56/destinations_retry_interval_type.sql.postgres) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/device_stream_id_insert.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/device_stream_id_insert.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/devices_last_seen.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/devices_last_seen.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/drop_unused_event_tables.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/drop_unused_event_tables.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/event_expiry.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/event_labels.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/event_labels.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/event_labels_background_update.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/event_labels_background_update.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/fix_room_keys_index.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/fix_room_keys_index.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/hidden_devices.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/hidden_devices.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/hidden_devices_fix.sql.sqlite (renamed from synapse/storage/data_stores/main/schema/delta/56/hidden_devices_fix.sql.sqlite) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/nuke_empty_communities_from_db.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/public_room_list_idx.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/public_room_list_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/redaction_censor.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/redaction_censor2.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres (renamed from synapse/storage/data_stores/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/redaction_censor4.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/redaction_censor4.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/room_key_etag.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/room_membership_idx.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/room_membership_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/room_retention.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/room_retention.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/signing_keys.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/signing_keys_nonunique_signatures.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/signing_keys_nonunique_signatures.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/stats_separated.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/stats_separated.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py (renamed from synapse/storage/data_stores/main/schema/delta/56/unique_user_filter_index.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/user_external_ids.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/user_external_ids.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/56/users_in_public_rooms_idx.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/users_in_public_rooms_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/57/delete_old_current_state_events.sql (renamed from synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/57/device_list_remote_cache_stale.sql (renamed from synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/57/local_current_membership.py (renamed from synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/57/remove_sent_outbound_pokes.sql (renamed from synapse/storage/data_stores/main/schema/delta/57/remove_sent_outbound_pokes.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/57/rooms_version_column.sql (renamed from synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.postgres (renamed from synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.sqlite (renamed from synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.postgres (renamed from synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.sqlite (renamed from synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/02remove_dup_outbound_pokes.sql (renamed from synapse/storage/data_stores/main/schema/delta/58/02remove_dup_outbound_pokes.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/03persist_ui_auth.sql (renamed from synapse/storage/data_stores/main/schema/delta/58/03persist_ui_auth.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/05cache_instance.sql.postgres (renamed from synapse/storage/data_stores/main/schema/delta/58/05cache_instance.sql.postgres) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/06dlols_unique_idx.py (renamed from synapse/storage/data_stores/main/schema/delta/58/06dlols_unique_idx.py) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres (renamed from synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite (renamed from synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/10drop_local_rejections_stream.sql (renamed from synapse/storage/data_stores/main/schema/delta/58/10drop_local_rejections_stream.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/10federation_pos_instance_name.sql (renamed from synapse/storage/data_stores/main/schema/delta/58/10federation_pos_instance_name.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/11user_id_seq.py (renamed from synapse/storage/data_stores/main/schema/delta/58/11user_id_seq.py) | 2
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/12room_stats.sql | 32
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/12unread_messages.sql (renamed from synapse/storage/data_stores/main/schema/delta/58/12unread_messages.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/application_services.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/application_services.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/event_edges.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/event_edges.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/event_signatures.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/event_signatures.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/im.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/im.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/keys.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/keys.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/media_repository.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/media_repository.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/presence.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/presence.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/profiles.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/profiles.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/push.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/push.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/redactions.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/redactions.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/room_aliases.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/room_aliases.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/state.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/state.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/transactions.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/transactions.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/16/users.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/16/users.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres (renamed from synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite (renamed from synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/54/stream_positions.sql (renamed from synapse/storage/data_stores/main/schema/full_schemas/54/stream_positions.sql) | 0
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/README.md (renamed from synapse/storage/data_stores/main/schema/full_schemas/README.md) | 0
-rw-r--r--  synapse/storage/databases/main/search.py (renamed from synapse/storage/data_stores/main/search.py) | 60
-rw-r--r--  synapse/storage/databases/main/signatures.py (renamed from synapse/storage/data_stores/main/signatures.py) | 2
-rw-r--r--  synapse/storage/databases/main/state.py (renamed from synapse/storage/data_stores/main/state.py) | 42
-rw-r--r--  synapse/storage/databases/main/state_deltas.py (renamed from synapse/storage/data_stores/main/state_deltas.py) | 8
-rw-r--r--  synapse/storage/databases/main/stats.py (renamed from synapse/storage/data_stores/main/stats.py) | 110
-rw-r--r--  synapse/storage/databases/main/stream.py (renamed from synapse/storage/data_stores/main/stream.py) | 50
-rw-r--r--  synapse/storage/databases/main/tags.py (renamed from synapse/storage/data_stores/main/tags.py) | 18
-rw-r--r--  synapse/storage/databases/main/transactions.py (renamed from synapse/storage/data_stores/main/transactions.py) | 24
-rw-r--r--  synapse/storage/databases/main/ui_auth.py (renamed from synapse/storage/data_stores/main/ui_auth.py) | 28
-rw-r--r--  synapse/storage/databases/main/user_directory.py (renamed from synapse/storage/data_stores/main/user_directory.py) | 122
-rw-r--r--  synapse/storage/databases/main/user_erasure_store.py (renamed from synapse/storage/data_stores/main/user_erasure_store.py) | 8
-rw-r--r--  synapse/storage/databases/state/__init__.py (renamed from synapse/storage/data_stores/state/__init__.py) | 2
-rw-r--r--  synapse/storage/databases/state/bg_updates.py (renamed from synapse/storage/data_stores/state/bg_updates.py) | 36
-rw-r--r--  synapse/storage/databases/state/schema/delta/23/drop_state_index.sql (renamed from synapse/storage/data_stores/state/schema/delta/23/drop_state_index.sql) | 0
-rw-r--r--  synapse/storage/databases/state/schema/delta/30/state_stream.sql (renamed from synapse/storage/data_stores/state/schema/delta/30/state_stream.sql) | 0
-rw-r--r--  synapse/storage/databases/state/schema/delta/32/remove_state_indices.sql (renamed from synapse/storage/data_stores/state/schema/delta/32/remove_state_indices.sql) | 0
-rw-r--r--  synapse/storage/databases/state/schema/delta/35/add_state_index.sql (renamed from synapse/storage/data_stores/state/schema/delta/35/add_state_index.sql) | 0
-rw-r--r--  synapse/storage/databases/state/schema/delta/35/state.sql (renamed from synapse/storage/data_stores/state/schema/delta/35/state.sql) | 0
-rw-r--r--  synapse/storage/databases/state/schema/delta/35/state_dedupe.sql (renamed from synapse/storage/data_stores/state/schema/delta/35/state_dedupe.sql) | 0
-rw-r--r--  synapse/storage/databases/state/schema/delta/47/state_group_seq.py (renamed from synapse/storage/data_stores/state/schema/delta/47/state_group_seq.py) | 0
-rw-r--r--  synapse/storage/databases/state/schema/delta/56/state_group_room_idx.sql (renamed from synapse/storage/data_stores/state/schema/delta/56/state_group_room_idx.sql) | 0
-rw-r--r--  synapse/storage/databases/state/schema/full_schemas/54/full.sql (renamed from synapse/storage/data_stores/state/schema/full_schemas/54/full.sql) | 0
-rw-r--r--  synapse/storage/databases/state/schema/full_schemas/54/sequence.sql.postgres (renamed from synapse/storage/data_stores/state/schema/full_schemas/54/sequence.sql.postgres) | 0
-rw-r--r--  synapse/storage/databases/state/store.py (renamed from synapse/storage/data_stores/state/store.py) | 46
-rw-r--r--  synapse/storage/persist_events.py | 6
-rw-r--r--  synapse/storage/prepare_database.py | 48
-rw-r--r--  synapse/storage/util/id_generators.py | 4
-rw-r--r--  synapse/streams/events.py | 22
-rw-r--r--  synapse/util/daemonize.py | 137
350 files changed, 1754 insertions, 1509 deletions
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 373a80a4a7..2b2cd795e0 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import gc
 import logging
 import os
@@ -22,7 +21,6 @@ import sys
 import traceback
 from typing import Iterable
 
-from daemonize import Daemonize
 from typing_extensions import NoReturn
 
 from twisted.internet import defer, error, reactor
@@ -34,6 +32,7 @@ from synapse.config.server import ListenerConfig
 from synapse.crypto import context_factory
 from synapse.logging.context import PreserveLoggingContext
 from synapse.util.async_helpers import Linearizer
+from synapse.util.daemonize import daemonize_process
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
 
@@ -129,17 +128,8 @@ def start_reactor(
             if print_pidfile:
                 print(pid_file)
 
-            daemon = Daemonize(
-                app=appname,
-                pid=pid_file,
-                action=run,
-                auto_close_fds=False,
-                verbose=True,
-                logger=logger,
-            )
-            daemon.start()
-        else:
-            run()
+            daemonize_process(pid_file, logger)
+        run()
 
 
 def quit_with_error(error_string: str) -> NoReturn:
@@ -278,7 +268,7 @@ def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]):
 
         # It is now safe to start your Synapse.
         hs.start_listening(listeners)
-        hs.get_datastore().db.start_profiling()
+        hs.get_datastore().db_pool.start_profiling()
         hs.get_pusherpool().start()
 
         setup_sentry(hs)
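
The hunk above replaces the third-party Daemonize library with an in-tree helper, synapse.util.daemonize.daemonize_process (the new synapse/util/daemonize.py appears in the diffstat at 137 lines). Only the call signature daemonize_process(pid_file, logger) is visible here; below is a minimal sketch of a classic double-fork daemonizer with that signature, offered as an illustration rather than Synapse's actual implementation. Note also the control-flow change in the hunk: run() is now invoked unconditionally after the optional daemonization, instead of only in the non-daemon branch.

    import atexit
    import os

    def daemonize_process(pid_file, logger):
        """Illustrative double-fork daemonizer; not the code added by this merge."""
        if os.fork() > 0:
            os._exit(0)  # first parent exits; the child is re-parented to init
        os.setsid()  # start a new session, detaching from the controlling terminal
        if os.fork() > 0:
            os._exit(0)  # second fork ensures we can never reacquire a terminal
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))
        atexit.register(lambda: os.remove(pid_file))
        logger.info("Daemonized; pid written to %s", pid_file)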
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index c478df53be..1a16d0b9f8 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -125,15 +125,15 @@ from synapse.rest.client.v2_alpha.register import RegisterRestServlet
 from synapse.rest.client.versions import VersionsRestServlet
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.server import HomeServer
-from synapse.storage.data_stores.main.censor_events import CensorEventsStore
-from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore
-from synapse.storage.data_stores.main.monthly_active_users import (
+from synapse.storage.databases.main.censor_events import CensorEventsStore
+from synapse.storage.databases.main.media_repository import MediaRepositoryStore
+from synapse.storage.databases.main.monthly_active_users import (
     MonthlyActiveUsersWorkerStore,
 )
-from synapse.storage.data_stores.main.presence import UserPresenceState
-from synapse.storage.data_stores.main.search import SearchWorkerStore
-from synapse.storage.data_stores.main.ui_auth import UIAuthWorkerStore
-from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
+from synapse.storage.databases.main.presence import UserPresenceState
+from synapse.storage.databases.main.search import SearchWorkerStore
+from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
+from synapse.storage.databases.main.user_directory import UserDirectoryStore
 from synapse.types import ReadReceipt
 from synapse.util.async_helpers import Linearizer
 from synapse.util.httpresourcetree import create_resource_tree
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index ec7401f911..d87a77718e 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -380,13 +380,12 @@ def setup(config_options):
 
     hs.setup_master()
 
-    @defer.inlineCallbacks
-    def do_acme():
+    async def do_acme() -> bool:
         """
         Reprovision an ACME certificate, if it's required.
 
         Returns:
-            Deferred[bool]: Whether the cert has been updated.
+            Whether the cert has been updated.
         """
         acme = hs.get_acme_handler()
 
@@ -405,7 +404,7 @@ def setup(config_options):
             provision = True
 
         if provision:
-            yield acme.provision_certificate()
+            await acme.provision_certificate()
 
         return provision
 
@@ -415,7 +414,7 @@ def setup(config_options):
         Provision a certificate from ACME, if required, and reload the TLS
         certificate if it's renewed.
         """
-        reprovisioned = yield do_acme()
+        reprovisioned = yield defer.ensureDeferred(do_acme())
         if reprovisioned:
             _base.refresh_certificate(hs)
 
@@ -427,8 +426,8 @@ def setup(config_options):
                 acme = hs.get_acme_handler()
                 # Start up the webservices which we will respond to ACME
                 # challenges with, and then provision.
-                yield acme.start_listening()
-                yield do_acme()
+                yield defer.ensureDeferred(acme.start_listening())
+                yield defer.ensureDeferred(do_acme())
 
                 # Check if it needs to be reprovisioned every day.
                 hs.get_clock().looping_call(reprovision_acme, 24 * 60 * 60 * 1000)
@@ -442,7 +441,7 @@ def setup(config_options):
 
             _base.start(hs, config.listeners)
 
-            hs.get_datastore().db.updates.start_doing_background_updates()
+            hs.get_datastore().db_pool.updates.start_doing_background_updates()
         except Exception:
             # Print the exception and bail out.
             print("Error during startup:", file=sys.stderr)
@@ -552,8 +551,8 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
     #
 
     # This only reports info about the *main* database.
-    stats["database_engine"] = hs.get_datastore().db.engine.module.__name__
-    stats["database_server_version"] = hs.get_datastore().db.engine.server_version
+    stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__
+    stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version
 
     logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
     try:
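
This file and synapse/appservice/api.py below apply the same migration pattern: a function is converted from @defer.inlineCallbacks to async def, and its remaining generator-style callers bridge to it via defer.ensureDeferred, which wraps the coroutine in a Deferred that yield can drive. A self-contained illustration of the bridge, with invented names:

    from twisted.internet import defer

    async def check_and_provision() -> bool:
        # stand-in for an async-converted helper such as do_acme()
        return True

    @defer.inlineCallbacks
    def legacy_caller():
        # Yielding a bare coroutine inside inlineCallbacks does not work;
        # ensureDeferred converts the coroutine into a Deferred first.
        updated = yield defer.ensureDeferred(check_and_provision())
        return updated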
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index db578bda79..e72a0b9ac0 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -175,7 +175,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                 urllib.parse.quote(protocol),
             )
             try:
-                info = yield self.get_json(uri, {})
+                info = yield defer.ensureDeferred(self.get_json(uri, {}))
 
                 if not _is_valid_3pe_metadata(info):
                     logger.warning(
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 62bccd9ef5..8a18a9ca2a 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -100,7 +100,10 @@ class DatabaseConnectionConfig:
 
         self.name = name
         self.config = db_config
-        self.data_stores = data_stores
+
+        # The `data_stores` config is actually talking about `databases` (we
+        # changed the name).
+        self.databases = data_stores
 
 
 class DatabaseConfig(Config):
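For context, the data_stores list being re-labelled here names the logical stores a database connection serves; the two stores visible in this diffstat are "main" and "state". A hedged sketch of the parsed shape of one such entry follows; the exact keys are an assumption, not something shown in this diff:

    # Hypothetical parsed form of one per-connection config entry. The
    # `data_stores` key is the one the comment above says really means
    # "databases"; after this change the code exposes it as `self.databases`.
    db_entry = {
        "name": "psycopg2",
        "args": {"database": "synapse"},
        "data_stores": ["main", "state"],
    }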
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 2dd94bae2b..b2c78ac40c 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -93,6 +93,15 @@ class RatelimitConfig(Config):
         if rc_admin_redaction:
             self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)
 
+        self.rc_joins_local = RateLimitConfig(
+            config.get("rc_joins", {}).get("local", {}),
+            defaults={"per_second": 0.1, "burst_count": 3},
+        )
+        self.rc_joins_remote = RateLimitConfig(
+            config.get("rc_joins", {}).get("remote", {}),
+            defaults={"per_second": 0.01, "burst_count": 3},
+        )
+
     def generate_config_section(self, **kwargs):
         return """\
         ## Ratelimiting ##
@@ -118,6 +127,10 @@ class RatelimitConfig(Config):
         #   - one for ratelimiting redactions by room admins. If this is not explicitly
         #     set then it uses the same ratelimiting as per rc_message. This is useful
         #     to allow room admins to deal with abuse quickly.
+        #   - two for ratelimiting number of rooms a user can join, "local" for when
+        #     users are joining rooms the server is already in (this is cheap) vs
+        #     "remote" for when users are trying to join rooms not on the server (which
+        #     can be more expensive)
         #
         # The defaults are as shown below.
         #
@@ -143,6 +156,14 @@ class RatelimitConfig(Config):
         #rc_admin_redaction:
         #  per_second: 1
         #  burst_count: 50
+        #
+        #rc_joins:
+        #  local:
+        #    per_second: 0.1
+        #    burst_count: 3
+        #  remote:
+        #    per_second: 0.01
+        #    burst_count: 3
 
 
         # Ratelimiting settings for incoming federation
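
# Illustrative token-bucket sketch of what the rc_joins defaults above mean:
# per_second=0.1 with burst_count=3 permits a burst of 3 joins, then roughly
# one join every 10 seconds. This is a simplified stand-in, not Synapse's
# actual Ratelimiter implementation.
import time

class TokenBucket:
    def __init__(self, rate_hz: float, burst_count: int):
        self.rate_hz = rate_hz          # tokens added per second
        self.burst_count = burst_count  # maximum bucket size
        self.tokens = float(burst_count)
        self.last = time.time()

    def can_do_action(self) -> bool:
        now = time.time()
        # refill for the elapsed time, capped at the burst size
        self.tokens = min(
            self.burst_count, self.tokens + (now - self.last) * self.rate_hz
        )
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False

local_joins = TokenBucket(rate_hz=0.1, burst_count=3)
assert all(local_joins.can_do_action() for _ in range(3))  # burst allowed
assert not local_joins.can_do_action()                     # fourth is limited
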
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 443cde0b6d..28ef7cfdb9 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -223,8 +223,7 @@ class Keyring(object):
 
         return results
 
-    @defer.inlineCallbacks
-    def _start_key_lookups(self, verify_requests):
+    async def _start_key_lookups(self, verify_requests):
         """Sets off the key fetches for each verify request
 
         Once each fetch completes, verify_request.key_ready will be resolved.
@@ -245,7 +244,7 @@ class Keyring(object):
                 server_to_request_ids.setdefault(server_name, set()).add(request_id)
 
             # Wait for any previous lookups to complete before proceeding.
-            yield self.wait_for_previous_lookups(server_to_request_ids.keys())
+            await self.wait_for_previous_lookups(server_to_request_ids.keys())
 
             # take out a lock on each of the servers by sticking a Deferred in
             # key_downloads
@@ -283,15 +282,14 @@ class Keyring(object):
         except Exception:
             logger.exception("Error starting key lookups")
 
-    @defer.inlineCallbacks
-    def wait_for_previous_lookups(self, server_names):
+    async def wait_for_previous_lookups(self, server_names) -> None:
         """Waits for any previous key lookups for the given servers to finish.
 
         Args:
             server_names (Iterable[str]): list of servers which we want to look up
 
         Returns:
-            Deferred[None]: resolves once all key lookups for the given servers have
+            Resolves once all key lookups for the given servers have
                 completed. Follows the synapse rules of logcontext preservation.
         """
         loop_count = 1
@@ -309,7 +307,7 @@ class Keyring(object):
                 loop_count,
             )
             with PreserveLoggingContext():
-                yield defer.DeferredList((w[1] for w in wait_on))
+                await defer.DeferredList((w[1] for w in wait_on))
 
             loop_count += 1
 
@@ -326,44 +324,44 @@ class Keyring(object):
 
         remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called}
 
-        @defer.inlineCallbacks
-        def do_iterations():
-            with Measure(self.clock, "get_server_verify_keys"):
-                for f in self._key_fetchers:
-                    if not remaining_requests:
-                        return
-                    yield self._attempt_key_fetches_with_fetcher(f, remaining_requests)
+        async def do_iterations():
+            try:
+                with Measure(self.clock, "get_server_verify_keys"):
+                    for f in self._key_fetchers:
+                        if not remaining_requests:
+                            return
+                        await self._attempt_key_fetches_with_fetcher(
+                            f, remaining_requests
+                        )
 
-                # look for any requests which weren't satisfied
+                    # look for any requests which weren't satisfied
+                    with PreserveLoggingContext():
+                        for verify_request in remaining_requests:
+                            verify_request.key_ready.errback(
+                                SynapseError(
+                                    401,
+                                    "No key for %s with ids in %s (min_validity %i)"
+                                    % (
+                                        verify_request.server_name,
+                                        verify_request.key_ids,
+                                        verify_request.minimum_valid_until_ts,
+                                    ),
+                                    Codes.UNAUTHORIZED,
+                                )
+                            )
+            except Exception as err:
+                # we don't really expect to get here, because any errors should already
+                # have been caught and logged. But if we do, let's log the error and make
+                # sure that all of the deferreds are resolved.
+                logger.error("Unexpected error in _get_server_verify_keys: %s", err)
                 with PreserveLoggingContext():
                     for verify_request in remaining_requests:
-                        verify_request.key_ready.errback(
-                            SynapseError(
-                                401,
-                                "No key for %s with ids in %s (min_validity %i)"
-                                % (
-                                    verify_request.server_name,
-                                    verify_request.key_ids,
-                                    verify_request.minimum_valid_until_ts,
-                                ),
-                                Codes.UNAUTHORIZED,
-                            )
-                        )
-
-        def on_err(err):
-            # we don't really expect to get here, because any errors should already
-            # have been caught and logged. But if we do, let's log the error and make
-            # sure that all of the deferreds are resolved.
-            logger.error("Unexpected error in _get_server_verify_keys: %s", err)
-            with PreserveLoggingContext():
-                for verify_request in remaining_requests:
-                    if not verify_request.key_ready.called:
-                        verify_request.key_ready.errback(err)
+                        if not verify_request.key_ready.called:
+                            verify_request.key_ready.errback(err)
 
-        run_in_background(do_iterations).addErrback(on_err)
+        run_in_background(do_iterations)
 
-    @defer.inlineCallbacks
-    def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests):
+    async def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests):
         """Use a key fetcher to attempt to satisfy some key requests
 
         Args:
@@ -390,7 +388,7 @@ class Keyring(object):
                     verify_request.minimum_valid_until_ts,
                 )
 
-        results = yield fetcher.get_keys(missing_keys)
+        results = await fetcher.get_keys(missing_keys)
 
         completed = []
         for verify_request in remaining_requests:
@@ -423,7 +421,7 @@ class Keyring(object):
 
 
 class KeyFetcher(object):
-    def get_keys(self, keys_to_fetch):
+    async def get_keys(self, keys_to_fetch):
         """
         Args:
             keys_to_fetch (dict[str, dict[str, int]]):
@@ -442,8 +440,7 @@ class StoreKeyFetcher(KeyFetcher):
     def __init__(self, hs):
         self.store = hs.get_datastore()
 
-    @defer.inlineCallbacks
-    def get_keys(self, keys_to_fetch):
+    async def get_keys(self, keys_to_fetch):
         """see KeyFetcher.get_keys"""
 
         keys_to_fetch = (
@@ -452,7 +449,7 @@ class StoreKeyFetcher(KeyFetcher):
             for key_id in keys_for_server.keys()
         )
 
-        res = yield self.store.get_server_verify_keys(keys_to_fetch)
+        res = await self.store.get_server_verify_keys(keys_to_fetch)
         keys = {}
         for (server_name, key_id), key in res.items():
             keys.setdefault(server_name, {})[key_id] = key
@@ -464,8 +461,7 @@ class BaseV2KeyFetcher(object):
         self.store = hs.get_datastore()
         self.config = hs.get_config()
 
-    @defer.inlineCallbacks
-    def process_v2_response(self, from_server, response_json, time_added_ms):
+    async def process_v2_response(self, from_server, response_json, time_added_ms):
         """Parse a 'Server Keys' structure from the result of a /key request
 
         This is used to parse either the entirety of the response from
@@ -537,7 +533,7 @@ class BaseV2KeyFetcher(object):
 
         key_json_bytes = encode_canonical_json(response_json)
 
-        yield make_deferred_yieldable(
+        await make_deferred_yieldable(
             defer.gatherResults(
                 [
                     run_in_background(
@@ -567,14 +563,12 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
         self.client = hs.get_http_client()
         self.key_servers = self.config.key_servers
 
-    @defer.inlineCallbacks
-    def get_keys(self, keys_to_fetch):
+    async def get_keys(self, keys_to_fetch):
         """see KeyFetcher.get_keys"""
 
-        @defer.inlineCallbacks
-        def get_key(key_server):
+        async def get_key(key_server):
             try:
-                result = yield self.get_server_verify_key_v2_indirect(
+                result = await self.get_server_verify_key_v2_indirect(
                     keys_to_fetch, key_server
                 )
                 return result
@@ -592,7 +586,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
 
             return {}
 
-        results = yield make_deferred_yieldable(
+        results = await make_deferred_yieldable(
             defer.gatherResults(
                 [run_in_background(get_key, server) for server in self.key_servers],
                 consumeErrors=True,
@@ -606,8 +600,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
 
         return union_of_keys
 
-    @defer.inlineCallbacks
-    def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
+    async def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
         """
         Args:
             keys_to_fetch (dict[str, dict[str, int]]):
@@ -617,7 +610,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
                 the keys
 
         Returns:
-            Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map
+            dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]: map
                 from server_name -> key_id -> FetchKeyResult
 
         Raises:
@@ -632,20 +625,18 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
         )
 
         try:
-            query_response = yield defer.ensureDeferred(
-                self.client.post_json(
-                    destination=perspective_name,
-                    path="/_matrix/key/v2/query",
-                    data={
-                        "server_keys": {
-                            server_name: {
-                                key_id: {"minimum_valid_until_ts": min_valid_ts}
-                                for key_id, min_valid_ts in server_keys.items()
-                            }
-                            for server_name, server_keys in keys_to_fetch.items()
+            query_response = await self.client.post_json(
+                destination=perspective_name,
+                path="/_matrix/key/v2/query",
+                data={
+                    "server_keys": {
+                        server_name: {
+                            key_id: {"minimum_valid_until_ts": min_valid_ts}
+                            for key_id, min_valid_ts in server_keys.items()
                         }
-                    },
-                )
+                        for server_name, server_keys in keys_to_fetch.items()
+                    }
+                },
             )
         except (NotRetryingDestination, RequestSendFailed) as e:
             # these both have str() representations which we can't really improve upon
@@ -670,7 +661,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
             try:
                 self._validate_perspectives_response(key_server, response)
 
-                processed_response = yield self.process_v2_response(
+                processed_response = await self.process_v2_response(
                     perspective_name, response, time_added_ms=time_now_ms
                 )
             except KeyLookupError as e:
@@ -689,7 +680,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
             )
             keys.setdefault(server_name, {}).update(processed_response)
 
-        yield self.store.store_server_verify_keys(
+        await self.store.store_server_verify_keys(
             perspective_name, time_now_ms, added_keys
         )
 
@@ -741,24 +732,23 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
         self.clock = hs.get_clock()
         self.client = hs.get_http_client()
 
-    def get_keys(self, keys_to_fetch):
+    async def get_keys(self, keys_to_fetch):
         """
         Args:
             keys_to_fetch (dict[str, iterable[str]]):
                 the keys to be fetched. server_name -> key_ids
 
         Returns:
-            Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]:
+            dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]:
                 map from server_name -> key_id -> FetchKeyResult
         """
 
         results = {}
 
-        @defer.inlineCallbacks
-        def get_key(key_to_fetch_item):
+        async def get_key(key_to_fetch_item):
             server_name, key_ids = key_to_fetch_item
             try:
-                keys = yield self.get_server_verify_key_v2_direct(server_name, key_ids)
+                keys = await self.get_server_verify_key_v2_direct(server_name, key_ids)
                 results[server_name] = keys
             except KeyLookupError as e:
                 logger.warning(
@@ -767,12 +757,11 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
             except Exception:
                 logger.exception("Error getting keys %s from %s", key_ids, server_name)
 
-        return yieldable_gather_results(get_key, keys_to_fetch.items()).addCallback(
-            lambda _: results
-        )
+        await yieldable_gather_results(get_key, keys_to_fetch.items())
+
+        return results
 
-    @defer.inlineCallbacks
-    def get_server_verify_key_v2_direct(self, server_name, key_ids):
+    async def get_server_verify_key_v2_direct(self, server_name, key_ids):
         """
 
         Args:
@@ -794,25 +783,23 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
 
             time_now_ms = self.clock.time_msec()
             try:
-                response = yield defer.ensureDeferred(
-                    self.client.get_json(
-                        destination=server_name,
-                        path="/_matrix/key/v2/server/"
-                        + urllib.parse.quote(requested_key_id),
-                        ignore_backoff=True,
-                        # we only give the remote server 10s to respond. It should be an
-                        # easy request to handle, so if it doesn't reply within 10s, it's
-                        # probably not going to.
-                        #
-                        # Furthermore, when we are acting as a notary server, we cannot
-                        # wait all day for all of the origin servers, as the requesting
-                        # server will otherwise time out before we can respond.
-                        #
-                        # (Note that get_json may make 4 attempts, so this can still take
-                        # almost 45 seconds to fetch the headers, plus up to another 60s to
-                        # read the response).
-                        timeout=10000,
-                    )
+                response = await self.client.get_json(
+                    destination=server_name,
+                    path="/_matrix/key/v2/server/"
+                    + urllib.parse.quote(requested_key_id),
+                    ignore_backoff=True,
+                    # we only give the remote server 10s to respond. It should be an
+                    # easy request to handle, so if it doesn't reply within 10s, it's
+                    # probably not going to.
+                    #
+                    # Furthermore, when we are acting as a notary server, we cannot
+                    # wait all day for all of the origin servers, as the requesting
+                    # server will otherwise time out before we can respond.
+                    #
+                    # (Note that get_json may make 4 attempts, so this can still take
+                    # almost 45 seconds to fetch the headers, plus up to another 60s to
+                    # read the response).
+                    timeout=10000,
                 )
             except (NotRetryingDestination, RequestSendFailed) as e:
                 # these both have str() representations which we can't really improve
@@ -827,12 +814,12 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
                     % (server_name, response["server_name"])
                 )
 
-            response_keys = yield self.process_v2_response(
+            response_keys = await self.process_v2_response(
                 from_server=server_name,
                 response_json=response,
                 time_added_ms=time_now_ms,
             )
-            yield self.store.store_server_verify_keys(
+            await self.store.store_server_verify_keys(
                 server_name,
                 time_now_ms,
                 ((server_name, key_id, key) for key_id, key in response_keys.items()),
@@ -842,22 +829,18 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
         return keys
 
 
-@defer.inlineCallbacks
-def _handle_key_deferred(verify_request):
+async def _handle_key_deferred(verify_request) -> None:
     """Waits for the key to become available, and then performs a verification
 
     Args:
         verify_request (VerifyJsonRequest):
 
-    Returns:
-        Deferred[None]
-
     Raises:
         SynapseError if there was a problem performing the verification
     """
     server_name = verify_request.server_name
     with PreserveLoggingContext():
-        _, key_id, verify_key = yield verify_request.key_ready
+        _, key_id, verify_key = await verify_request.key_ready
 
     json_object = verify_request.json_object
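
# Schematic before/after of the conversion applied throughout keyring.py,
# with a hypothetical fetcher: @defer.inlineCallbacks generators become
# native coroutines, `yield` becomes `await`, and addErrback handlers become
# ordinary try/except blocks inside the coroutine, as in do_iterations above.
from twisted.internet import defer

@defer.inlineCallbacks
def old_get_keys(fetcher):
    res = yield fetcher.get_keys({})
    return res

async def new_get_keys(fetcher):
    try:
        return await fetcher.get_keys({})
    except Exception:
        # failures are now handled inline rather than via addErrback
        raise
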
 
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index cca93e3a46..afecafe15c 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -23,7 +23,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.types import StateMap
 
 if TYPE_CHECKING:
-    from synapse.storage.data_stores.main import DataStore
+    from synapse.storage.databases.main import DataStore
 
 
 @attr.s(slots=True)
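
# The TYPE_CHECKING guard above keeps the DataStore import out of the
# runtime import graph (avoiding an import cycle) while still letting type
# checkers resolve the annotation; schematic example, using a string
# annotation since the name is not bound at runtime:
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from synapse.storage.databases.main import DataStore

def describe_store(store: "DataStore") -> str:
    return store.__class__.__name__
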
diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py
index a2d7959abe..7666d3abcd 100644
--- a/synapse/handlers/acme.py
+++ b/synapse/handlers/acme.py
@@ -17,7 +17,6 @@ import logging
 
 import twisted
 import twisted.internet.error
-from twisted.internet import defer
 from twisted.web import server, static
 from twisted.web.resource import Resource
 
@@ -41,8 +40,7 @@ class AcmeHandler(object):
         self.reactor = hs.get_reactor()
         self._acme_domain = hs.config.acme_domain
 
-    @defer.inlineCallbacks
-    def start_listening(self):
+    async def start_listening(self):
         from synapse.handlers import acme_issuing_service
 
         # Configure logging for txacme, if you need to debug
@@ -82,18 +80,17 @@ class AcmeHandler(object):
         self._issuer._registered = False
 
         try:
-            yield self._issuer._ensure_registered()
+            await self._issuer._ensure_registered()
         except Exception:
             logger.error(ACME_REGISTER_FAIL_ERROR)
             raise
 
-    @defer.inlineCallbacks
-    def provision_certificate(self):
+    async def provision_certificate(self):
 
         logger.warning("Reprovisioning %s", self._acme_domain)
 
         try:
-            yield self._issuer.issue_cert(self._acme_domain)
+            await self._issuer.issue_cert(self._acme_domain)
         except Exception:
             logger.exception("Fail!")
             raise
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 0d7d1adcea..b3764dedae 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -71,7 +71,7 @@ from synapse.replication.http.federation import (
 )
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
 from synapse.state import StateResolutionStore, resolve_events_with_store
-from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
+from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.distributor import user_joined_room
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 0bd2c3e37a..92b7404706 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -22,14 +22,10 @@ import urllib.parse
 from typing import Awaitable, Callable, Dict, List, Optional, Tuple
 
 from canonicaljson import json
-from signedjson.key import decode_verify_key_bytes
-from signedjson.sign import verify_signed_json
-from unpaddedbase64 import decode_base64
 
 from twisted.internet.error import TimeoutError
 
 from synapse.api.errors import (
-    AuthError,
     CodeMessageException,
     Codes,
     HttpResponseException,
@@ -628,9 +624,9 @@ class IdentityHandler(BaseHandler):
             )
 
             if "mxid" in data:
-                if "signatures" not in data:
-                    raise AuthError(401, "No signatures on 3pid binding")
-                await self._verify_any_signature(data, id_server)
+                # note: we used to verify the identity server's signature here, but no longer
+                # require or validate it. See the following for context:
+                # https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950
                 return data["mxid"]
         except TimeoutError:
             raise SynapseError(500, "Timed out contacting identity server")
@@ -751,30 +747,6 @@ class IdentityHandler(BaseHandler):
         mxid = lookup_results["mappings"].get(lookup_value)
         return mxid
 
-    async def _verify_any_signature(self, data, server_hostname):
-        if server_hostname not in data["signatures"]:
-            raise AuthError(401, "No signature from server %s" % (server_hostname,))
-        for key_name, signature in data["signatures"][server_hostname].items():
-            try:
-                key_data = await self.blacklisting_http_client.get_json(
-                    "%s%s/_matrix/identity/api/v1/pubkey/%s"
-                    % (id_server_scheme, server_hostname, key_name)
-                )
-            except TimeoutError:
-                raise SynapseError(500, "Timed out contacting identity server")
-            if "public_key" not in key_data:
-                raise AuthError(
-                    401, "No public key named %s from %s" % (key_name, server_hostname)
-                )
-            verify_signed_json(
-                data,
-                server_hostname,
-                decode_verify_key_bytes(
-                    key_name, decode_base64(key_data["public_key"])
-                ),
-            )
-            return
-
     async def ask_id_server_for_third_party_invite(
         self,
         requester: Requester,
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index f88bad5f25..ae6bd1d352 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -109,7 +109,7 @@ class InitialSyncHandler(BaseHandler):
 
         rooms_ret = []
 
-        now_token = await self.hs.get_event_sources().get_current_token()
+        now_token = self.hs.get_event_sources().get_current_token()
 
         presence_stream = self.hs.get_event_sources().sources["presence"]
         pagination_config = PaginationConfig(from_token=now_token)
@@ -360,7 +360,7 @@ class InitialSyncHandler(BaseHandler):
             current_state.values(), time_now
         )
 
-        now_token = await self.hs.get_event_sources().get_current_token()
+        now_token = self.hs.get_event_sources().get_current_token()
 
         limit = pagin_config.limit if pagin_config else None
         if limit is None:
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index e451d6dc86..43901d0934 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -45,7 +45,7 @@ from synapse.events.validator import EventValidator
 from synapse.logging.context import run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
-from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
+from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.storage.state import StateFilter
 from synapse.types import (
     Collection,
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index da06582d4b..487420bb5d 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -309,7 +309,7 @@ class PaginationHandler(object):
             room_token = pagin_config.from_token.room_key
         else:
             pagin_config.from_token = (
-                await self.hs.get_event_sources().get_current_token_for_pagination()
+                self.hs.get_event_sources().get_current_token_for_pagination()
             )
             room_token = pagin_config.from_token.room_key
 
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index b3a3bb8c3f..5387b3724f 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -38,7 +38,7 @@ from synapse.logging.utils import log_function
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.state import StateHandler
-from synapse.storage.data_stores.main import DataStore
+from synapse.storage.databases.main import DataStore
 from synapse.storage.presence import UserPresenceState
 from synapse.types import JsonDict, UserID, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
@@ -319,7 +319,7 @@ class PresenceHandler(BasePresenceHandler):
         is some spurious presence changes that will self-correct.
         """
         # If the DB pool has already terminated, don't try updating
-        if not self.store.db.is_running():
+        if not self.store.db_pool.is_running():
             return
 
         logger.info(
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 501f0fe795..c94209ab3d 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -548,7 +548,7 @@ class RegistrationHandler(BaseHandler):
             address (str|None): the IP address used to perform the registration.
 
         Returns:
-            Deferred
+            Awaitable
         """
         if self.hs.config.worker_app:
             return self._register_client(
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 0c5b99234d..a8545255b1 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -22,7 +22,7 @@ import logging
 import math
 import string
 from collections import OrderedDict
-from typing import Optional, Tuple
+from typing import Awaitable, Optional, Tuple
 
 from synapse.api.constants import (
     EventTypes,
@@ -1041,7 +1041,7 @@ class RoomEventSource(object):
     ):
         # We just ignore the key for now.
 
-        to_key = await self.get_current_key()
+        to_key = self.get_current_key()
 
         from_token = RoomStreamToken.parse(from_key)
         if from_token.topological:
@@ -1081,10 +1081,10 @@ class RoomEventSource(object):
 
         return (events, end_key)
 
-    def get_current_key(self):
-        return self.store.get_room_events_max_id()
+    def get_current_key(self) -> str:
+        return "s%d" % (self.store.get_room_max_stream_ordering(),)
 
-    def get_current_key_for_room(self, room_id):
+    def get_current_key_for_room(self, room_id: str) -> Awaitable[str]:
         return self.store.get_room_events_max_id(room_id)
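
# Sketch of why get_current_key can now be synchronous: the maximum stream
# ordering is presumably available without a database query, so the
# "s<ordering>" token can be formatted directly. FakeStore is a hypothetical
# stand-in for illustration.
class FakeStore:
    def get_room_max_stream_ordering(self) -> int:
        return 1234

token = "s%d" % (FakeStore().get_room_max_stream_ordering(),)
assert token == "s1234"
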
 
 
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 78586a0a1e..8e409f24e8 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -22,7 +22,8 @@ from unpaddedbase64 import encode_base64
 
 from synapse import types
 from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
-from synapse.api.errors import AuthError, Codes, SynapseError
+from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError
+from synapse.api.ratelimiting import Ratelimiter
 from synapse.api.room_versions import EventFormatVersions
 from synapse.crypto.event_signing import compute_event_reference_hash
 from synapse.events import EventBase
@@ -77,6 +78,17 @@ class RoomMemberHandler(object):
         if self._is_on_event_persistence_instance:
             self.persist_event_storage = hs.get_storage().persistence
 
+        self._join_rate_limiter_local = Ratelimiter(
+            clock=self.clock,
+            rate_hz=hs.config.ratelimiting.rc_joins_local.per_second,
+            burst_count=hs.config.ratelimiting.rc_joins_local.burst_count,
+        )
+        self._join_rate_limiter_remote = Ratelimiter(
+            clock=self.clock,
+            rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second,
+            burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count,
+        )
+
         # This is only used to get at ratelimit function, and
         # maybe_kick_guest_users. It's fine there are multiple of these as
         # it doesn't store state.
@@ -441,7 +453,28 @@ class RoomMemberHandler(object):
                     # so don't really fit into the general auth process.
                     raise AuthError(403, "Guest access not allowed")
 
-            if not is_host_in_room:
+            if is_host_in_room:
+                time_now_s = self.clock.time()
+                allowed, time_allowed = self._join_rate_limiter_local.can_do_action(
+                    requester.user.to_string(),
+                )
+
+                if not allowed:
+                    raise LimitExceededError(
+                        retry_after_ms=int(1000 * (time_allowed - time_now_s))
+                    )
+
+            else:
+                time_now_s = self.clock.time()
+                allowed, time_allowed = self._join_rate_limiter_remote.can_do_action(
+                    requester.user.to_string(),
+                )
+
+                if not allowed:
+                    raise LimitExceededError(
+                        retry_after_ms=int(1000 * (time_allowed - time_now_s))
+                    )
+
                 inviter = await self._get_inviter(target.to_string(), room_id)
                 if inviter and not self.hs.is_mine(inviter):
                     remote_room_hosts.append(inviter.domain)
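
# Sketch of the rate-limit check pattern added above, with hypothetical
# stand-ins for the limiter; Synapse raises LimitExceededError here, which
# is approximated with a plain exception below. can_do_action returns
# whether the action is allowed and, if not, when it next would be.
import time

def check_join_rate(limiter, user_id: str) -> None:
    time_now_s = time.time()
    allowed, time_allowed = limiter.can_do_action(user_id)
    if not allowed:
        # surfaced to clients as M_LIMIT_EXCEEDED with retry_after_ms
        retry_after_ms = int(1000 * (time_allowed - time_now_s))
        raise RuntimeError("rate limited; retry in %dms" % retry_after_ms)
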
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 9b312a1558..d58f9788c5 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -340,7 +340,7 @@ class SearchHandler(BaseHandler):
         # If client has asked for "context" for each event (i.e. some surrounding
         # events and state), fetch that
         if event_context is not None:
-            now_token = await self.hs.get_event_sources().get_current_token()
+            now_token = self.hs.get_event_sources().get_current_token()
 
             contexts = {}
             for event in allowed_events:
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 149f861239..249ffe2a55 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -232,7 +232,7 @@ class StatsHandler:
 
                 if membership == prev_membership:
                     pass  # noop
-                if membership == Membership.JOIN:
+                elif membership == Membership.JOIN:
                     room_stats_delta["joined_members"] += 1
                 elif membership == Membership.INVITE:
                     room_stats_delta["invited_members"] += 1
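
# Why the change from `if` to `elif` above matters: with a plain `if`, a
# no-op membership change (prev == new == "join") fell through the `pass`
# branch and then also matched `if membership == JOIN`, spuriously
# incrementing joined_members. Minimal reproduction with string constants:
JOIN = "join"

def joined_delta(prev_membership, membership):
    delta = 0
    if membership == prev_membership:
        pass  # noop
    elif membership == JOIN:
        delta += 1
    return delta

assert joined_delta(JOIN, JOIN) == 0  # was 1 before the fix
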
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index eaa4eeadf7..5a19bac929 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -961,7 +961,7 @@ class SyncHandler(object):
         # this is due to some of the underlying streams not supporting the ability
         # to query up to a given point.
         # Always use the `now_token` in `SyncResultBuilder`
-        now_token = await self.event_sources.get_current_token()
+        now_token = self.event_sources.get_current_token()
 
         logger.debug(
             "Calculating sync response for %r between %s and %s",
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 155b7460d4..529532a063 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -284,8 +284,7 @@ class SimpleHttpClient(object):
                 ip_blacklist=self._ip_blacklist,
             )
 
-    @defer.inlineCallbacks
-    def request(self, method, uri, data=None, headers=None):
+    async def request(self, method, uri, data=None, headers=None):
         """
         Args:
             method (str): HTTP method to use.
@@ -330,7 +329,7 @@ class SimpleHttpClient(object):
                     self.hs.get_reactor(),
                     cancelled_to_request_timed_out_error,
                 )
-                response = yield make_deferred_yieldable(request_deferred)
+                response = await make_deferred_yieldable(request_deferred)
 
                 incoming_responses_counter.labels(method, response.code).inc()
                 logger.info(
@@ -353,8 +352,7 @@ class SimpleHttpClient(object):
                 set_tag("error_reason", e.args[0])
                 raise
 
-    @defer.inlineCallbacks
-    def post_urlencoded_get_json(self, uri, args={}, headers=None):
+    async def post_urlencoded_get_json(self, uri, args={}, headers=None):
         """
         Args:
             uri (str):
@@ -363,7 +361,7 @@ class SimpleHttpClient(object):
                header name to a list of values for that header
 
         Returns:
-            Deferred[object]: parsed json
+            object: parsed json
 
         Raises:
             HttpResponseException: On a non-2xx HTTP response.
@@ -386,11 +384,11 @@ class SimpleHttpClient(object):
         if headers:
             actual_headers.update(headers)
 
-        response = yield self.request(
+        response = await self.request(
             "POST", uri, headers=Headers(actual_headers), data=query_bytes
         )
 
-        body = yield make_deferred_yieldable(readBody(response))
+        body = await make_deferred_yieldable(readBody(response))
 
         if 200 <= response.code < 300:
             return json.loads(body.decode("utf-8"))
@@ -399,8 +397,7 @@ class SimpleHttpClient(object):
                 response.code, response.phrase.decode("ascii", errors="replace"), body
             )
 
-    @defer.inlineCallbacks
-    def post_json_get_json(self, uri, post_json, headers=None):
+    async def post_json_get_json(self, uri, post_json, headers=None):
         """
 
         Args:
@@ -410,7 +407,7 @@ class SimpleHttpClient(object):
                header name to a list of values for that header
 
         Returns:
-            Deferred[object]: parsed json
+            object: parsed json
 
         Raises:
             HttpResponseException: On a non-2xx HTTP response.
@@ -429,11 +426,11 @@ class SimpleHttpClient(object):
         if headers:
             actual_headers.update(headers)
 
-        response = yield self.request(
+        response = await self.request(
             "POST", uri, headers=Headers(actual_headers), data=json_str
         )
 
-        body = yield make_deferred_yieldable(readBody(response))
+        body = await make_deferred_yieldable(readBody(response))
 
         if 200 <= response.code < 300:
             return json.loads(body.decode("utf-8"))
@@ -442,8 +439,7 @@ class SimpleHttpClient(object):
                 response.code, response.phrase.decode("ascii", errors="replace"), body
             )
 
-    @defer.inlineCallbacks
-    def get_json(self, uri, args={}, headers=None):
+    async def get_json(self, uri, args={}, headers=None):
         """ Gets some json from the given URI.
 
         Args:
@@ -455,7 +451,7 @@ class SimpleHttpClient(object):
             headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from
                header name to a list of values for that header
         Returns:
-            Deferred: Succeeds when we get *any* 2xx HTTP response, with the
+            Succeeds when we get *any* 2xx HTTP response, with the
             HTTP body as JSON.
         Raises:
             HttpResponseException On a non-2xx HTTP response.
@@ -466,11 +462,10 @@ class SimpleHttpClient(object):
         if headers:
             actual_headers.update(headers)
 
-        body = yield self.get_raw(uri, args, headers=headers)
+        body = await self.get_raw(uri, args, headers=headers)
         return json.loads(body.decode("utf-8"))
 
-    @defer.inlineCallbacks
-    def put_json(self, uri, json_body, args={}, headers=None):
+    async def put_json(self, uri, json_body, args={}, headers=None):
         """ Puts some json to the given URI.
 
         Args:
@@ -483,7 +478,7 @@ class SimpleHttpClient(object):
             headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from
                header name to a list of values for that header
         Returns:
-            Deferred: Succeeds when we get *any* 2xx HTTP response, with the
+            Succeeds when we get *any* 2xx HTTP response, with the
             HTTP body as JSON.
         Raises:
             HttpResponseException On a non-2xx HTTP response.
@@ -504,11 +499,11 @@ class SimpleHttpClient(object):
         if headers:
             actual_headers.update(headers)
 
-        response = yield self.request(
+        response = await self.request(
             "PUT", uri, headers=Headers(actual_headers), data=json_str
         )
 
-        body = yield make_deferred_yieldable(readBody(response))
+        body = await make_deferred_yieldable(readBody(response))
 
         if 200 <= response.code < 300:
             return json.loads(body.decode("utf-8"))
@@ -517,8 +512,7 @@ class SimpleHttpClient(object):
                 response.code, response.phrase.decode("ascii", errors="replace"), body
             )
 
-    @defer.inlineCallbacks
-    def get_raw(self, uri, args={}, headers=None):
+    async def get_raw(self, uri, args={}, headers=None):
         """ Gets raw text from the given URI.
 
         Args:
@@ -530,7 +524,7 @@ class SimpleHttpClient(object):
             headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from
                header name to a list of values for that header
         Returns:
-            Deferred: Succeeds when we get *any* 2xx HTTP response, with the
+            Succeeds when we get *any* 2xx HTTP response, with the
             HTTP body as bytes.
         Raises:
             HttpResponseException on a non-2xx HTTP response.
@@ -543,9 +537,9 @@ class SimpleHttpClient(object):
         if headers:
             actual_headers.update(headers)
 
-        response = yield self.request("GET", uri, headers=Headers(actual_headers))
+        response = await self.request("GET", uri, headers=Headers(actual_headers))
 
-        body = yield make_deferred_yieldable(readBody(response))
+        body = await make_deferred_yieldable(readBody(response))
 
         if 200 <= response.code < 300:
             return body
@@ -557,8 +551,7 @@ class SimpleHttpClient(object):
     # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
     # The two should be factored out.
 
-    @defer.inlineCallbacks
-    def get_file(self, url, output_stream, max_size=None, headers=None):
+    async def get_file(self, url, output_stream, max_size=None, headers=None):
         """GETs a file from a given URL
         Args:
             url (str): The URL to GET
@@ -574,7 +567,7 @@ class SimpleHttpClient(object):
         if headers:
             actual_headers.update(headers)
 
-        response = yield self.request("GET", url, headers=Headers(actual_headers))
+        response = await self.request("GET", url, headers=Headers(actual_headers))
 
         resp_headers = dict(response.headers.getAllRawHeaders())
 
@@ -598,7 +591,7 @@ class SimpleHttpClient(object):
         # straight back in again
 
         try:
-            length = yield make_deferred_yieldable(
+            length = await make_deferred_yieldable(
                 _readBodyToFile(response, output_stream, max_size)
             )
         except SynapseError:
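
# With SimpleHttpClient's methods now native coroutines, callers simply
# await them; a hypothetical usage sketch:
async def fetch_result(client, uri):
    body = await client.get_json(uri)  # parsed JSON on any 2xx response
    return body.get("result")
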
diff --git a/synapse/http/server.py b/synapse/http/server.py
index d4f9ad6e67..94ab29974a 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -242,10 +242,12 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
         no appropriate method exists. Can be overridden in subclasses for
         different routing.
         """
+        # Treat HEAD requests as GET requests.
+        request_method = request.method.decode("ascii")
+        if request_method == "HEAD":
+            request_method = "GET"
 
-        method_handler = getattr(
-            self, "_async_render_%s" % (request.method.decode("ascii"),), None
-        )
+        method_handler = getattr(self, "_async_render_%s" % (request_method,), None)
         if method_handler:
             raw_callback_return = method_handler(request)
 
@@ -362,11 +364,15 @@ class JsonResource(DirectServeJsonResource):
             A tuple of the callback to use, the name of the servlet, and the
             key word arguments to pass to the callback
         """
+        # Treat HEAD requests as GET requests.
         request_path = request.path.decode("ascii")
+        request_method = request.method
+        if request_method == b"HEAD":
+            request_method = b"GET"
 
         # Loop through all the registered callbacks to check if the method
         # and path regex match
-        for path_entry in self.path_regexs.get(request.method, []):
+        for path_entry in self.path_regexs.get(request_method, []):
             m = path_entry.pattern.match(request_path)
             if m:
                 # We found a match!
@@ -579,7 +585,7 @@ def set_cors_headers(request: Request):
     """
     request.setHeader(b"Access-Control-Allow-Origin", b"*")
     request.setHeader(
-        b"Access-Control-Allow-Methods", b"GET, POST, PUT, DELETE, OPTIONS"
+        b"Access-Control-Allow-Methods", b"GET, HEAD, POST, PUT, DELETE, OPTIONS"
     )
     request.setHeader(
         b"Access-Control-Allow-Headers",
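
# Sketch of the HEAD-as-GET routing added above, with a hypothetical
# resource: a HEAD request is dispatched to the GET handler, and Twisted
# itself discards any response body written for a HEAD request.
class ExampleResource:
    def _async_render_GET(self, request):
        return 200, {"ok": True}

    def dispatch(self, request):
        request_method = request.method.decode("ascii")
        if request_method == "HEAD":
            request_method = "GET"
        handler = getattr(self, "_async_render_%s" % (request_method,), None)
        return handler(request) if handler else None
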
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index a7849cefa5..8201849951 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -219,7 +219,7 @@ class ModuleApi(object):
         Returns:
             Deferred[object]: result of func
         """
-        return self._store.db.runInteraction(desc, func, *args, **kwargs)
+        return self._store.db_pool.runInteraction(desc, func, *args, **kwargs)
 
     def complete_sso_login(
         self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str
diff --git a/synapse/notifier.py b/synapse/notifier.py
index bd41f77852..22ab4a9da5 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -320,7 +320,7 @@ class Notifier(object):
         """
         user_stream = self.user_to_user_stream.get(user_id)
         if user_stream is None:
-            current_token = await self.event_sources.get_current_token()
+            current_token = self.event_sources.get_current_token()
             if room_ids is None:
                 room_ids = await self.store.get_rooms_for_user(user_id)
             user_stream = _NotifierUserStream(
@@ -397,7 +397,7 @@ class Notifier(object):
         """
         from_token = pagination_config.from_token
         if not from_token:
-            from_token = await self.event_sources.get_current_token()
+            from_token = self.event_sources.get_current_token()
 
         limit = pagination_config.limit
 
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index abea2be4ef..e5f22fb858 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -59,7 +59,6 @@ REQUIREMENTS = [
     "pyyaml>=3.11",
     "pyasn1>=0.1.9",
     "pyasn1-modules>=0.0.7",
-    "daemonize>=2.3.1",
     "bcrypt>=3.1.0",
     "pillow>=4.3.0",
     "sortedcontainers>=1.4.4",
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index fb0dd04f88..6a28c2db9d 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -20,8 +20,6 @@ import urllib
 from inspect import signature
 from typing import Dict, List, Tuple
 
-from twisted.internet import defer
-
 from synapse.api.errors import (
     CodeMessageException,
     HttpResponseException,
@@ -101,7 +99,7 @@ class ReplicationEndpoint(object):
         assert self.METHOD in ("PUT", "POST", "GET")
 
     @abc.abstractmethod
-    def _serialize_payload(**kwargs):
+    async def _serialize_payload(**kwargs):
         """Static method that is called when creating a request.
 
         Concrete implementations should have explicit parameters (rather than
@@ -110,9 +108,8 @@ class ReplicationEndpoint(object):
         argument list.
 
         Returns:
-            Deferred[dict]|dict: If POST/PUT request then dictionary must be
-            JSON serialisable, otherwise must be appropriate for adding as
-            query args.
+            dict: If POST/PUT request then dictionary must be JSON serialisable,
+            otherwise must be appropriate for adding as query args.
         """
         return {}
 
@@ -144,8 +141,7 @@ class ReplicationEndpoint(object):
         instance_map = hs.config.worker.instance_map
 
         @trace(opname="outgoing_replication_request")
-        @defer.inlineCallbacks
-        def send_request(instance_name="master", **kwargs):
+        async def send_request(instance_name="master", **kwargs):
             if instance_name == local_instance_name:
                 raise Exception("Trying to send HTTP request to self")
             if instance_name == "master":
@@ -159,7 +155,7 @@ class ReplicationEndpoint(object):
                     "Instance %r not in 'instance_map' config" % (instance_name,)
                 )
 
-            data = yield cls._serialize_payload(**kwargs)
+            data = await cls._serialize_payload(**kwargs)
 
             url_args = [
                 urllib.parse.quote(kwargs[name], safe="") for name in cls.PATH_ARGS
@@ -197,7 +193,7 @@ class ReplicationEndpoint(object):
                     headers = {}  # type: Dict[bytes, List[bytes]]
                     inject_active_span_byte_dict(headers, None, check_destination=False)
                     try:
-                        result = yield request_func(uri, data, headers=headers)
+                        result = await request_func(uri, data, headers=headers)
                         break
                     except CodeMessageException as e:
                         if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
@@ -207,7 +203,7 @@ class ReplicationEndpoint(object):
 
                     # If we timed out we probably don't need to worry about backing
                     # off too much, but let's just wait a little anyway.
-                    yield clock.sleep(1)
+                    await clock.sleep(1)
             except HttpResponseException as e:
                 # We convert to SynapseError as we know that it was a SynapseError
                 # on the master process that we should send to the client. (And
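
# Minimal sketch of the updated contract, with a hypothetical endpoint:
# since send_request now awaits cls._serialize_payload(**kwargs),
# implementations are coroutines and may themselves await, e.g.
# context.serialize as in the send_event servlet below.
class ExampleEndpoint:
    NAME = "example"
    PATH_ARGS = ()

    @staticmethod
    async def _serialize_payload(event, context, store):
        # may await other coroutines without an inlineCallbacks shim
        serialized = await context.serialize(event, store)
        return {"context": serialized}

    async def _handle_request(self, request):
        return 200, {}
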
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index e32aac0a25..20f3ba76c0 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -60,7 +60,7 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
         self.clock = hs.get_clock()
 
     @staticmethod
-    def _serialize_payload(user_id):
+    async def _serialize_payload(user_id):
         return {}
 
     async def _handle_request(self, request, user_id):
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index ca065e819e..6b56315148 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import make_event_from_dict
 from synapse.events.snapshot import EventContext
@@ -67,8 +65,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
         self.federation_handler = hs.get_handlers().federation_handler
 
     @staticmethod
-    @defer.inlineCallbacks
-    def _serialize_payload(store, event_and_contexts, backfilled):
+    async def _serialize_payload(store, event_and_contexts, backfilled):
         """
         Args:
             store
@@ -78,9 +75,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
         """
         event_payloads = []
         for event, context in event_and_contexts:
-            serialized_context = yield defer.ensureDeferred(
-                context.serialize(event, store)
-            )
+            serialized_context = await context.serialize(event, store)
 
             event_payloads.append(
                 {
@@ -156,7 +151,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
         self.registry = hs.get_federation_registry()
 
     @staticmethod
-    def _serialize_payload(edu_type, origin, content):
+    async def _serialize_payload(edu_type, origin, content):
         return {"origin": origin, "content": content}
 
     async def _handle_request(self, request, edu_type):
@@ -199,7 +194,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
         self.registry = hs.get_federation_registry()
 
     @staticmethod
-    def _serialize_payload(query_type, args):
+    async def _serialize_payload(query_type, args):
         """
         Args:
             query_type (str)
@@ -240,7 +235,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
         self.store = hs.get_datastore()
 
     @staticmethod
-    def _serialize_payload(room_id, args):
+    async def _serialize_payload(room_id, args):
         """
         Args:
             room_id (str)
@@ -275,7 +270,7 @@ class ReplicationStoreRoomOnInviteRestServlet(ReplicationEndpoint):
         self.store = hs.get_datastore()
 
     @staticmethod
-    def _serialize_payload(room_id, room_version):
+    async def _serialize_payload(room_id, room_version):
         return {"room_version": room_version.identifier}
 
     async def _handle_request(self, request, room_id):
diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py
index 798b9d3af5..fb326bb869 100644
--- a/synapse/replication/http/login.py
+++ b/synapse/replication/http/login.py
@@ -36,7 +36,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
         self.registration_handler = hs.get_registration_handler()
 
     @staticmethod
-    def _serialize_payload(user_id, device_id, initial_display_name, is_guest):
+    async def _serialize_payload(user_id, device_id, initial_display_name, is_guest):
         """
         Args:
             device_id (str|None): Device ID to use, if None a new one is
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index 63ef6eb7be..741329ab5f 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -52,7 +52,9 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
         self.clock = hs.get_clock()
 
     @staticmethod
-    def _serialize_payload(requester, room_id, user_id, remote_room_hosts, content):
+    async def _serialize_payload(
+        requester, room_id, user_id, remote_room_hosts, content
+    ):
         """
         Args:
             requester(Requester)
@@ -112,7 +114,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
         self.member_handler = hs.get_room_member_handler()
 
     @staticmethod
-    def _serialize_payload(  # type: ignore
+    async def _serialize_payload(  # type: ignore
         invite_event_id: str,
         txn_id: Optional[str],
         requester: Requester,
@@ -174,7 +176,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
         self.distributor = hs.get_distributor()
 
     @staticmethod
-    def _serialize_payload(room_id, user_id, change):
+    async def _serialize_payload(room_id, user_id, change):
         """
         Args:
             room_id (str)
diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py
index ea1b33331b..bc9aa82cb4 100644
--- a/synapse/replication/http/presence.py
+++ b/synapse/replication/http/presence.py
@@ -50,7 +50,7 @@ class ReplicationBumpPresenceActiveTime(ReplicationEndpoint):
         self._presence_handler = hs.get_presence_handler()
 
     @staticmethod
-    def _serialize_payload(user_id):
+    async def _serialize_payload(user_id):
         return {}
 
     async def _handle_request(self, request, user_id):
@@ -92,7 +92,7 @@ class ReplicationPresenceSetState(ReplicationEndpoint):
         self._presence_handler = hs.get_presence_handler()
 
     @staticmethod
-    def _serialize_payload(user_id, state, ignore_status_msg=False):
+    async def _serialize_payload(user_id, state, ignore_status_msg=False):
         return {
             "state": state,
             "ignore_status_msg": ignore_status_msg,
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
index 0c4aca1291..ce9420aa69 100644
--- a/synapse/replication/http/register.py
+++ b/synapse/replication/http/register.py
@@ -34,7 +34,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
         self.registration_handler = hs.get_registration_handler()
 
     @staticmethod
-    def _serialize_payload(
+    async def _serialize_payload(
         user_id,
         password_hash,
         was_guest,
@@ -105,7 +105,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
         self.registration_handler = hs.get_registration_handler()
 
     @staticmethod
-    def _serialize_payload(user_id, auth_result, access_token):
+    async def _serialize_payload(user_id, auth_result, access_token):
         """
         Args:
             user_id (str): The user ID that consented
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index b30e4d5039..f13d452426 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -15,8 +15,6 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import make_event_from_dict
 from synapse.events.snapshot import EventContext
@@ -62,8 +60,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
         self.clock = hs.get_clock()
 
     @staticmethod
-    @defer.inlineCallbacks
-    def _serialize_payload(
+    async def _serialize_payload(
         event_id, store, event, context, requester, ratelimit, extra_users
     ):
         """
@@ -77,7 +74,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
             extra_users (list(UserID)): Any extra users to notify about event
         """
 
-        serialized_context = yield defer.ensureDeferred(context.serialize(event, store))
+        serialized_context = await context.serialize(event, store)
 
         payload = {
             "event": event.get_pdu_json(),
diff --git a/synapse/replication/http/streams.py b/synapse/replication/http/streams.py
index bde97eef32..309159e304 100644
--- a/synapse/replication/http/streams.py
+++ b/synapse/replication/http/streams.py
@@ -54,7 +54,7 @@ class ReplicationGetStreamUpdates(ReplicationEndpoint):
         self.streams = hs.get_replication_streams()
 
     @staticmethod
-    def _serialize_payload(stream_name, from_token, upto_token):
+    async def _serialize_payload(stream_name, from_token, upto_token):
         return {"from_token": from_token, "upto_token": upto_token}
 
     async def _handle_request(self, request, stream_name):
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index f9e2533e96..60f2e1245f 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -16,8 +16,8 @@
 import logging
 from typing import Optional
 
-from synapse.storage.data_stores.main.cache import CacheInvalidationWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
 
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
 
 
 class BaseSlavedStore(CacheInvalidationWorkerStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(BaseSlavedStore, self).__init__(database, db_conn, hs)
         if isinstance(self.database_engine, PostgresEngine):
             self._cache_id_gen = MultiWriterIdGenerator(
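
Every slaved store below repeats the same mechanical change: the `Database` annotation becomes `DatabasePool`, mirroring the rename in `synapse/storage/database.py` further down. A minimal sketch of what a subclass looks like after the rename (`ExampleSlavedStore` is hypothetical):

    from synapse.replication.slave.storage._base import BaseSlavedStore
    from synapse.storage.database import DatabasePool

    class ExampleSlavedStore(BaseSlavedStore):
        def __init__(self, database: DatabasePool, db_conn, hs):
            super(ExampleSlavedStore, self).__init__(database, db_conn, hs)
            # database helpers are now reached via self.db_pool rather than
            # self.db (see the synapse/storage/_base.py hunk below)
            self._example_state = {}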
diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py
index 525b94fd87..154f0e687c 100644
--- a/synapse/replication/slave/storage/account_data.py
+++ b/synapse/replication/slave/storage/account_data.py
@@ -17,13 +17,13 @@
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream
-from synapse.storage.data_stores.main.account_data import AccountDataWorkerStore
-from synapse.storage.data_stores.main.tags import TagsWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.account_data import AccountDataWorkerStore
+from synapse.storage.databases.main.tags import TagsWorkerStore
 
 
 class SlavedAccountDataStore(TagsWorkerStore, AccountDataWorkerStore, BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         self._account_data_id_gen = SlavedIdTracker(
             db_conn,
             "account_data",
diff --git a/synapse/replication/slave/storage/appservice.py b/synapse/replication/slave/storage/appservice.py
index a67fbeffb7..0f8d7037bd 100644
--- a/synapse/replication/slave/storage/appservice.py
+++ b/synapse/replication/slave/storage/appservice.py
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.data_stores.main.appservice import (
+from synapse.storage.databases.main.appservice import (
     ApplicationServiceTransactionWorkerStore,
     ApplicationServiceWorkerStore,
 )
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py
index 1a38f53dfb..60dd3f6701 100644
--- a/synapse/replication/slave/storage/client_ips.py
+++ b/synapse/replication/slave/storage/client_ips.py
@@ -13,15 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.data_stores.main.client_ips import LAST_SEEN_GRANULARITY
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY
 from synapse.util.caches.descriptors import Cache
 
 from ._base import BaseSlavedStore
 
 
 class SlavedClientIpStore(BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SlavedClientIpStore, self).__init__(database, db_conn, hs)
 
         self.client_ip_last_seen = Cache(
diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py
index a8a16dbc71..ee7f69a918 100644
--- a/synapse/replication/slave/storage/deviceinbox.py
+++ b/synapse/replication/slave/storage/deviceinbox.py
@@ -16,14 +16,14 @@
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams import ToDeviceStream
-from synapse.storage.data_stores.main.deviceinbox import DeviceInboxWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 
 class SlavedDeviceInboxStore(DeviceInboxWorkerStore, BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SlavedDeviceInboxStore, self).__init__(database, db_conn, hs)
         self._device_inbox_id_gen = SlavedIdTracker(
             db_conn, "device_inbox", "stream_id"
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
index 9d8067342f..722f3745e9 100644
--- a/synapse/replication/slave/storage/devices.py
+++ b/synapse/replication/slave/storage/devices.py
@@ -16,14 +16,14 @@
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
-from synapse.storage.data_stores.main.devices import DeviceWorkerStore
-from synapse.storage.data_stores.main.end_to_end_keys import EndToEndKeyWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.devices import DeviceWorkerStore
+from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyWorkerStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 
 class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SlavedDeviceStore, self).__init__(database, db_conn, hs)
 
         self.hs = hs
diff --git a/synapse/replication/slave/storage/directory.py b/synapse/replication/slave/storage/directory.py
index 8b9717c46f..1945bcf9a8 100644
--- a/synapse/replication/slave/storage/directory.py
+++ b/synapse/replication/slave/storage/directory.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.data_stores.main.directory import DirectoryWorkerStore
+from synapse.storage.databases.main.directory import DirectoryWorkerStore
 
 from ._base import BaseSlavedStore
 
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index 1a1a50a24f..da1cc836cf 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -15,18 +15,18 @@
 # limitations under the License.
 import logging
 
-from synapse.storage.data_stores.main.event_federation import EventFederationWorkerStore
-from synapse.storage.data_stores.main.event_push_actions import (
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
+from synapse.storage.databases.main.event_push_actions import (
     EventPushActionsWorkerStore,
 )
-from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
-from synapse.storage.data_stores.main.relations import RelationsWorkerStore
-from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore
-from synapse.storage.data_stores.main.signatures import SignatureWorkerStore
-from synapse.storage.data_stores.main.state import StateGroupWorkerStore
-from synapse.storage.data_stores.main.stream import StreamWorkerStore
-from synapse.storage.data_stores.main.user_erasure_store import UserErasureWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.relations import RelationsWorkerStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
+from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 from ._base import BaseSlavedStore
@@ -55,11 +55,11 @@ class SlavedEventStore(
     RelationsWorkerStore,
     BaseSlavedStore,
 ):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SlavedEventStore, self).__init__(database, db_conn, hs)
 
         events_max = self._stream_id_gen.get_current_token()
-        curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(
+        curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
             db_conn,
             "current_state_delta_stream",
             entity_column="room_id",
diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py
index bcb0688954..2562b6fc38 100644
--- a/synapse/replication/slave/storage/filtering.py
+++ b/synapse/replication/slave/storage/filtering.py
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.data_stores.main.filtering import FilteringStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.filtering import FilteringStore
 
 from ._base import BaseSlavedStore
 
 
 class SlavedFilteringStore(BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SlavedFilteringStore, self).__init__(database, db_conn, hs)
 
     # Filters are immutable so this cache doesn't need to be expired
diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py
index 5d210fa3a1..3291558c7a 100644
--- a/synapse/replication/slave/storage/groups.py
+++ b/synapse/replication/slave/storage/groups.py
@@ -16,13 +16,13 @@
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams import GroupServerStream
-from synapse.storage.data_stores.main.group_server import GroupServerWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.group_server import GroupServerWorkerStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 
 class SlavedGroupServerStore(GroupServerWorkerStore, BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SlavedGroupServerStore, self).__init__(database, db_conn, hs)
 
         self.hs = hs
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
index 3def367ae9..961579751c 100644
--- a/synapse/replication/slave/storage/keys.py
+++ b/synapse/replication/slave/storage/keys.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.data_stores.main.keys import KeyStore
+from synapse.storage.databases.main.keys import KeyStore
 
 # KeyStore isn't really safe to use from a worker, but for now we do so and hope that
 # the races it creates aren't too bad.
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
index 2938cb8e43..a912c04360 100644
--- a/synapse/replication/slave/storage/presence.py
+++ b/synapse/replication/slave/storage/presence.py
@@ -15,8 +15,8 @@
 
 from synapse.replication.tcp.streams import PresenceStream
 from synapse.storage import DataStore
-from synapse.storage.data_stores.main.presence import PresenceStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.presence import PresenceStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache
 
 from ._base import BaseSlavedStore
@@ -24,7 +24,7 @@ from ._slaved_id_tracker import SlavedIdTracker
 
 
 class SlavedPresenceStore(BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SlavedPresenceStore, self).__init__(database, db_conn, hs)
         self._presence_id_gen = SlavedIdTracker(db_conn, "presence_stream", "stream_id")
 
diff --git a/synapse/replication/slave/storage/profile.py b/synapse/replication/slave/storage/profile.py
index 28c508aad3..f85b20a071 100644
--- a/synapse/replication/slave/storage/profile.py
+++ b/synapse/replication/slave/storage/profile.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.storage.data_stores.main.profile import ProfileWorkerStore
+from synapse.storage.databases.main.profile import ProfileWorkerStore
 
 
 class SlavedProfileStore(ProfileWorkerStore, BaseSlavedStore):
diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py
index 23ec1c5b11..590187df46 100644
--- a/synapse/replication/slave/storage/push_rule.py
+++ b/synapse/replication/slave/storage/push_rule.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 
 from synapse.replication.tcp.streams import PushRulesStream
-from synapse.storage.data_stores.main.push_rule import PushRulesWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
 
 from .events import SlavedEventStore
 
diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py
index ff449f3658..63300e5da6 100644
--- a/synapse/replication/slave/storage/pushers.py
+++ b/synapse/replication/slave/storage/pushers.py
@@ -15,15 +15,15 @@
 # limitations under the License.
 
 from synapse.replication.tcp.streams import PushersStream
-from synapse.storage.data_stores.main.pusher import PusherWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.pusher import PusherWorkerStore
 
 from ._base import BaseSlavedStore
 from ._slaved_id_tracker import SlavedIdTracker
 
 
 class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SlavedPusherStore, self).__init__(database, db_conn, hs)
         self._pushers_id_gen = SlavedIdTracker(
             db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py
index 6982686eb5..17ba1f22ac 100644
--- a/synapse/replication/slave/storage/receipts.py
+++ b/synapse/replication/slave/storage/receipts.py
@@ -15,15 +15,15 @@
 # limitations under the License.
 
 from synapse.replication.tcp.streams import ReceiptsStream
-from synapse.storage.data_stores.main.receipts import ReceiptsWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
 
 from ._base import BaseSlavedStore
 from ._slaved_id_tracker import SlavedIdTracker
 
 
 class SlavedReceiptsStore(ReceiptsWorkerStore, BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         # We instantiate this first as the ReceiptsWorkerStore constructor
         # needs to be able to call get_max_receipt_stream_id
         self._receipts_id_gen = SlavedIdTracker(
diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py
index 4b8553e250..a40f064e2b 100644
--- a/synapse/replication/slave/storage/registration.py
+++ b/synapse/replication/slave/storage/registration.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.data_stores.main.registration import RegistrationWorkerStore
+from synapse.storage.databases.main.registration import RegistrationWorkerStore
 
 from ._base import BaseSlavedStore
 
diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py
index 8710207ada..427c81772b 100644
--- a/synapse/replication/slave/storage/room.py
+++ b/synapse/replication/slave/storage/room.py
@@ -14,15 +14,15 @@
 # limitations under the License.
 
 from synapse.replication.tcp.streams import PublicRoomsStream
-from synapse.storage.data_stores.main.room import RoomWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.room import RoomWorkerStore
 
 from ._base import BaseSlavedStore
 from ._slaved_id_tracker import SlavedIdTracker
 
 
 class RoomStore(RoomWorkerStore, BaseSlavedStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RoomStore, self).__init__(database, db_conn, hs)
         self._public_room_id_gen = SlavedIdTracker(
             db_conn, "public_room_list_stream", "stream_id"
diff --git a/synapse/replication/slave/storage/transactions.py b/synapse/replication/slave/storage/transactions.py
index ac88e6b8c3..2091ac0df6 100644
--- a/synapse/replication/slave/storage/transactions.py
+++ b/synapse/replication/slave/storage/transactions.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.data_stores.main.transactions import TransactionStore
+from synapse.storage.databases.main.transactions import TransactionStore
 
 from ._base import BaseSlavedStore
 
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index a8364d9793..7c292ef3f9 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -31,7 +31,7 @@ from synapse.rest.admin._base import (
     assert_user_is_admin,
     historical_admin_path_patterns,
 )
-from synapse.storage.data_stores.main.room import RoomSortOrder
+from synapse.storage.databases.main.room import RoomSortOrder
 from synapse.types import RoomAlias, RoomID, UserID, create_requester
 
 logger = logging.getLogger(__name__)
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 26d5a51cb2..2ab30ce897 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -444,7 +444,7 @@ class RoomMemberListRestServlet(RestServlet):
 
     async def on_GET(self, request, room_id):
         # TODO support Pagination stream API (limit/tokens)
-        requester = await self.auth.get_user_by_req(request)
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
         handler = self.message_handler
 
         # request the state as of a given event, as identified by a stream token,
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index 858b6d3005..ab1fa705bf 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import contextlib
-import inspect
 import logging
 import os
 import shutil
@@ -30,7 +29,7 @@ from .filepath import MediaFilePaths
 if TYPE_CHECKING:
     from synapse.server import HomeServer
 
-    from .storage_provider import StorageProvider
+    from .storage_provider import StorageProviderWrapper
 
 logger = logging.getLogger(__name__)
 
@@ -50,7 +49,7 @@ class MediaStorage(object):
         hs: "HomeServer",
         local_media_directory: str,
         filepaths: MediaFilePaths,
-        storage_providers: Sequence["StorageProvider"],
+        storage_providers: Sequence["StorageProviderWrapper"],
     ):
         self.hs = hs
         self.local_media_directory = local_media_directory
@@ -115,11 +114,7 @@ class MediaStorage(object):
 
         async def finish():
             for provider in self.storage_providers:
-                # store_file is supposed to return an Awaitable, but guard
-                # against improper implementations.
-                result = provider.store_file(path, file_info)
-                if inspect.isawaitable(result):
-                    await result
+                await provider.store_file(path, file_info)
 
             finished_called[0] = True
 
@@ -153,11 +148,7 @@ class MediaStorage(object):
             return FileResponder(open(local_path, "rb"))
 
         for provider in self.storage_providers:
-            res = provider.fetch(path, file_info)  # type: Any
-            # Fetch is supposed to return an Awaitable[Responder], but guard
-            # against improper implementations.
-            if inspect.isawaitable(res):
-                res = await res
+            res = await provider.fetch(path, file_info)  # type: Any
             if res:
                 logger.debug("Streaming %s from %s", path, provider)
                 return res
@@ -184,11 +175,7 @@ class MediaStorage(object):
             os.makedirs(dirname)
 
         for provider in self.storage_providers:
-            res = provider.fetch(path, file_info)  # type: Any
-            # Fetch is supposed to return an Awaitable[Responder], but guard
-            # against improper implementations.
-            if inspect.isawaitable(res):
-                res = await res
+            res = await provider.fetch(path, file_info)  # type: Any
             if res:
                 with res:
                     consumer = BackgroundFileConsumer(
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index e12f65a206..f4768a9e8b 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -586,7 +586,7 @@ class PreviewUrlResource(DirectServeJsonResource):
 
         logger.debug("Running url preview cache expiry")
 
-        if not (await self.store.db.updates.has_completed_background_updates()):
+        if not (await self.store.db_pool.updates.has_completed_background_updates()):
             logger.info("Still running DB updates; skipping expiry")
             return
 
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index a33f56e806..18c9ed48d6 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import inspect
 import logging
 import os
 import shutil
@@ -88,12 +89,18 @@ class StorageProviderWrapper(StorageProvider):
             return None
 
         if self.store_synchronous:
-            return await self.backend.store_file(path, file_info)
+            # store_file is supposed to return an Awaitable, but guard
+            # against improper implementations.
+            result = self.backend.store_file(path, file_info)
+            if inspect.isawaitable(result):
+                return await result
         else:
             # TODO: Handle errors.
-            def store():
+            async def store():
                 try:
-                    return self.backend.store_file(path, file_info)
+                    result = self.backend.store_file(path, file_info)
+                    if inspect.isawaitable(result):
+                        return await result
                 except Exception:
                     logger.exception("Error storing file")
 
@@ -101,7 +108,12 @@
             return None
 
     async def fetch(self, path, file_info):
-        return await self.backend.fetch(path, file_info)
+        # fetch is supposed to return an Awaitable[Responder], but guard
+        # against improper implementations.
+        result = self.backend.fetch(path, file_info)
+        if inspect.isawaitable(result):
+            return await result
+        return result
 
 
 class FileStorageProviderBackend(StorageProvider):
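
The `inspect.isawaitable` guards consolidated into `StorageProviderWrapper` above exist so that third-party providers written against the old synchronous interface keep working after the async conversion. A hypothetical legacy provider that still benefits from the guard (paths and behaviour are illustrative only):

    import os
    import shutil

    class LegacySyncProvider(StorageProvider):
        """Hypothetical pre-async provider: its methods return plain values,
        so isawaitable() is False and the wrapper skips the await."""

        def __init__(self, backup_directory):
            self.backup_directory = backup_directory

        def store_file(self, path, file_info):
            # synchronous copy of the media-repo-relative path; returns None
            dest = os.path.join(self.backup_directory, path)
            os.makedirs(os.path.dirname(dest), exist_ok=True)
            shutil.copyfile(path, dest)

        def fetch(self, path, file_info):
            # this provider never serves reads
            return None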
diff --git a/synapse/server.py b/synapse/server.py
index 8e41112530..81d7f26f9c 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -105,7 +105,7 @@ from synapse.server_notices.worker_server_notices_sender import (
     WorkerServerNoticesSender,
 )
 from synapse.state import StateHandler, StateResolutionHandler
-from synapse.storage import DataStore, DataStores, Storage
+from synapse.storage import Databases, DataStore, Storage
 from synapse.streams.events import EventSources
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
@@ -280,7 +280,7 @@ class HomeServer(object):
     def setup(self):
         logger.info("Setting up.")
         self.start_time = int(self.get_clock().time())
-        self.datastores = DataStores(self.DATASTORE_CLASS, self)
+        self.datastores = Databases(self.DATASTORE_CLASS, self)
         logger.info("Finished setting up.")
 
     def setup_master(self):
diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py
index 3bfc8d7278..089cfef0b3 100644
--- a/synapse/server_notices/consent_server_notices.py
+++ b/synapse/server_notices/consent_server_notices.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import Any
 
 from synapse.api.errors import SynapseError
 from synapse.api.urls import ConsentURIBuilder
@@ -55,14 +56,11 @@ class ConsentServerNotices(object):
 
             self._consent_uri_builder = ConsentURIBuilder(hs.config)
 
-    async def maybe_send_server_notice_to_user(self, user_id):
+    async def maybe_send_server_notice_to_user(self, user_id: str) -> None:
         """Check if we need to send a notice to this user, and does so if so
 
         Args:
-            user_id (str): user to check
-
-        Returns:
-            Deferred
+            user_id: user to check
         """
         if self._server_notice_content is None:
             # not enabled
@@ -105,7 +103,7 @@ class ConsentServerNotices(object):
             self._users_in_progress.remove(user_id)
 
 
-def copy_with_str_subst(x, substitutions):
+def copy_with_str_subst(x: Any, substitutions: Any) -> Any:
     """Deep-copy a structure, carrying out string substitions on any strings
 
     Args:
@@ -121,7 +119,7 @@ def copy_with_str_subst(x, substitutions):
     if isinstance(x, dict):
         return {k: copy_with_str_subst(v, substitutions) for (k, v) in x.items()}
     if isinstance(x, (list, tuple)):
-        return [copy_with_str_subst(y) for y in x]
+        return [copy_with_str_subst(y, substitutions) for y in x]
 
     # assume it's uninteresting and can be shallow-copied.
     return x
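
Besides the type annotations, the hunk above repairs a real bug: the recursive call for lists and tuples previously dropped the `substitutions` argument, so `copy_with_str_subst(y)` raised a TypeError on any nested list. A small usage sketch of the fixed helper (values are hypothetical, and this assumes the string branch applies `%`-substitution as the docstring describes):

    template = {
        "msgtype": "m.text",
        "body": "Please review %(consent_uri)s",
        "links": ["%(consent_uri)s"],
    }
    content = copy_with_str_subst(
        template, {"consent_uri": "https://example.com/consent"}
    )
    # content["links"][0] == "https://example.com/consent"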
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index 4404ceff93..c2faef6eab 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import List, Tuple
 
 from synapse.api.constants import (
     EventTypes,
@@ -52,7 +53,7 @@ class ResourceLimitsServerNotices(object):
             and not hs.config.hs_disabled
         )
 
-    async def maybe_send_server_notice_to_user(self, user_id):
+    async def maybe_send_server_notice_to_user(self, user_id: str) -> None:
         """Check if we need to send a notice to this user, this will be true in
         two cases.
         1. The server has reached its limit does not reflect this
@@ -60,10 +61,7 @@ class ResourceLimitsServerNotices(object):
         actually the server is fine
 
         Args:
-            user_id (str): user to check
-
-        Returns:
-            Deferred
+            user_id: user to check
         """
         if not self._enabled:
             return
@@ -115,19 +113,21 @@ class ResourceLimitsServerNotices(object):
             elif not currently_blocked and limit_msg:
                 # Room is not notifying of a block, when it ought to be.
                 await self._apply_limit_block_notification(
-                    user_id, limit_msg, limit_type
+                    user_id, limit_msg, limit_type  # type: ignore
                 )
         except SynapseError as e:
             logger.error("Error sending resource limits server notice: %s", e)
 
-    async def _remove_limit_block_notification(self, user_id, ref_events):
+    async def _remove_limit_block_notification(
+        self, user_id: str, ref_events: List[str]
+    ) -> None:
         """Utility method to remove limit block notifications from the server
         notices room.
 
         Args:
-            user_id (str): user to notify
-            ref_events (list[str]): The event_ids of pinned events that are unrelated to
-            limit blocking and need to be preserved.
+            user_id: user to notify
+            ref_events: The event_ids of pinned events that are unrelated to
+                limit blocking and need to be preserved.
         """
         content = {"pinned": ref_events}
         await self._server_notices_manager.send_notice(
@@ -135,16 +135,16 @@ class ResourceLimitsServerNotices(object):
         )
 
     async def _apply_limit_block_notification(
-        self, user_id, event_body, event_limit_type
-    ):
+        self, user_id: str, event_body: str, event_limit_type: str
+    ) -> None:
         """Utility method to apply limit block notifications in the server
         notices room.
 
         Args:
-            user_id (str): user to notify
-            event_body(str): The human readable text that describes the block.
-            event_limit_type(str): Specifies the type of block e.g. monthly active user
-            limit has been exceeded.
+            user_id: user to notify
+            event_body: The human readable text that describes the block.
+            event_limit_type: Specifies the type of block e.g. monthly active user
+                limit has been exceeded.
         """
         content = {
             "body": event_body,
@@ -162,7 +162,7 @@ class ResourceLimitsServerNotices(object):
             user_id, content, EventTypes.Pinned, ""
         )
 
-    async def _check_and_set_tags(self, user_id, room_id):
+    async def _check_and_set_tags(self, user_id: str, room_id: str) -> None:
         """
         Since server notices rooms were originally created without tags, it is
         important to check that tags have been set correctly
@@ -182,17 +182,16 @@ class ResourceLimitsServerNotices(object):
             )
             self._notifier.on_new_event("account_data_key", max_id, users=[user_id])
 
-    async def _is_room_currently_blocked(self, room_id):
+    async def _is_room_currently_blocked(self, room_id: str) -> Tuple[bool, List[str]]:
         """
         Determines if the room is currently blocked
 
         Args:
-            room_id(str): The room id of the server notices room
+            room_id: The room id of the server notices room
 
         Returns:
-            Deferred[Tuple[bool, List]]:
             bool: Is the room currently blocked
-            list: The list of pinned events that are unrelated to limit blocking
+            list: The list of pinned event IDs that are unrelated to limit blocking
             This list can be used as a convenience in the case where the block
             is to be lifted and the remaining pinned event references need to be
             preserved
@@ -207,7 +206,7 @@ class ResourceLimitsServerNotices(object):
             # The user has yet to join the server notices room
             pass
 
-        referenced_events = []
+        referenced_events = []  # type: List[str]
         if pinned_state_event is not None:
             referenced_events = list(pinned_state_event.content.get("pinned", []))
 
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index bf2454c01c..ed96aa8571 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -13,8 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import Optional
 
 from synapse.api.constants import EventTypes, Membership, RoomCreationPreset
+from synapse.events import EventBase
 from synapse.types import UserID, create_requester
 from synapse.util.caches.descriptors import cached
 
@@ -50,20 +52,21 @@ class ServerNoticesManager(object):
         return self._config.server_notices_mxid is not None
 
     async def send_notice(
-        self, user_id, event_content, type=EventTypes.Message, state_key=None
-    ):
+        self,
+        user_id: str,
+        event_content: dict,
+        type: str = EventTypes.Message,
+        state_key: Optional[str] = None,
+    ) -> EventBase:
         """Send a notice to the given user
 
         Creates the server notices room, if none exists.
 
         Args:
-            user_id (str): mxid of user to send event to.
-            event_content (dict): content of event to send
-            type(EventTypes): type of event
-            is_state_event(bool): Is the event a state event
-
-        Returns:
-            Deferred[FrozenEvent]
+            user_id: mxid of user to send event to.
+            event_content: content of event to send
+            type: type of event
+            state_key: The state key of the event, if it is to be sent as a state event
         """
         room_id = await self.get_or_create_notice_room_for_user(user_id)
         await self.maybe_invite_user_to_room(user_id, room_id)
@@ -89,17 +92,17 @@ class ServerNoticesManager(object):
         return event
 
     @cached()
-    async def get_or_create_notice_room_for_user(self, user_id):
+    async def get_or_create_notice_room_for_user(self, user_id: str) -> str:
         """Get the room for notices for a given user
 
         If we have not yet created a notice room for this user, create it, but don't
         invite the user to it.
 
         Args:
-            user_id (str): complete user id for the user we want a room for
+            user_id: complete user id for the user we want a room for
 
         Returns:
-            str: room id of notice room.
+            room id of notice room.
         """
         if not self.is_enabled():
             raise Exception("Server notices not enabled")
@@ -163,7 +166,7 @@ class ServerNoticesManager(object):
         logger.info("Created server notices room %s for %s", room_id, user_id)
         return room_id
 
-    async def maybe_invite_user_to_room(self, user_id: str, room_id: str):
+    async def maybe_invite_user_to_room(self, user_id: str, room_id: str) -> None:
         """Invite the given user to the given server room, unless the user has already
         joined or been invited to it.
 
diff --git a/synapse/server_notices/server_notices_sender.py b/synapse/server_notices/server_notices_sender.py
index be74e86641..a754f75db4 100644
--- a/synapse/server_notices/server_notices_sender.py
+++ b/synapse/server_notices/server_notices_sender.py
@@ -12,6 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Iterable, Union
+
 from synapse.server_notices.consent_server_notices import ConsentServerNotices
 from synapse.server_notices.resource_limits_server_notices import (
     ResourceLimitsServerNotices,
@@ -32,22 +34,22 @@ class ServerNoticesSender(object):
         self._server_notices = (
             ConsentServerNotices(hs),
             ResourceLimitsServerNotices(hs),
-        )
+        )  # type: Iterable[Union[ConsentServerNotices, ResourceLimitsServerNotices]]
 
-    async def on_user_syncing(self, user_id):
+    async def on_user_syncing(self, user_id: str) -> None:
         """Called when the user performs a sync operation.
 
         Args:
-            user_id (str): mxid of user who synced
+            user_id: mxid of user who synced
         """
         for sn in self._server_notices:
             await sn.maybe_send_server_notice_to_user(user_id)
 
-    async def on_user_ip(self, user_id):
+    async def on_user_ip(self, user_id: str) -> None:
         """Called on the master when a worker process saw a client request.
 
         Args:
-            user_id (str): mxid
+            user_id: mxid
         """
         # The synchrotrons use a stubbed version of ServerNoticesSender, so
         # we check for notices to send to the user in on_user_ip as well as
diff --git a/synapse/server_notices/worker_server_notices_sender.py b/synapse/server_notices/worker_server_notices_sender.py
index 245ec7c64f..e9390b19da 100644
--- a/synapse/server_notices/worker_server_notices_sender.py
+++ b/synapse/server_notices/worker_server_notices_sender.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from twisted.internet import defer
 
 
 class WorkerServerNoticesSender(object):
@@ -24,24 +23,18 @@ class WorkerServerNoticesSender(object):
             hs (synapse.server.HomeServer):
         """
 
-    def on_user_syncing(self, user_id):
+    async def on_user_syncing(self, user_id: str) -> None:
         """Called when the user performs a sync operation.
 
         Args:
-            user_id (str): mxid of user who synced
-
-        Returns:
-            Deferred
+            user_id: mxid of user who synced
         """
-        return defer.succeed(None)
+        return None
 
-    def on_user_ip(self, user_id):
+    async def on_user_ip(self, user_id: str) -> None:
         """Called on the master when a worker process saw a client request.
 
         Args:
-            user_id (str): mxid
-
-        Returns:
-            Deferred
+            user_id: mxid
         """
         raise AssertionError("on_user_ip unexpectedly called on worker")
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 25ccef5aa5..a1d3884667 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -28,7 +28,7 @@ from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.logging.utils import log_function
 from synapse.state import v1, v2
-from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
+from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.storage.roommember import ProfileInfo
 from synapse.types import StateMap
 from synapse.util import Clock
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index ec89f645d4..5ef3853559 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -17,18 +17,19 @@
 """
 The storage layer is split up into multiple parts to allow Synapse to run
 against different configurations of databases (e.g. single or multiple
-databases). The `Database` class represents a single physical database. The
-`data_stores` are classes that talk directly to a `Database` instance and have
-associated schemas, background updates, etc. On top of those there are classes
-that provide high level interfaces that combine calls to multiple `data_stores`.
+databases). The `DatabasePool` class represents connections to a single physical
+database. The `databases` are classes that talk directly to a `DatabasePool`
+instance and have associated schemas, background updates, etc. On top of those
+there are classes that provide high level interfaces that combine calls to
+multiple `databases`.
 
 There are also schemas that get applied to every database, regardless of the
 data stores associated with them (e.g. the schema version tables), which are
 stored in `synapse.storage.schema`.
 """
 
-from synapse.storage.data_stores import DataStores
-from synapse.storage.data_stores.main import DataStore
+from synapse.storage.databases import Databases
+from synapse.storage.databases.main import DataStore
 from synapse.storage.persist_events import EventsPersistenceStorage
 from synapse.storage.purge_events import PurgeEventsStorage
 from synapse.storage.state import StateGroupStorage
@@ -40,7 +41,7 @@ class Storage(object):
     """The high level interfaces for talking to various storage layers.
     """
 
-    def __init__(self, hs, stores: DataStores):
+    def __init__(self, hs, stores: Databases):
         # We include the main data store here mainly so that we don't have to
         # rewrite all the existing code to split it into high vs low level
         # interfaces.
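
A rough sketch of how the three layers described in the docstring fit together after the rename (wiring simplified from the `HomeServer.setup()` hunk above):

    databases = Databases(DataStore, hs)   # one DatabasePool per configured database
    storage = Storage(hs, databases)       # high-level facade over the stores
    persistence = storage.persistence      # e.g. EventsPersistenceStorage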
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 985a042869..ca800df831 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -23,7 +23,7 @@ from canonicaljson import json
 
 from synapse.storage.database import LoggingTransaction  # noqa: F401
 from synapse.storage.database import make_in_list_sql_clause  # noqa: F401
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.types import Collection, get_domain_from_id
 
 logger = logging.getLogger(__name__)
@@ -37,11 +37,11 @@ class SQLBaseStore(metaclass=ABCMeta):
     per data store (and not one per physical database).
     """
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         self.hs = hs
         self._clock = hs.get_clock()
         self.database_engine = database.engine
-        self.db = database
+        self.db_pool = database
         self.rand = random.SystemRandom()
 
     def process_replication_rows(self, stream_name, instance_name, token, rows):
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 018826ef69..f43463df53 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -88,7 +88,7 @@ class BackgroundUpdater(object):
 
     def __init__(self, hs, database):
         self._clock = hs.get_clock()
-        self.db = database
+        self.db_pool = database
 
         # if a background update is currently running, its name.
         self._current_background_update = None  # type: Optional[str]
@@ -139,7 +139,7 @@ class BackgroundUpdater(object):
         # otherwise, check if there are updates to be run. This is important,
         # as we may be running on a worker which doesn't perform the bg updates
         # itself, but still wants to wait for them to happen.
-        updates = await self.db.simple_select_onecol(
+        updates = await self.db_pool.simple_select_onecol(
             "background_updates",
             keyvalues=None,
             retcol="1",
@@ -160,7 +160,7 @@ class BackgroundUpdater(object):
         if update_name == self._current_background_update:
             return False
 
-        update_exists = await self.db.simple_select_one_onecol(
+        update_exists = await self.db_pool.simple_select_one_onecol(
             "background_updates",
             keyvalues={"update_name": update_name},
             retcol="1",
@@ -189,10 +189,10 @@ class BackgroundUpdater(object):
                 ORDER BY ordering, update_name
                 """
             )
-            return self.db.cursor_to_dict(txn)
+            return self.db_pool.cursor_to_dict(txn)
 
         if not self._current_background_update:
-            all_pending_updates = await self.db.runInteraction(
+            all_pending_updates = await self.db_pool.runInteraction(
                 "background_updates", get_background_updates_txn,
             )
             if not all_pending_updates:
@@ -243,7 +243,7 @@ class BackgroundUpdater(object):
         else:
             batch_size = self.DEFAULT_BACKGROUND_BATCH_SIZE
 
-        progress_json = await self.db.simple_select_one_onecol(
+        progress_json = await self.db_pool.simple_select_one_onecol(
             "background_updates",
             keyvalues={"update_name": update_name},
             retcol="progress_json",
@@ -402,7 +402,7 @@ class BackgroundUpdater(object):
             logger.debug("[SQL] %s", sql)
             c.execute(sql)
 
-        if isinstance(self.db.engine, engines.PostgresEngine):
+        if isinstance(self.db_pool.engine, engines.PostgresEngine):
             runner = create_index_psql
         elif psql_only:
             runner = None
@@ -413,7 +413,7 @@ class BackgroundUpdater(object):
         def updater(progress, batch_size):
             if runner is not None:
                 logger.info("Adding index %s to %s", index_name, table)
-                yield self.db.runWithConnection(runner)
+                yield self.db_pool.runWithConnection(runner)
             yield self._end_background_update(update_name)
             return 1
 
@@ -433,7 +433,7 @@ class BackgroundUpdater(object):
                 % update_name
             )
         self._current_background_update = None
-        return self.db.simple_delete_one(
+        return self.db_pool.simple_delete_one(
             "background_updates", keyvalues={"update_name": update_name}
         )
 
@@ -445,7 +445,7 @@ class BackgroundUpdater(object):
             progress: The progress of the update.
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "background_update_progress",
             self._background_update_progress_txn,
             update_name,
@@ -463,7 +463,7 @@ class BackgroundUpdater(object):
 
         progress_json = json.dumps(progress)
 
-        self.db.simple_update_one_txn(
+        self.db_pool.simple_update_one_txn(
             txn,
             "background_updates",
             keyvalues={"update_name": update_name},
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index ce8757a400..4ada6f5563 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -279,7 +279,7 @@ class PerformanceCounters(object):
         return top_n_counters
 
 
-class Database(object):
+class DatabasePool(object):
     """Wraps a single physical database and connection pool.
 
     A single database may be used by multiple data stores.
diff --git a/synapse/storage/data_stores/__init__.py b/synapse/storage/databases/__init__.py
index 599ee470d4..b163eebf39 100644
--- a/synapse/storage/data_stores/__init__.py
+++ b/synapse/storage/databases/__init__.py
@@ -15,17 +15,17 @@
 
 import logging
 
-from synapse.storage.data_stores.main.events import PersistEventsStore
-from synapse.storage.data_stores.state import StateGroupDataStore
-from synapse.storage.database import Database, make_conn
+from synapse.storage.database import DatabasePool, make_conn
+from synapse.storage.databases.main.events import PersistEventsStore
+from synapse.storage.databases.state import StateGroupDataStore
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 
 logger = logging.getLogger(__name__)
 
 
-class DataStores(object):
-    """The various data stores.
+class Databases(object):
+    """The various databases.
 
     These are low level interfaces to physical databases.
 
@@ -51,12 +51,12 @@ class DataStores(object):
 
                 engine.check_database(db_conn)
                 prepare_database(
-                    db_conn, engine, hs.config, data_stores=database_config.data_stores,
+                    db_conn, engine, hs.config, databases=database_config.databases,
                 )
 
-                database = Database(hs, database_config, engine)
+                database = DatabasePool(hs, database_config, engine)
 
-                if "main" in database_config.data_stores:
+                if "main" in database_config.databases:
                     logger.info("Starting 'main' data store")
 
                     # Sanity check we don't try and configure the main store on
@@ -73,7 +73,7 @@ class DataStores(object):
                             hs, database, self.main
                         )
 
-                if "state" in database_config.data_stores:
+                if "state" in database_config.databases:
                     logger.info("Starting 'state' data store")
 
                     # Sanity check we don't try and configure the state store on
diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 932458f651..17fa470919 100644
--- a/synapse/storage/data_stores/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -21,7 +21,7 @@ import time
 
 from synapse.api.constants import PresenceState
 from synapse.config.homeserver import HomeServerConfig
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import (
     IdGenerator,
@@ -119,7 +119,7 @@ class DataStore(
     CacheInvalidationWorkerStore,
     ServerMetricsStore,
 ):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         self.hs = hs
         self._clock = hs.get_clock()
         self.database_engine = database.engine
@@ -174,7 +174,7 @@ class DataStore(
 
         self._presence_on_startup = self._get_active_presence(db_conn)
 
-        presence_cache_prefill, min_presence_val = self.db.get_cache_dict(
+        presence_cache_prefill, min_presence_val = self.db_pool.get_cache_dict(
             db_conn,
             "presence_stream",
             entity_column="user_id",
@@ -188,7 +188,7 @@ class DataStore(
         )
 
         max_device_inbox_id = self._device_inbox_id_gen.get_current_token()
-        device_inbox_prefill, min_device_inbox_id = self.db.get_cache_dict(
+        device_inbox_prefill, min_device_inbox_id = self.db_pool.get_cache_dict(
             db_conn,
             "device_inbox",
             entity_column="user_id",
@@ -203,7 +203,7 @@ class DataStore(
         )
         # The federation outbox and the local device inbox use the same
         # stream_id generator.
-        device_outbox_prefill, min_device_outbox_id = self.db.get_cache_dict(
+        device_outbox_prefill, min_device_outbox_id = self.db_pool.get_cache_dict(
             db_conn,
             "device_federation_outbox",
             entity_column="destination",
@@ -229,7 +229,7 @@ class DataStore(
         )
 
         events_max = self._stream_id_gen.get_current_token()
-        curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(
+        curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
             db_conn,
             "current_state_delta_stream",
             entity_column="room_id",
@@ -243,7 +243,7 @@ class DataStore(
             prefilled_cache=curr_state_delta_prefill,
         )
 
-        _group_updates_prefill, min_group_updates_id = self.db.get_cache_dict(
+        _group_updates_prefill, min_group_updates_id = self.db_pool.get_cache_dict(
             db_conn,
             "local_group_updates",
             entity_column="user_id",
@@ -282,7 +282,7 @@ class DataStore(
 
         txn = db_conn.cursor()
         txn.execute(sql, (PresenceState.OFFLINE,))
-        rows = self.db.cursor_to_dict(txn)
+        rows = self.db_pool.cursor_to_dict(txn)
         txn.close()
 
         for row in rows:
@@ -295,7 +295,9 @@ class DataStore(
         Counts the number of users who used this homeserver in the last 24 hours.
         """
         yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
-        return self.db.runInteraction("count_daily_users", self._count_users, yesterday)
+        return self.db_pool.runInteraction(
+            "count_daily_users", self._count_users, yesterday
+        )
 
     def count_monthly_users(self):
         """
@@ -305,7 +307,7 @@ class DataStore(
         amongst other things, includes a 3 day grace period before a user counts.
         """
         thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "count_monthly_users", self._count_users, thirty_days_ago
         )
 
@@ -405,7 +407,7 @@ class DataStore(
 
             return results
 
-        return self.db.runInteraction("count_r30_users", _count_r30_users)
+        return self.db_pool.runInteraction("count_r30_users", _count_r30_users)
 
     def _get_start_of_day(self):
         """
@@ -470,7 +472,7 @@ class DataStore(
             # frequently
             self._last_user_visit_update = now
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "generate_user_daily_visits", _generate_user_daily_visits
         )
 
@@ -481,7 +483,7 @@ class DataStore(
         Returns:
             defer.Deferred: resolves to list[dict[str, Any]]
         """
-        return self.db.simple_select_list(
+        return self.db_pool.simple_select_list(
             table="users",
             keyvalues={},
             retcols=[
@@ -543,10 +545,12 @@ class DataStore(
                 where_clause
             )
             txn.execute(sql, args)
-            users = self.db.cursor_to_dict(txn)
+            users = self.db_pool.cursor_to_dict(txn)
             return users, count
 
-        return self.db.runInteraction("get_users_paginate_txn", get_users_paginate_txn)
+        return self.db_pool.runInteraction(
+            "get_users_paginate_txn", get_users_paginate_txn
+        )
 
     def search_users(self, term):
         """Function to search users list for one or more users with
@@ -558,7 +562,7 @@ class DataStore(
         Returns:
             defer.Deferred: resolves to list[dict[str, Any]]
         """
-        return self.db.simple_search_list(
+        return self.db_pool.simple_search_list(
             table="users",
             term=term,
             col="name",
diff --git a/synapse/storage/data_stores/main/account_data.py b/synapse/storage/databases/main/account_data.py
index 33cc372dfd..2193d8fdc5 100644
--- a/synapse/storage/data_stores/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -23,7 +23,7 @@ from canonicaljson import json
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.storage.util.id_generators import StreamIdGenerator
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 from synapse.util.caches.stream_change_cache import StreamChangeCache
@@ -40,7 +40,7 @@ class AccountDataWorkerStore(SQLBaseStore):
     # the abstract methods being implemented.
     __metaclass__ = abc.ABCMeta
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         account_max = self.get_max_account_data_stream_id()
         self._account_data_stream_cache = StreamChangeCache(
             "AccountDataAndTagsChangeCache", account_max
@@ -69,7 +69,7 @@ class AccountDataWorkerStore(SQLBaseStore):
         """
 
         def get_account_data_for_user_txn(txn):
-            rows = self.db.simple_select_list_txn(
+            rows = self.db_pool.simple_select_list_txn(
                 txn,
                 "account_data",
                 {"user_id": user_id},
@@ -80,7 +80,7 @@ class AccountDataWorkerStore(SQLBaseStore):
                 row["account_data_type"]: db_to_json(row["content"]) for row in rows
             }
 
-            rows = self.db.simple_select_list_txn(
+            rows = self.db_pool.simple_select_list_txn(
                 txn,
                 "room_account_data",
                 {"user_id": user_id},
@@ -94,7 +94,7 @@ class AccountDataWorkerStore(SQLBaseStore):
 
             return global_account_data, by_room
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_account_data_for_user", get_account_data_for_user_txn
         )
 
@@ -104,7 +104,7 @@ class AccountDataWorkerStore(SQLBaseStore):
         Returns:
             Deferred: A dict
         """
-        result = yield self.db.simple_select_one_onecol(
+        result = yield self.db_pool.simple_select_one_onecol(
             table="account_data",
             keyvalues={"user_id": user_id, "account_data_type": data_type},
             retcol="content",
@@ -129,7 +129,7 @@ class AccountDataWorkerStore(SQLBaseStore):
         """
 
         def get_account_data_for_room_txn(txn):
-            rows = self.db.simple_select_list_txn(
+            rows = self.db_pool.simple_select_list_txn(
                 txn,
                 "room_account_data",
                 {"user_id": user_id, "room_id": room_id},
@@ -140,7 +140,7 @@ class AccountDataWorkerStore(SQLBaseStore):
                 row["account_data_type"]: db_to_json(row["content"]) for row in rows
             }
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_account_data_for_room", get_account_data_for_room_txn
         )
 
@@ -158,7 +158,7 @@ class AccountDataWorkerStore(SQLBaseStore):
         """
 
         def get_account_data_for_room_and_type_txn(txn):
-            content_json = self.db.simple_select_one_onecol_txn(
+            content_json = self.db_pool.simple_select_one_onecol_txn(
                 txn,
                 table="room_account_data",
                 keyvalues={
@@ -172,7 +172,7 @@ class AccountDataWorkerStore(SQLBaseStore):
 
             return db_to_json(content_json) if content_json else None
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_account_data_for_room_and_type", get_account_data_for_room_and_type_txn
         )
 
@@ -202,7 +202,7 @@ class AccountDataWorkerStore(SQLBaseStore):
             txn.execute(sql, (last_id, current_id, limit))
             return txn.fetchall()
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_updated_global_account_data", get_updated_global_account_data_txn
         )
 
@@ -232,7 +232,7 @@ class AccountDataWorkerStore(SQLBaseStore):
             txn.execute(sql, (last_id, current_id, limit))
             return txn.fetchall()
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_updated_room_account_data", get_updated_room_account_data_txn
         )
 
@@ -277,7 +277,7 @@ class AccountDataWorkerStore(SQLBaseStore):
         if not changed:
             return defer.succeed(({}, {}))
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_updated_account_data_for_user", get_updated_account_data_for_user_txn
         )
 
@@ -295,7 +295,7 @@ class AccountDataWorkerStore(SQLBaseStore):
 
 
 class AccountDataStore(AccountDataWorkerStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         self._account_data_id_gen = StreamIdGenerator(
             db_conn,
             "account_data_max_stream_id",
@@ -333,7 +333,7 @@ class AccountDataStore(AccountDataWorkerStore):
             # no need to lock here as room_account_data has a unique constraint
             # on (user_id, room_id, account_data_type) so simple_upsert will
             # retry if there is a conflict.
-            yield self.db.simple_upsert(
+            yield self.db_pool.simple_upsert(
                 desc="add_room_account_data",
                 table="room_account_data",
                 keyvalues={
@@ -379,7 +379,7 @@ class AccountDataStore(AccountDataWorkerStore):
             # no need to lock here as account_data has a unique constraint on
             # (user_id, account_data_type) so simple_upsert will retry if
             # there is a conflict.
-            yield self.db.simple_upsert(
+            yield self.db_pool.simple_upsert(
                 desc="add_user_account_data",
                 table="account_data",
                 keyvalues={"user_id": user_id, "account_data_type": account_data_type},
@@ -427,4 +427,4 @@ class AccountDataStore(AccountDataWorkerStore):
             )
             txn.execute(update_max_id_sql, (next_id, next_id))
 
-        return self.db.runInteraction("update_account_data_max_stream_id", _update)
+        return self.db_pool.runInteraction("update_account_data_max_stream_id", _update)
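
The comments in the hunks above lean on the fact that simple_upsert retries on conflict rather than taking a table lock, because the target tables carry a unique constraint. On engines with native upsert support this boils down to INSERT ... ON CONFLICT; a hedged sqlite3 sketch (requires SQLite 3.24+, table trimmed to the columns named above):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """CREATE TABLE room_account_data (
           user_id TEXT, room_id TEXT, account_data_type TEXT, content TEXT,
           UNIQUE (user_id, room_id, account_data_type))"""
)

def simple_upsert(conn, user_id, room_id, data_type, content):
    # One statement: insert, or update the existing row on constraint hit.
    with conn:
        conn.execute(
            """INSERT INTO room_account_data VALUES (?, ?, ?, ?)
               ON CONFLICT (user_id, room_id, account_data_type)
               DO UPDATE SET content = excluded.content""",
            (user_id, room_id, data_type, content),
        )

simple_upsert(conn, "@alice:hs", "!room:hs", "m.tag", '{"tags": {}}')
simple_upsert(conn, "@alice:hs", "!room:hs", "m.tag", '{"tags": {"fav": {}}}')
print(conn.execute("SELECT content FROM room_account_data").fetchall())
# -> [('{"tags": {"fav": {}}}',)]
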
diff --git a/synapse/storage/data_stores/main/appservice.py b/synapse/storage/databases/main/appservice.py
index 56659fed37..055a3962dc 100644
--- a/synapse/storage/data_stores/main/appservice.py
+++ b/synapse/storage/databases/main/appservice.py
@@ -23,8 +23,8 @@ from twisted.internet import defer
 from synapse.appservice import AppServiceTransaction
 from synapse.config.appservice import load_appservices
 from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
 
 logger = logging.getLogger(__name__)
 
@@ -49,7 +49,7 @@ def _make_exclusive_regex(services_cache):
 
 
 class ApplicationServiceWorkerStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         self.services_cache = load_appservices(
             hs.hostname, hs.config.app_service_config_files
         )
@@ -134,7 +134,7 @@ class ApplicationServiceTransactionWorkerStore(
             A Deferred which resolves to a list of ApplicationServices, which
             may be empty.
         """
-        results = yield self.db.simple_select_list(
+        results = yield self.db_pool.simple_select_list(
             "application_services_state", {"state": state}, ["as_id"]
         )
         # NB: This assumes this class is linked with ApplicationServiceStore
@@ -156,7 +156,7 @@ class ApplicationServiceTransactionWorkerStore(
         Returns:
             A Deferred which resolves to ApplicationServiceState.
         """
-        result = yield self.db.simple_select_one(
+        result = yield self.db_pool.simple_select_one(
             "application_services_state",
             {"as_id": service.id},
             ["state"],
@@ -176,7 +176,7 @@ class ApplicationServiceTransactionWorkerStore(
         Returns:
             A Deferred which resolves when the state was set successfully.
         """
-        return self.db.simple_upsert(
+        return self.db_pool.simple_upsert(
             "application_services_state", {"as_id": service.id}, {"state": state}
         )
 
@@ -217,7 +217,9 @@ class ApplicationServiceTransactionWorkerStore(
             )
             return AppServiceTransaction(service=service, id=new_txn_id, events=events)
 
-        return self.db.runInteraction("create_appservice_txn", _create_appservice_txn)
+        return self.db_pool.runInteraction(
+            "create_appservice_txn", _create_appservice_txn
+        )
 
     def complete_appservice_txn(self, txn_id, service):
         """Completes an application service transaction.
@@ -250,7 +252,7 @@ class ApplicationServiceTransactionWorkerStore(
                 )
 
             # Set current txn_id for AS to 'txn_id'
-            self.db.simple_upsert_txn(
+            self.db_pool.simple_upsert_txn(
                 txn,
                 "application_services_state",
                 {"as_id": service.id},
@@ -258,13 +260,13 @@ class ApplicationServiceTransactionWorkerStore(
             )
 
             # Delete txn
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 "application_services_txns",
                 {"txn_id": txn_id, "as_id": service.id},
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "complete_appservice_txn", _complete_appservice_txn
         )
 
@@ -288,7 +290,7 @@ class ApplicationServiceTransactionWorkerStore(
                 " ORDER BY txn_id ASC LIMIT 1",
                 (service.id,),
             )
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
             if not rows:
                 return None
 
@@ -296,7 +298,7 @@ class ApplicationServiceTransactionWorkerStore(
 
             return entry
 
-        entry = yield self.db.runInteraction(
+        entry = yield self.db_pool.runInteraction(
             "get_oldest_unsent_appservice_txn", _get_oldest_unsent_txn
         )
 
@@ -326,7 +328,7 @@ class ApplicationServiceTransactionWorkerStore(
                 "UPDATE appservice_stream_position SET stream_ordering = ?", (pos,)
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "set_appservice_last_pos", set_appservice_last_pos_txn
         )
 
@@ -355,7 +357,7 @@ class ApplicationServiceTransactionWorkerStore(
 
             return upper_bound, [row[1] for row in rows]
 
-        upper_bound, event_ids = yield self.db.runInteraction(
+        upper_bound, event_ids = yield self.db_pool.runInteraction(
             "get_new_events_for_appservice", get_new_events_for_appservice_txn
         )
 
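complete_appservice_txn above performs two writes — bump the per-appservice completion state, delete the pending transaction row — inside a single interaction, so they commit or fail together. A simplified sqlite3 illustration of that shape (columns reduced; the real application_services_state table carries more than a last_txn column, so treat the schema here as an assumption):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE application_services_state (as_id TEXT PRIMARY KEY, last_txn INTEGER);
    CREATE TABLE application_services_txns (as_id TEXT, txn_id INTEGER);
    INSERT INTO application_services_txns VALUES ('irc-bridge', 7);
""")

def _complete_appservice_txn(txn, as_id, txn_id):
    # Both writes share one transaction: the state bump and the queue
    # deletion land together or not at all.
    txn.execute(
        """INSERT INTO application_services_state VALUES (?, ?)
           ON CONFLICT (as_id) DO UPDATE SET last_txn = excluded.last_txn""",
        (as_id, txn_id),
    )
    txn.execute(
        "DELETE FROM application_services_txns WHERE as_id = ? AND txn_id = ?",
        (as_id, txn_id),
    )

with conn:  # sqlite3's context manager: commit on success, rollback on error
    _complete_appservice_txn(conn.cursor(), "irc-bridge", 7)
print(conn.execute("SELECT * FROM application_services_state").fetchall())
# -> [('irc-bridge', 7)]
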
diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/databases/main/cache.py
index edc3624fed..683afde52b 100644
--- a/synapse/storage/data_stores/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -26,7 +26,7 @@ from synapse.replication.tcp.streams.events import (
     EventsStreamEventRow,
 )
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.storage.engines import PostgresEngine
 from synapse.util.iterutils import batch_iter
 
@@ -39,7 +39,7 @@ CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
 
 
 class CacheInvalidationWorkerStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super().__init__(database, db_conn, hs)
 
         self._instance_name = hs.get_instance_name()
@@ -92,7 +92,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
 
             return updates, upto_token, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_updated_caches", get_all_updated_caches_txn
         )
 
@@ -203,7 +203,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             return
 
         cache_func.invalidate(keys)
-        await self.db.runInteraction(
+        await self.db_pool.runInteraction(
             "invalidate_cache_and_stream",
             self._send_invalidation_to_replication,
             cache_func.__name__,
@@ -288,7 +288,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
             if keys is not None:
                 keys = list(keys)
 
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="cache_invalidation_stream_by_instance",
                 values={
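
invalidate_cache_and_stream above pairs a local cache invalidation with a row written to cache_invalidation_stream_by_instance, which other workers replay to invalidate their own caches. A rough standalone sketch of that idea, with illustrative names and none of the real per-instance stream-id machinery:

import sqlite3, time

# Toy in-process cache, keyed like Synapse's (cache name, key) pairs.
local_cache = {("get_aliases_for_room", "!room:hs"): ["#alias:hs"]}

conn = sqlite3.connect(":memory:")
conn.execute(
    """CREATE TABLE cache_invalidation_stream_by_instance (
           stream_id INTEGER, instance_name TEXT, cache_func TEXT,
           keys TEXT, invalidation_ts INTEGER)"""
)

def invalidate_cache_and_stream(txn, stream_id, instance, cache_func, keys):
    # 1) drop the local entry, 2) record it for other workers to replay.
    local_cache.pop((cache_func,) + tuple(keys), None)
    txn.execute(
        "INSERT INTO cache_invalidation_stream_by_instance VALUES (?, ?, ?, ?, ?)",
        (stream_id, instance, cache_func, ",".join(keys), int(time.time() * 1000)),
    )

with conn:
    invalidate_cache_and_stream(
        conn.cursor(), 1, "master", "get_aliases_for_room", ("!room:hs",)
    )
print(local_cache)  # {} -- gone locally; the stream row tells other workers
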
diff --git a/synapse/storage/data_stores/main/censor_events.py b/synapse/storage/databases/main/censor_events.py
index 2d48261724..1de8249563 100644
--- a/synapse/storage/data_stores/main/censor_events.py
+++ b/synapse/storage/databases/main/censor_events.py
@@ -21,10 +21,10 @@ from twisted.internet import defer
 from synapse.events.utils import prune_event_dict
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.data_stores.main.cache import CacheInvalidationWorkerStore
-from synapse.storage.data_stores.main.events import encode_json
-from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
+from synapse.storage.databases.main.events import encode_json
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -34,7 +34,7 @@ logger = logging.getLogger(__name__)
 
 
 class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs: "HomeServer"):
+    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
         super().__init__(database, db_conn, hs)
 
         def _censor_redactions():
@@ -56,7 +56,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
             return
 
         if not (
-            await self.db.updates.has_completed_background_update(
+            await self.db_pool.updates.has_completed_background_update(
                 "redactions_have_censored_ts_idx"
             )
         ):
@@ -85,7 +85,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
             LIMIT ?
         """
 
-        rows = await self.db.execute(
+        rows = await self.db_pool.execute(
             "_censor_redactions_fetch", None, sql, before_ts, 100
         )
 
@@ -123,14 +123,14 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
                 if pruned_json:
                     self._censor_event_txn(txn, event_id, pruned_json)
 
-                self.db.simple_update_one_txn(
+                self.db_pool.simple_update_one_txn(
                     txn,
                     table="redactions",
                     keyvalues={"event_id": redaction_id},
                     updatevalues={"have_censored": True},
                 )
 
-        await self.db.runInteraction("_update_censor_txn", _update_censor_txn)
+        await self.db_pool.runInteraction("_update_censor_txn", _update_censor_txn)
 
     def _censor_event_txn(self, txn, event_id, pruned_json):
         """Censor an event by replacing its JSON in the event_json table with the
@@ -141,7 +141,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
             event_id (str): The ID of the event to censor.
             pruned_json (str): The pruned JSON
         """
-        self.db.simple_update_one_txn(
+        self.db_pool.simple_update_one_txn(
             txn,
             table="event_json",
             keyvalues={"event_id": event_id},
@@ -193,7 +193,9 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
                 txn, "_get_event_cache", (event.event_id,)
             )
 
-        yield self.db.runInteraction("delete_expired_event", delete_expired_event_txn)
+        yield self.db_pool.runInteraction(
+            "delete_expired_event", delete_expired_event_txn
+        )
 
     def _delete_event_expiry_txn(self, txn, event_id):
         """Delete the expiry timestamp associated with an event ID without deleting the
@@ -203,6 +205,6 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
             txn (LoggingTransaction): The transaction to use to perform the deletion.
             event_id (str): The event ID to delete the associated expiry timestamp of.
         """
-        return self.db.simple_delete_txn(
+        return self.db_pool.simple_delete_txn(
             txn=txn, table="event_expiry", keyvalues={"event_id": event_id}
         )
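
The guard above skips the censor pass until the redactions_have_censored_ts_idx background update has finished, since the fetch query depends on that index. A toy model of the check follows; it assumes (as in Synapse) that a pending update has a row in background_updates and that completion deletes it:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE background_updates (update_name TEXT PRIMARY KEY)")
conn.execute(
    "INSERT INTO background_updates VALUES ('redactions_have_censored_ts_idx')"
)

def has_completed_background_update(conn, name):
    # A pending update still has a row; completion removes it.
    row = conn.execute(
        "SELECT 1 FROM background_updates WHERE update_name = ?", (name,)
    ).fetchone()
    return row is None

def censor_redactions(conn):
    if not has_completed_background_update(conn, "redactions_have_censored_ts_idx"):
        return  # index not built yet; the timer will retry later
    # ...fetch redactions older than the retention period and prune them...

censor_redactions(conn)  # no-op while the index build is still pending
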
diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 995d4764a9..712c8d0264 100644
--- a/synapse/storage/data_stores/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -19,7 +19,7 @@ from twisted.internet import defer
 
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import Database, make_tuple_comparison_clause
+from synapse.storage.database import DatabasePool, make_tuple_comparison_clause
 from synapse.util.caches.descriptors import Cache
 
 logger = logging.getLogger(__name__)
@@ -31,40 +31,40 @@ LAST_SEEN_GRANULARITY = 120 * 1000
 
 
 class ClientIpBackgroundUpdateStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(ClientIpBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "user_ips_device_index",
             index_name="user_ips_device_id",
             table="user_ips",
             columns=["user_id", "device_id", "last_seen"],
         )
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "user_ips_last_seen_index",
             index_name="user_ips_last_seen",
             table="user_ips",
             columns=["user_id", "last_seen"],
         )
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "user_ips_last_seen_only_index",
             index_name="user_ips_last_seen_only",
             table="user_ips",
             columns=["last_seen"],
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "user_ips_analyze", self._analyze_user_ip
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "user_ips_remove_dupes", self._remove_user_ip_dupes
         )
 
         # Register a unique index
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "user_ips_device_unique_index",
             index_name="user_ips_user_token_ip_unique_index",
             table="user_ips",
@@ -73,12 +73,12 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
         )
 
         # Drop the old non-unique index
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "user_ips_drop_nonunique_index", self._remove_user_ip_nonunique
         )
 
         # Update the last seen info in devices.
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "devices_last_seen", self._devices_last_seen_update
         )
 
@@ -89,8 +89,10 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
             txn.execute("DROP INDEX IF EXISTS user_ips_user_ip")
             txn.close()
 
-        yield self.db.runWithConnection(f)
-        yield self.db.updates._end_background_update("user_ips_drop_nonunique_index")
+        yield self.db_pool.runWithConnection(f)
+        yield self.db_pool.updates._end_background_update(
+            "user_ips_drop_nonunique_index"
+        )
         return 1
 
     @defer.inlineCallbacks
@@ -104,9 +106,9 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
         def user_ips_analyze(txn):
             txn.execute("ANALYZE user_ips")
 
-        yield self.db.runInteraction("user_ips_analyze", user_ips_analyze)
+        yield self.db_pool.runInteraction("user_ips_analyze", user_ips_analyze)
 
-        yield self.db.updates._end_background_update("user_ips_analyze")
+        yield self.db_pool.updates._end_background_update("user_ips_analyze")
 
         return 1
 
@@ -138,7 +140,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
                 return None
 
         # Get a last seen that has roughly `batch_size` since `begin_last_seen`
-        end_last_seen = yield self.db.runInteraction(
+        end_last_seen = yield self.db_pool.runInteraction(
             "user_ips_dups_get_last_seen", get_last_seen
         )
 
@@ -269,14 +271,14 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
                     (user_id, access_token, ip, device_id, user_agent, last_seen),
                 )
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, "user_ips_remove_dupes", {"last_seen": end_last_seen}
             )
 
-        yield self.db.runInteraction("user_ips_dups_remove", remove)
+        yield self.db_pool.runInteraction("user_ips_dups_remove", remove)
 
         if last:
-            yield self.db.updates._end_background_update("user_ips_remove_dupes")
+            yield self.db_pool.updates._end_background_update("user_ips_remove_dupes")
 
         return batch_size
 
@@ -336,7 +338,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
             txn.execute_batch(sql, rows)
 
             _, _, _, user_id, device_id = rows[-1]
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn,
                 "devices_last_seen",
                 {"last_user_id": user_id, "last_device_id": device_id},
@@ -344,18 +346,18 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore):
 
             return len(rows)
 
-        updated = yield self.db.runInteraction(
+        updated = yield self.db_pool.runInteraction(
             "_devices_last_seen_update", _devices_last_seen_update_txn
         )
 
         if not updated:
-            yield self.db.updates._end_background_update("devices_last_seen")
+            yield self.db_pool.updates._end_background_update("devices_last_seen")
 
         return updated
 
 
 class ClientIpStore(ClientIpBackgroundUpdateStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
 
         self.client_ip_last_seen = Cache(
             name="client_ip_last_seen", keylen=4, max_entries=50000
@@ -403,18 +405,18 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
     def _update_client_ips_batch(self):
 
         # If the DB pool has already terminated, don't try updating
-        if not self.db.is_running():
+        if not self.db_pool.is_running():
             return
 
         to_update = self._batch_row_update
         self._batch_row_update = {}
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "_update_client_ips_batch", self._update_client_ips_batch_txn, to_update
         )
 
     def _update_client_ips_batch_txn(self, txn, to_update):
-        if "user_ips" in self.db._unsafe_to_upsert_tables or (
+        if "user_ips" in self.db_pool._unsafe_to_upsert_tables or (
             not self.database_engine.can_native_upsert
         ):
             self.database_engine.lock_table(txn, "user_ips")
@@ -423,7 +425,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
             (user_id, access_token, ip), (user_agent, device_id, last_seen) = entry
 
             try:
-                self.db.simple_upsert_txn(
+                self.db_pool.simple_upsert_txn(
                     txn,
                     table="user_ips",
                     keyvalues={
@@ -445,7 +447,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
                     # this is always an update rather than an upsert: the row should
                     # already exist, and if it doesn't, that may be because it has been
                     # deleted, and we don't want to re-create it.
-                    self.db.simple_update_txn(
+                    self.db_pool.simple_update_txn(
                         txn,
                         table="devices",
                         keyvalues={"user_id": user_id, "device_id": device_id},
@@ -477,7 +479,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
         if device_id is not None:
             keyvalues["device_id"] = device_id
 
-        res = yield self.db.simple_select_list(
+        res = yield self.db_pool.simple_select_list(
             table="devices",
             keyvalues=keyvalues,
             retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"),
@@ -510,7 +512,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
                 user_agent, _, last_seen = self._batch_row_update[key]
                 results[(access_token, ip)] = (user_agent, last_seen)
 
-        rows = yield self.db.simple_select_list(
+        rows = yield self.db_pool.simple_select_list(
             table="user_ips",
             keyvalues={"user_id": user_id},
             retcols=["access_token", "ip", "user_agent", "last_seen"],
@@ -540,7 +542,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
             # Nothing to do
             return
 
-        if not await self.db.updates.has_completed_background_update(
+        if not await self.db_pool.updates.has_completed_background_update(
             "devices_last_seen"
         ):
             # Only start pruning if we have finished populating the devices
@@ -573,4 +575,6 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
         def _prune_old_user_ips_txn(txn):
             txn.execute(sql, (timestamp,))
 
-        await self.db.runInteraction("_prune_old_user_ips", _prune_old_user_ips_txn)
+        await self.db_pool.runInteraction(
+            "_prune_old_user_ips", _prune_old_user_ips_txn
+        )
diff --git a/synapse/storage/data_stores/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index da297b31fb..874ecdf8d2 100644
--- a/synapse/storage/data_stores/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -22,7 +22,7 @@ from twisted.internet import defer
 
 from synapse.logging.opentracing import log_kv, set_tag, trace
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.util.caches.expiringcache import ExpiringCache
 
 logger = logging.getLogger(__name__)
@@ -70,7 +70,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                 stream_pos = current_stream_id
             return messages, stream_pos
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_new_messages_for_device", get_new_messages_for_device_txn
         )
 
@@ -110,7 +110,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             txn.execute(sql, (user_id, device_id, up_to_stream_id))
             return txn.rowcount
 
-        count = yield self.db.runInteraction(
+        count = yield self.db_pool.runInteraction(
             "delete_messages_for_device", delete_messages_for_device_txn
         )
 
@@ -179,7 +179,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                 stream_pos = current_stream_id
             return messages, stream_pos
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_new_device_msgs_for_remote",
             get_new_messages_for_remote_destination_txn,
         )
@@ -204,7 +204,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             )
             txn.execute(sql, (destination, up_to_stream_id))
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "delete_device_msgs_for_remote", delete_messages_for_remote_destination_txn
         )
 
@@ -269,7 +269,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
 
             return updates, upto_token, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_new_device_messages", get_all_new_device_messages_txn
         )
 
@@ -277,17 +277,17 @@ class DeviceInboxWorkerStore(SQLBaseStore):
 class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
     DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(DeviceInboxBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "device_inbox_stream_index",
             index_name="device_inbox_stream_id_user_id",
             table="device_inbox",
             columns=["stream_id", "user_id"],
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.DEVICE_INBOX_STREAM_ID, self._background_drop_index_device_inbox
         )
 
@@ -298,9 +298,9 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
             txn.execute("DROP INDEX IF EXISTS device_inbox_stream_id")
             txn.close()
 
-        yield self.db.runWithConnection(reindex_txn)
+        yield self.db_pool.runWithConnection(reindex_txn)
 
-        yield self.db.updates._end_background_update(self.DEVICE_INBOX_STREAM_ID)
+        yield self.db_pool.updates._end_background_update(self.DEVICE_INBOX_STREAM_ID)
 
         return 1
 
@@ -308,7 +308,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
 class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore):
     DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(DeviceInboxStore, self).__init__(database, db_conn, hs)
 
         # Map of (user_id, device_id) to the last stream_id that has been
@@ -360,7 +360,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
 
         with self._device_inbox_id_gen.get_next() as stream_id:
             now_ms = self.clock.time_msec()
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id
             )
             for user_id in local_messages_by_user_then_device.keys():
@@ -380,7 +380,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
             # Check if we've already inserted a matching message_id for that
             # origin. This can happen if the origin doesn't receive our
             # acknowledgement from the first time we received the message.
-            already_inserted = self.db.simple_select_one_txn(
+            already_inserted = self.db_pool.simple_select_one_txn(
                 txn,
                 table="device_federation_inbox",
                 keyvalues={"origin": origin, "message_id": message_id},
@@ -392,7 +392,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
 
             # Add an entry for this message_id so that we know we've processed
             # it.
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="device_federation_inbox",
                 values={
@@ -410,7 +410,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
 
         with self._device_inbox_id_gen.get_next() as stream_id:
             now_ms = self.clock.time_msec()
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "add_messages_from_remote_to_device_inbox",
                 add_messages_txn,
                 now_ms,
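
The "with self._device_inbox_id_gen.get_next() as stream_id:" pattern above allocates a monotonically increasing stream id and performs the write under it inside the context manager. A deliberately simplified toy model; Synapse's real StreamIdGenerator additionally delays advancing the token readers see until outstanding writes complete, which this sketch ignores:

import sqlite3
from contextlib import contextmanager

class ToyStreamIdGen:
    def __init__(self, start=0):
        self._current = start

    @contextmanager
    def get_next(self):
        # Toy: advance and hand out immediately; the caller persists its
        # rows under this id before anything reads the new token.
        self._current += 1
        yield self._current

    def get_current_token(self):
        return self._current

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE device_inbox "
    "(user_id TEXT, device_id TEXT, stream_id INTEGER, message_json TEXT)"
)
gen = ToyStreamIdGen()

with gen.get_next() as stream_id, conn:
    conn.execute(
        "INSERT INTO device_inbox VALUES (?, ?, ?, ?)",
        ("@alice:hs", "DEV1", stream_id, "{}"),
    )
print(gen.get_current_token())  # 1
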
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/databases/main/devices.py
index 45581a6500..88a7aadfc6 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -31,7 +31,7 @@ from synapse.logging.opentracing import (
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import (
-    Database,
+    DatabasePool,
     LoggingTransaction,
     make_tuple_comparison_clause,
 )
@@ -67,7 +67,7 @@ class DeviceWorkerStore(SQLBaseStore):
         Raises:
             StoreError: if the device is not found
         """
-        return self.db.simple_select_one(
+        return self.db_pool.simple_select_one(
             table="devices",
             keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
             retcols=("user_id", "device_id", "display_name"),
@@ -86,7 +86,7 @@ class DeviceWorkerStore(SQLBaseStore):
             containing "device_id", "user_id" and "display_name" for each
             device.
         """
-        devices = yield self.db.simple_select_list(
+        devices = yield self.db_pool.simple_select_list(
             table="devices",
             keyvalues={"user_id": user_id, "hidden": False},
             retcols=("user_id", "device_id", "display_name"),
@@ -118,7 +118,7 @@ class DeviceWorkerStore(SQLBaseStore):
         if not has_changed:
             return now_stream_id, []
 
-        updates = yield self.db.runInteraction(
+        updates = yield self.db_pool.runInteraction(
             "get_device_updates_by_remote",
             self._get_device_updates_by_remote_txn,
             destination,
@@ -255,7 +255,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
         """
         devices = (
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "_get_e2e_device_keys_txn",
                 self._get_e2e_device_keys_txn,
                 query_map.keys(),
@@ -326,12 +326,12 @@ class DeviceWorkerStore(SQLBaseStore):
             rows = txn.fetchall()
             return rows[0][0]
 
-        return self.db.runInteraction("get_last_device_update_for_remote_user", f)
+        return self.db_pool.runInteraction("get_last_device_update_for_remote_user", f)
 
     def mark_as_sent_devices_by_remote(self, destination, stream_id):
         """Mark that updates have successfully been sent to the destination.
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "mark_as_sent_devices_by_remote",
             self._mark_as_sent_devices_by_remote_txn,
             destination,
@@ -350,7 +350,7 @@ class DeviceWorkerStore(SQLBaseStore):
         txn.execute(sql, (destination, stream_id))
         rows = txn.fetchall()
 
-        self.db.simple_upsert_many_txn(
+        self.db_pool.simple_upsert_many_txn(
             txn=txn,
             table="device_lists_outbound_last_success",
             key_names=("destination", "user_id"),
@@ -376,7 +376,7 @@ class DeviceWorkerStore(SQLBaseStore):
         """
 
         with self._device_list_id_gen.get_next() as stream_id:
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "add_user_sig_change_to_streams",
                 self._add_user_signature_change_txn,
                 from_user_id,
@@ -391,7 +391,7 @@ class DeviceWorkerStore(SQLBaseStore):
             from_user_id,
             stream_id,
         )
-        self.db.simple_insert_txn(
+        self.db_pool.simple_insert_txn(
             txn,
             "user_signature_stream",
             values={
@@ -449,7 +449,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
     @cachedInlineCallbacks(num_args=2, tree=True)
     def _get_cached_user_device(self, user_id, device_id):
-        content = yield self.db.simple_select_one_onecol(
+        content = yield self.db_pool.simple_select_one_onecol(
             table="device_lists_remote_cache",
             keyvalues={"user_id": user_id, "device_id": device_id},
             retcol="content",
@@ -459,7 +459,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
     @cachedInlineCallbacks()
     def get_cached_devices_for_user(self, user_id):
-        devices = yield self.db.simple_select_list(
+        devices = yield self.db_pool.simple_select_list(
             table="device_lists_remote_cache",
             keyvalues={"user_id": user_id},
             retcols=("device_id", "content"),
@@ -475,7 +475,7 @@ class DeviceWorkerStore(SQLBaseStore):
         Returns:
             (stream_id, devices)
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_devices_with_keys_by_user",
             self._get_devices_with_keys_by_user_txn,
             user_id,
@@ -555,7 +555,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
             return changes
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_users_whose_devices_changed", _get_users_whose_devices_changed_txn
         )
 
@@ -574,7 +574,7 @@ class DeviceWorkerStore(SQLBaseStore):
                 SELECT DISTINCT user_ids FROM user_signature_stream
                 WHERE from_user_id = ? AND stream_id > ?
             """
-            rows = yield self.db.execute(
+            rows = yield self.db_pool.execute(
                 "get_users_whose_signatures_changed", None, sql, user_id, from_key
             )
             return {user for row in rows for user in db_to_json(row[0])}
@@ -631,7 +631,7 @@ class DeviceWorkerStore(SQLBaseStore):
 
             return updates, upto_token, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_device_list_changes_for_remotes",
             _get_all_device_list_changes_for_remotes,
         )
@@ -641,7 +641,7 @@ class DeviceWorkerStore(SQLBaseStore):
         """Get the last stream_id we got for a user. May be None if we haven't
         got any information for them.
         """
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="device_lists_remote_extremeties",
             keyvalues={"user_id": user_id},
             retcol="stream_id",
@@ -655,7 +655,7 @@ class DeviceWorkerStore(SQLBaseStore):
         inlineCallbacks=True,
     )
     def get_device_list_last_stream_id_for_remotes(self, user_ids):
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="device_lists_remote_extremeties",
             column="user_id",
             iterable=user_ids,
@@ -680,7 +680,7 @@ class DeviceWorkerStore(SQLBaseStore):
             The IDs of users whose device lists need resync.
         """
         if user_ids:
-            rows = yield self.db.simple_select_many_batch(
+            rows = yield self.db_pool.simple_select_many_batch(
                 table="device_lists_remote_resync",
                 column="user_id",
                 iterable=user_ids,
@@ -688,7 +688,7 @@ class DeviceWorkerStore(SQLBaseStore):
                 desc="get_user_ids_requiring_device_list_resync_with_iterable",
             )
         else:
-            rows = yield self.db.simple_select_list(
+            rows = yield self.db_pool.simple_select_list(
                 table="device_lists_remote_resync",
                 keyvalues=None,
                 retcols=("user_id",),
@@ -701,7 +701,7 @@ class DeviceWorkerStore(SQLBaseStore):
         """Records that the server has reason to believe the cache of the devices
         for the remote users is out of date.
         """
-        return self.db.simple_upsert(
+        return self.db_pool.simple_upsert(
             table="device_lists_remote_resync",
             keyvalues={"user_id": user_id},
             values={},
@@ -714,7 +714,7 @@ class DeviceWorkerStore(SQLBaseStore):
         """
 
         def _mark_remote_user_device_list_as_unsubscribed_txn(txn):
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="device_lists_remote_extremeties",
                 keyvalues={"user_id": user_id},
@@ -723,17 +723,17 @@ class DeviceWorkerStore(SQLBaseStore):
                 txn, self.get_device_list_last_stream_id_for_remote, (user_id,)
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "mark_remote_user_device_list_as_unsubscribed",
             _mark_remote_user_device_list_as_unsubscribed_txn,
         )
 
 
 class DeviceBackgroundUpdateStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(DeviceBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "device_lists_stream_idx",
             index_name="device_lists_stream_user_id",
             table="device_lists_stream",
@@ -741,7 +741,7 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
         )
 
         # create a unique index on device_lists_remote_cache
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "device_lists_remote_cache_unique_idx",
             index_name="device_lists_remote_cache_unique_id",
             table="device_lists_remote_cache",
@@ -750,7 +750,7 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
         )
 
         # And one on device_lists_remote_extremeties
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "device_lists_remote_extremeties_unique_idx",
             index_name="device_lists_remote_extremeties_unique_idx",
             table="device_lists_remote_extremeties",
@@ -759,22 +759,22 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
         )
 
         # once they complete, we can remove the old non-unique indexes.
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES,
             self._drop_device_list_streams_non_unique_indexes,
         )
 
         # clear out duplicate device list outbound pokes
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, self._remove_duplicate_outbound_pokes,
         )
 
         # a pair of background updates that were added during the 1.14 release cycle,
         # but replaced with 58/06dlols_unique_idx.py
-        self.db.updates.register_noop_background_update(
+        self.db_pool.updates.register_noop_background_update(
             "device_lists_outbound_last_success_unique_idx",
         )
-        self.db.updates.register_noop_background_update(
+        self.db_pool.updates.register_noop_background_update(
             "drop_device_lists_outbound_last_success_non_unique_idx",
         )
 
@@ -786,8 +786,8 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
             txn.execute("DROP INDEX IF EXISTS device_lists_remote_extremeties_id")
             txn.close()
 
-        yield self.db.runWithConnection(f)
-        yield self.db.updates._end_background_update(
+        yield self.db_pool.runWithConnection(f)
+        yield self.db_pool.updates._end_background_update(
             DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES
         )
         return 1
@@ -807,7 +807,7 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
 
         def _txn(txn):
             clause, args = make_tuple_comparison_clause(
-                self.db.engine, [(x, last_row[x]) for x in KEY_COLS]
+                self.db_pool.engine, [(x, last_row[x]) for x in KEY_COLS]
             )
             sql = """
                 SELECT stream_id, destination, user_id, device_id, MAX(ts) AS ts
@@ -823,30 +823,32 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
                 ",".join(KEY_COLS),  # ORDER BY
             )
             txn.execute(sql, args + [batch_size])
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
 
             row = None
             for row in rows:
-                self.db.simple_delete_txn(
+                self.db_pool.simple_delete_txn(
                     txn, "device_lists_outbound_pokes", {x: row[x] for x in KEY_COLS},
                 )
 
                 row["sent"] = False
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn, "device_lists_outbound_pokes", row,
                 )
 
             if row:
-                self.db.updates._background_update_progress_txn(
+                self.db_pool.updates._background_update_progress_txn(
                     txn, BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, {"last_row": row},
                 )
 
             return len(rows)
 
-        rows = await self.db.runInteraction(BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, _txn)
+        rows = await self.db_pool.runInteraction(
+            BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, _txn
+        )
 
         if not rows:
-            await self.db.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES
             )
 
@@ -854,7 +856,7 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
 
 
 class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(DeviceStore, self).__init__(database, db_conn, hs)
 
         # Map of (user_id, device_id) -> bool. If there is an entry that implies
@@ -885,7 +887,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             return False
 
         try:
-            inserted = yield self.db.simple_insert(
+            inserted = yield self.db_pool.simple_insert(
                 "devices",
                 values={
                     "user_id": user_id,
@@ -899,7 +901,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             if not inserted:
                 # if the device already exists, check if it's a real device, or
                 # if the device ID is reserved by something else
-                hidden = yield self.db.simple_select_one_onecol(
+                hidden = yield self.db_pool.simple_select_one_onecol(
                     "devices",
                     keyvalues={"user_id": user_id, "device_id": device_id},
                     retcol="hidden",
@@ -934,7 +936,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         Returns:
             defer.Deferred
         """
-        yield self.db.simple_delete_one(
+        yield self.db_pool.simple_delete_one(
             table="devices",
             keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
             desc="delete_device",
@@ -952,7 +954,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         Returns:
             defer.Deferred
         """
-        yield self.db.simple_delete_many(
+        yield self.db_pool.simple_delete_many(
             table="devices",
             column="device_id",
             iterable=device_ids,
@@ -981,7 +983,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             updates["display_name"] = new_display_name
         if not updates:
             return defer.succeed(None)
-        return self.db.simple_update_one(
+        return self.db_pool.simple_update_one(
             table="devices",
             keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
             updatevalues=updates,
@@ -1005,7 +1007,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         Returns:
             Deferred[None]
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "update_remote_device_list_cache_entry",
             self._update_remote_device_list_cache_entry_txn,
             user_id,
@@ -1018,7 +1020,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         self, txn, user_id, device_id, content, stream_id
     ):
         if content.get("deleted"):
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="device_lists_remote_cache",
                 keyvalues={"user_id": user_id, "device_id": device_id},
@@ -1026,7 +1028,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
 
             txn.call_after(self.device_id_exists_cache.invalidate, (user_id, device_id))
         else:
-            self.db.simple_upsert_txn(
+            self.db_pool.simple_upsert_txn(
                 txn,
                 table="device_lists_remote_cache",
                 keyvalues={"user_id": user_id, "device_id": device_id},
@@ -1042,7 +1044,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
         )
 
-        self.db.simple_upsert_txn(
+        self.db_pool.simple_upsert_txn(
             txn,
             table="device_lists_remote_extremeties",
             keyvalues={"user_id": user_id},
@@ -1066,7 +1068,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         Returns:
             Deferred[None]
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "update_remote_device_list_cache",
             self._update_remote_device_list_cache_txn,
             user_id,
@@ -1075,11 +1077,11 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         )
 
     def _update_remote_device_list_cache_txn(self, txn, user_id, devices, stream_id):
-        self.db.simple_delete_txn(
+        self.db_pool.simple_delete_txn(
             txn, table="device_lists_remote_cache", keyvalues={"user_id": user_id}
         )
 
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="device_lists_remote_cache",
             values=[
@@ -1098,7 +1100,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
         )
 
-        self.db.simple_upsert_txn(
+        self.db_pool.simple_upsert_txn(
             txn,
             table="device_lists_remote_extremeties",
             keyvalues={"user_id": user_id},
@@ -1111,7 +1113,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         # If we're replacing the remote user's device list cache presumably
         # we've done a full resync, so we remove the entry that says we need
         # to resync
-        self.db.simple_delete_txn(
+        self.db_pool.simple_delete_txn(
             txn, table="device_lists_remote_resync", keyvalues={"user_id": user_id},
         )
 
@@ -1124,7 +1126,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             return
 
         with self._device_list_id_gen.get_next_mult(len(device_ids)) as stream_ids:
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "add_device_change_to_stream",
                 self._add_device_change_to_stream_txn,
                 user_id,
@@ -1139,7 +1141,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         with self._device_list_id_gen.get_next_mult(
             len(hosts) * len(device_ids)
         ) as stream_ids:
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "add_device_outbound_poke_to_stream",
                 self._add_device_outbound_poke_to_stream_txn,
                 user_id,
@@ -1174,7 +1176,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             [(user_id, device_id, min_stream_id) for device_id in device_ids],
         )
 
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="device_lists_stream",
             values=[
@@ -1196,7 +1198,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         now = self._clock.time_msec()
         next_stream_id = iter(stream_ids)
 
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="device_lists_outbound_pokes",
             values=[
@@ -1303,7 +1305,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
 
         return run_as_background_process(
             "prune_old_outbound_device_pokes",
-            self.db.runInteraction,
+            self.db_pool.runInteraction,
             "_prune_old_outbound_device_pokes",
             _prune_txn,
         )
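
devices.py leans heavily on txn.call_after(...) to queue cache invalidations that run only once the transaction commits, so a rolled-back write never poisons a cache. A compact illustration, with toy classes standing in for LoggingTransaction and the pool:

import sqlite3

class Txn:
    # Toy stand-in for Synapse's LoggingTransaction.
    def __init__(self, cursor):
        self._cursor = cursor
        self._after_callbacks = []

    def execute(self, sql, args=()):
        self._cursor.execute(sql, args)

    def call_after(self, fn, *args):
        # Queue fn(*args) to run only after a successful commit.
        self._after_callbacks.append((fn, args))

def run_interaction(conn, func, *args):
    txn = Txn(conn.cursor())
    try:
        result = func(txn, *args)
        conn.commit()
    except Exception:
        conn.rollback()  # callbacks deliberately skipped on failure
        raise
    for fn, fn_args in txn._after_callbacks:
        fn(*fn_args)
    return result

device_cache = {("@alice:hs", "DEV1"): {"display_name": "phone"}}
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE devices (user_id TEXT, device_id TEXT)")
conn.execute("INSERT INTO devices VALUES ('@alice:hs', 'DEV1')")

def delete_device_txn(txn, user_id, device_id):
    txn.execute(
        "DELETE FROM devices WHERE user_id = ? AND device_id = ?",
        (user_id, device_id),
    )
    txn.call_after(device_cache.pop, (user_id, device_id), None)

run_interaction(conn, delete_device_txn, "@alice:hs", "DEV1")
print(device_cache)  # {} -- invalidated only after the commit succeeded
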
diff --git a/synapse/storage/data_stores/main/directory.py b/synapse/storage/databases/main/directory.py
index e1d1bc3e05..7819bfcbb3 100644
--- a/synapse/storage/data_stores/main/directory.py
+++ b/synapse/storage/databases/main/directory.py
@@ -37,7 +37,7 @@ class DirectoryWorkerStore(SQLBaseStore):
             Deferred: results in namedtuple with keys "room_id" and
             "servers" or None if no association can be found
         """
-        room_id = yield self.db.simple_select_one_onecol(
+        room_id = yield self.db_pool.simple_select_one_onecol(
             "room_aliases",
             {"room_alias": room_alias.to_string()},
             "room_id",
@@ -48,7 +48,7 @@ class DirectoryWorkerStore(SQLBaseStore):
         if not room_id:
             return None
 
-        servers = yield self.db.simple_select_onecol(
+        servers = yield self.db_pool.simple_select_onecol(
             "room_alias_servers",
             {"room_alias": room_alias.to_string()},
             "server",
@@ -61,7 +61,7 @@ class DirectoryWorkerStore(SQLBaseStore):
         return RoomAliasMapping(room_id, room_alias.to_string(), servers)
 
     def get_room_alias_creator(self, room_alias):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="room_aliases",
             keyvalues={"room_alias": room_alias},
             retcol="creator",
@@ -70,7 +70,7 @@ class DirectoryWorkerStore(SQLBaseStore):
 
     @cached(max_entries=5000)
     def get_aliases_for_room(self, room_id):
-        return self.db.simple_select_onecol(
+        return self.db_pool.simple_select_onecol(
             "room_aliases",
             {"room_id": room_id},
             "room_alias",
@@ -94,7 +94,7 @@ class DirectoryStore(DirectoryWorkerStore):
         """
 
         def alias_txn(txn):
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 "room_aliases",
                 {
@@ -104,7 +104,7 @@ class DirectoryStore(DirectoryWorkerStore):
                 },
             )
 
-            self.db.simple_insert_many_txn(
+            self.db_pool.simple_insert_many_txn(
                 txn,
                 table="room_alias_servers",
                 values=[
@@ -118,7 +118,7 @@ class DirectoryStore(DirectoryWorkerStore):
             )
 
         try:
-            ret = yield self.db.runInteraction(
+            ret = yield self.db_pool.runInteraction(
                 "create_room_alias_association", alias_txn
             )
         except self.database_engine.module.IntegrityError:
@@ -129,7 +129,7 @@ class DirectoryStore(DirectoryWorkerStore):
 
     @defer.inlineCallbacks
     def delete_room_alias(self, room_alias):
-        room_id = yield self.db.runInteraction(
+        room_id = yield self.db_pool.runInteraction(
             "delete_room_alias", self._delete_room_alias_txn, room_alias
         )
 
@@ -190,6 +190,6 @@ class DirectoryStore(DirectoryWorkerStore):
                 txn, self.get_aliases_for_room, (new_room_id,)
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "_update_aliases_for_room_txn", _update_aliases_for_room_txn
         )
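
The directory hunks above show the other common shape: several simple_*_txn helpers composed inside one interaction so they commit atomically. A hedged sketch of that pattern after the rename (create_alias and the 409 message are illustrative, not this module's API):

from synapse.api.errors import StoreError
from synapse.storage._base import SQLBaseStore


class ExampleDirectoryStore(SQLBaseStore):
    async def create_alias(self, room_alias, room_id, servers, creator):
        def alias_txn(txn):
            # Both inserts share a transaction: the alias row and its
            # server rows are committed or rolled back together.
            self.db_pool.simple_insert_txn(
                txn,
                "room_aliases",
                {"room_alias": room_alias, "room_id": room_id, "creator": creator},
            )
            self.db_pool.simple_insert_many_txn(
                txn,
                table="room_alias_servers",
                values=[{"room_alias": room_alias, "server": s} for s in servers],
            )

        try:
            await self.db_pool.runInteraction("create_alias", alias_txn)
        except self.database_engine.module.IntegrityError:
            # A unique constraint violation surfaces as a conflict error.
            raise StoreError(409, "Alias already exists")
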
diff --git a/synapse/storage/data_stores/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py
index 615364f018..90152edc3c 100644
--- a/synapse/storage/data_stores/main/e2e_room_keys.py
+++ b/synapse/storage/databases/main/e2e_room_keys.py
@@ -38,7 +38,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             StoreError
         """
 
-        yield self.db.simple_update_one(
+        yield self.db_pool.simple_update_one(
             table="e2e_room_keys",
             keyvalues={
                 "user_id": user_id,
@@ -89,7 +89,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 }
             )
 
-        yield self.db.simple_insert_many(
+        yield self.db_pool.simple_insert_many(
             table="e2e_room_keys", values=values, desc="add_e2e_room_keys"
         )
 
@@ -125,7 +125,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             if session_id:
                 keyvalues["session_id"] = session_id
 
-        rows = yield self.db.simple_select_list(
+        rows = yield self.db_pool.simple_select_list(
             table="e2e_room_keys",
             keyvalues=keyvalues,
             retcols=(
@@ -171,7 +171,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
            Deferred[dict[str, dict[str, dict]]]: a map of room IDs to session IDs to room key
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_e2e_room_keys_multi",
             self._get_e2e_room_keys_multi_txn,
             user_id,
@@ -235,7 +235,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             version (str): the version ID of the backup we're querying about
         """
 
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="e2e_room_keys",
             keyvalues={"user_id": user_id, "version": version},
             retcol="COUNT(*)",
@@ -268,7 +268,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             if session_id:
                 keyvalues["session_id"] = session_id
 
-        yield self.db.simple_delete(
+        yield self.db_pool.simple_delete(
             table="e2e_room_keys", keyvalues=keyvalues, desc="delete_e2e_room_keys"
         )
 
@@ -313,7 +313,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                     # it isn't there.
                     raise StoreError(404, "No row found")
 
-            result = self.db.simple_select_one_txn(
+            result = self.db_pool.simple_select_one_txn(
                 txn,
                 table="e2e_room_keys_versions",
                 keyvalues={"user_id": user_id, "version": this_version, "deleted": 0},
@@ -325,7 +325,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 result["etag"] = 0
             return result
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn
         )
 
@@ -353,7 +353,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
 
             new_version = str(int(current_version) + 1)
 
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="e2e_room_keys_versions",
                 values={
@@ -366,7 +366,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
 
             return new_version
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn
         )
 
@@ -392,7 +392,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             updatevalues["etag"] = version_etag
 
         if updatevalues:
-            return self.db.simple_update(
+            return self.db_pool.simple_update(
                 table="e2e_room_keys_versions",
                 keyvalues={"user_id": user_id, "version": version},
                 updatevalues=updatevalues,
@@ -421,19 +421,19 @@ class EndToEndRoomKeyStore(SQLBaseStore):
             else:
                 this_version = version
 
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="e2e_room_keys",
                 keyvalues={"user_id": user_id, "version": this_version},
             )
 
-            return self.db.simple_update_one_txn(
+            return self.db_pool.simple_update_one_txn(
                 txn,
                 table="e2e_room_keys_versions",
                 keyvalues={"user_id": user_id, "version": this_version},
                 updatevalues={"deleted": 1},
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "delete_e2e_room_keys_version", _delete_e2e_room_keys_version_txn
         )
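
Since the change is this uniform (self.db -> self.db_pool, Database -> DatabasePool, data_stores -> databases in import paths), it is the kind of rename that could in principle be scripted. A rough, illustrative codemod sketch; this is not how the patch was actually produced, and it is naive about matches inside strings and comments:

import pathlib
import re

PATTERNS = [
    (re.compile(r"\bself\.db\b"), "self.db_pool"),
    # The word boundary prevents rewriting DatabasePool itself on a rerun.
    (re.compile(r"\bDatabase\b"), "DatabasePool"),
    (re.compile(r"synapse\.storage\.data_stores"), "synapse.storage.databases"),
]

for path in pathlib.Path("synapse/storage").rglob("*.py"):
    text = path.read_text()
    for pattern, replacement in PATTERNS:
        text = pattern.sub(replacement, text)
    path.write_text(text)
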
diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 317c07a829..40354b8304 100644
--- a/synapse/storage/data_stores/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -51,7 +51,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         if not query_list:
             return {}
 
-        results = yield self.db.runInteraction(
+        results = yield self.db_pool.runInteraction(
             "get_e2e_device_keys",
             self._get_e2e_device_keys_txn,
             query_list,
@@ -128,7 +128,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         )
 
         txn.execute(sql, query_params)
-        rows = self.db.cursor_to_dict(txn)
+        rows = self.db_pool.cursor_to_dict(txn)
 
         result = {}
         for row in rows:
@@ -146,7 +146,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         )
 
         txn.execute(signature_sql, signature_query_params)
-        rows = self.db.cursor_to_dict(txn)
+        rows = self.db_pool.cursor_to_dict(txn)
 
         # add each cross-signing signature to the correct device in the result dict.
         for row in rows:
@@ -189,7 +189,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
             key_id) to json string for key
         """
 
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="e2e_one_time_keys_json",
             column="key_id",
             iterable=key_ids,
@@ -222,7 +222,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
             # a unique constraint. If there is a race of two calls to
             # `add_e2e_one_time_keys` then they'll conflict and we will only
             # insert one set.
-            self.db.simple_insert_many_txn(
+            self.db_pool.simple_insert_many_txn(
                 txn,
                 table="e2e_one_time_keys_json",
                 values=[
@@ -241,7 +241,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
                 txn, self.count_e2e_one_time_keys, (user_id, device_id)
             )
 
-        yield self.db.runInteraction(
+        yield self.db_pool.runInteraction(
             "add_e2e_one_time_keys_insert", _add_e2e_one_time_keys
         )
 
@@ -264,7 +264,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
                 result[algorithm] = key_count
             return result
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "count_e2e_one_time_keys", _count_e2e_one_time_keys
         )
 
@@ -318,7 +318,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
                 to None.
 
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_bare_e2e_cross_signing_keys_bulk",
             self._get_bare_e2e_cross_signing_keys_bulk_txn,
             user_ids,
@@ -361,7 +361,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
             )
 
             txn.execute(sql, params)
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
 
             for row in rows:
                 user_id = row["user_id"]
@@ -420,7 +420,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
                 query_params.extend(item)
 
             txn.execute(sql, query_params)
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
 
             # and add the signatures to the appropriate keys
             for row in rows:
@@ -470,7 +470,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
         result = yield self._get_bare_e2e_cross_signing_keys_bulk(user_ids)
 
         if from_user_id:
-            result = yield self.db.runInteraction(
+            result = yield self.db_pool.runInteraction(
                 "get_e2e_cross_signing_signatures",
                 self._get_e2e_cross_signing_signatures_txn,
                 result,
@@ -531,7 +531,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
 
             return updates, upto_token, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_user_signature_changes_for_remotes",
             _get_all_user_signature_changes_for_remotes_txn,
         )
@@ -549,7 +549,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
             set_tag("time_now", time_now)
             set_tag("device_keys", device_keys)
 
-            old_key_json = self.db.simple_select_one_onecol_txn(
+            old_key_json = self.db_pool.simple_select_one_onecol_txn(
                 txn,
                 table="e2e_device_keys_json",
                 keyvalues={"user_id": user_id, "device_id": device_id},
@@ -565,7 +565,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
                 log_kv({"Message": "Device key already stored."})
                 return False
 
-            self.db.simple_upsert_txn(
+            self.db_pool.simple_upsert_txn(
                 txn,
                 table="e2e_device_keys_json",
                 keyvalues={"user_id": user_id, "device_id": device_id},
@@ -574,7 +574,9 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
             log_kv({"message": "Device keys stored."})
             return True
 
-        return self.db.runInteraction("set_e2e_device_keys", _set_e2e_device_keys_txn)
+        return self.db_pool.runInteraction(
+            "set_e2e_device_keys", _set_e2e_device_keys_txn
+        )
 
     def claim_e2e_one_time_keys(self, query_list):
         """Take a list of one time keys out of the database"""
@@ -613,7 +615,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
                 )
             return result
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "claim_e2e_one_time_keys", _claim_e2e_one_time_keys
         )
 
@@ -626,12 +628,12 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
                     "user_id": user_id,
                 }
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="e2e_device_keys_json",
                 keyvalues={"user_id": user_id, "device_id": device_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="e2e_one_time_keys_json",
                 keyvalues={"user_id": user_id, "device_id": device_id},
@@ -640,7 +642,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
                 txn, self.count_e2e_one_time_keys, (user_id, device_id)
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "delete_e2e_keys_by_device", delete_e2e_keys_by_device_txn
         )
 
@@ -679,7 +681,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
         # We only need to do this for local users, since remote servers should be
         # responsible for checking this for their own users.
         if self.hs.is_mine_id(user_id):
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 "devices",
                 values={
@@ -692,7 +694,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
 
         # and finally, store the key itself
         with self._cross_signing_id_gen.get_next() as stream_id:
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 "e2e_cross_signing_keys",
                 values={
@@ -715,7 +717,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
             key_type (str): the type of cross-signing key to set
             key (dict): the key data
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "add_e2e_cross_signing_key",
             self._set_e2e_cross_signing_key_txn,
             user_id,
@@ -730,7 +732,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
             user_id (str): the user who made the signatures
             signatures (iterable[SignatureListItem]): signatures to add
         """
-        return self.db.simple_insert_many(
+        return self.db_pool.simple_insert_many(
             "e2e_cross_signing_signatures",
             [
                 {
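
The end-to-end key hunks also exercise the cache plumbing: writes invalidate @cached methods via txn.call_after, so invalidation only fires once the transaction commits. A sketch under that pattern; ExampleKeyStore, example_keys and key_json are invented for illustration:

from synapse.storage._base import SQLBaseStore
from synapse.util.caches.descriptors import cached


class ExampleKeyStore(SQLBaseStore):
    @cached()
    def get_example_keys(self, user_id):
        return self.db_pool.simple_select_onecol(
            table="example_keys",
            keyvalues={"user_id": user_id},
            retcol="key_json",
            desc="get_example_keys",
        )

    def delete_example_keys(self, user_id):
        def _delete_txn(txn):
            self.db_pool.simple_delete_txn(
                txn, table="example_keys", keyvalues={"user_id": user_id}
            )
            # Deferred until commit: a rolled-back delete must not
            # leave the cache empty.
            txn.call_after(self.get_example_keys.invalidate, (user_id,))

        return self.db_pool.runInteraction("delete_example_keys", _delete_txn)
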
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index a6bb3221ff..eddb32b4d3 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -22,9 +22,9 @@ from twisted.internet import defer
 from synapse.api.errors import StoreError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
-from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
-from synapse.storage.data_stores.main.signatures import SignatureWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
 from synapse.util.caches.descriptors import cached
 from synapse.util.iterutils import batch_iter
 
@@ -65,7 +65,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         Returns:
             list of event_ids
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_auth_chain_ids",
             self._get_auth_chain_ids_txn,
             event_ids,
@@ -114,7 +114,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             Deferred[Set[str]]
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_auth_chain_difference",
             self._get_auth_chain_difference_txn,
             state_sets,
@@ -260,12 +260,12 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         return {eid for eid, n in event_to_missing_sets.items() if n}
 
     def get_oldest_events_in_room(self, room_id):
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_oldest_events_in_room", self._get_oldest_events_in_room_txn, room_id
         )
 
     def get_oldest_events_with_depth_in_room(self, room_id):
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_oldest_events_with_depth_in_room",
             self.get_oldest_events_with_depth_in_room_txn,
             room_id,
@@ -296,7 +296,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         Returns:
             Deferred[int]
         """
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="events",
             column="event_id",
             iterable=event_ids,
@@ -310,7 +310,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             return max(row["depth"] for row in rows)
 
     def _get_oldest_events_in_room_txn(self, txn, room_id):
-        return self.db.simple_select_onecol_txn(
+        return self.db_pool.simple_select_onecol_txn(
             txn,
             table="event_backward_extremities",
             keyvalues={"room_id": room_id},
@@ -332,7 +332,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
 
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_prev_events_for_room", self._get_prev_events_for_room_txn, room_id
         )
 
@@ -387,13 +387,13 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             txn.execute(sql, query_args)
             return [room_id for room_id, in txn]
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_rooms_with_many_extremities", _get_rooms_with_many_extremities_txn
         )
 
     @cached(max_entries=5000, iterable=True)
     def get_latest_event_ids_in_room(self, room_id):
-        return self.db.simple_select_onecol(
+        return self.db_pool.simple_select_onecol(
             table="event_forward_extremities",
             keyvalues={"room_id": room_id},
             retcol="event_id",
@@ -403,12 +403,12 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
     def get_min_depth(self, room_id):
         """ For hte given room, get the minimum depth we have seen for it.
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_min_depth", self._get_min_depth_interaction, room_id
         )
 
     def _get_min_depth_interaction(self, txn, room_id):
-        min_depth = self.db.simple_select_one_onecol_txn(
+        min_depth = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="room_depth",
             keyvalues={"room_id": room_id},
@@ -474,7 +474,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             txn.execute(sql, (stream_ordering, room_id))
             return [event_id for event_id, in txn]
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_forward_extremeties_for_room", get_forward_extremeties_for_room_txn
         )
 
@@ -489,7 +489,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
             limit (int)
         """
         return (
-            self.db.runInteraction(
+            self.db_pool.runInteraction(
                 "get_backfill_events",
                 self._get_backfill_events,
                 room_id,
@@ -520,7 +520,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         queue = PriorityQueue()
 
         for event_id in event_list:
-            depth = self.db.simple_select_one_onecol_txn(
+            depth = self.db_pool.simple_select_one_onecol_txn(
                 txn,
                 table="events",
                 keyvalues={"event_id": event_id, "room_id": room_id},
@@ -552,7 +552,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
 
     @defer.inlineCallbacks
     def get_missing_events(self, room_id, earliest_events, latest_events, limit):
-        ids = yield self.db.runInteraction(
+        ids = yield self.db_pool.runInteraction(
             "get_missing_events",
             self._get_missing_events,
             room_id,
@@ -605,7 +605,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         Returns:
             Deferred[list[str]]
         """
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="event_edges",
             column="prev_event_id",
             iterable=event_ids,
@@ -628,10 +628,10 @@ class EventFederationStore(EventFederationWorkerStore):
 
     EVENT_AUTH_STATE_ONLY = "event_auth_state_only"
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(EventFederationStore, self).__init__(database, db_conn, hs)
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.EVENT_AUTH_STATE_ONLY, self._background_delete_non_state_event_auth
         )
 
@@ -658,13 +658,13 @@ class EventFederationStore(EventFederationWorkerStore):
 
         return run_as_background_process(
             "delete_old_forward_extrem_cache",
-            self.db.runInteraction,
+            self.db_pool.runInteraction,
             "_delete_old_forward_extrem_cache",
             _delete_old_forward_extrem_cache_txn,
         )
 
     def clean_room_for_join(self, room_id):
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "clean_room_for_join", self._clean_room_for_join_txn, room_id
         )
 
@@ -708,17 +708,19 @@ class EventFederationStore(EventFederationWorkerStore):
                 "max_stream_id_exclusive": min_stream_id,
             }
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, self.EVENT_AUTH_STATE_ONLY, new_progress
             )
 
             return min_stream_id >= target_min_stream_id
 
-        result = yield self.db.runInteraction(
+        result = yield self.db_pool.runInteraction(
             self.EVENT_AUTH_STATE_ONLY, delete_event_auth
         )
 
         if not result:
-            yield self.db.updates._end_background_update(self.EVENT_AUTH_STATE_ONLY)
+            yield self.db_pool.updates._end_background_update(
+                self.EVENT_AUTH_STATE_ONLY
+            )
 
         return batch_size
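
Alongside the attribute rename, constructors swap their type annotation from Database to DatabasePool while keeping the same three arguments, as in EventFederationStore above. The minimal shape (ExampleFederationStore is hypothetical):

from synapse.storage.database import DatabasePool
from synapse.storage.databases.main.events_worker import EventsWorkerStore


class ExampleFederationStore(EventsWorkerStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        # Same (database, db_conn, hs) signature as before; the base
        # class now stores the pool as self.db_pool rather than self.db.
        super(ExampleFederationStore, self).__init__(database, db_conn, hs)
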
diff --git a/synapse/storage/data_stores/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index ad82838901..b8cefb4d5e 100644
--- a/synapse/storage/data_stores/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -21,7 +21,7 @@ from canonicaljson import json
 
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import LoggingTransaction, SQLBaseStore, db_to_json
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.util.caches.descriptors import cachedInlineCallbacks
 
 logger = logging.getLogger(__name__)
@@ -66,7 +66,7 @@ def _deserialize_action(actions, is_highlight):
 
 
 class EventPushActionsWorkerStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(EventPushActionsWorkerStore, self).__init__(database, db_conn, hs)
 
         # These get correctly set by _find_stream_orderings_for_times_txn
@@ -91,7 +91,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
     def get_unread_event_push_actions_by_room_for_user(
         self, room_id, user_id, last_read_event_id
     ):
-        ret = yield self.db.runInteraction(
+        ret = yield self.db_pool.runInteraction(
             "get_unread_event_push_actions_by_room",
             self._get_unread_counts_by_receipt_txn,
             room_id,
@@ -176,7 +176,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, (min_stream_ordering, max_stream_ordering))
             return [r[0] for r in txn]
 
-        ret = await self.db.runInteraction("get_push_action_users_in_range", f)
+        ret = await self.db_pool.runInteraction("get_push_action_users_in_range", f)
         return ret
 
     async def get_unread_push_actions_for_user_in_range_for_http(
@@ -230,7 +230,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, args)
             return txn.fetchall()
 
-        after_read_receipt = await self.db.runInteraction(
+        after_read_receipt = await self.db_pool.runInteraction(
             "get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt
         )
 
@@ -258,7 +258,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, args)
             return txn.fetchall()
 
-        no_read_receipt = await self.db.runInteraction(
+        no_read_receipt = await self.db_pool.runInteraction(
             "get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt
         )
 
@@ -332,7 +332,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, args)
             return txn.fetchall()
 
-        after_read_receipt = await self.db.runInteraction(
+        after_read_receipt = await self.db_pool.runInteraction(
             "get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt
         )
 
@@ -360,7 +360,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, args)
             return txn.fetchall()
 
-        no_read_receipt = await self.db.runInteraction(
+        no_read_receipt = await self.db_pool.runInteraction(
             "get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt
         )
 
@@ -410,7 +410,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, (user_id, min_stream_ordering))
             return bool(txn.fetchone())
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_if_maybe_push_in_range_for_user",
             _get_if_maybe_push_in_range_for_user_txn,
         )
@@ -461,7 +461,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
                 ),
             )
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "add_push_actions_to_staging", _add_push_actions_to_staging_txn
         )
 
@@ -471,7 +471,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
         """
 
         try:
-            res = await self.db.simple_delete(
+            res = await self.db_pool.simple_delete(
                 table="event_push_actions_staging",
                 keyvalues={"event_id": event_id},
                 desc="remove_push_actions_from_staging",
@@ -488,7 +488,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
     def _find_stream_orderings_for_times(self):
         return run_as_background_process(
             "event_push_action_stream_orderings",
-            self.db.runInteraction,
+            self.db_pool.runInteraction,
             "_find_stream_orderings_for_times",
             self._find_stream_orderings_for_times_txn,
         )
@@ -524,7 +524,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             Deferred[int]: stream ordering of the first event received on/after
                 the timestamp
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "_find_first_stream_ordering_after_ts_txn",
             self._find_first_stream_ordering_after_ts_txn,
             ts,
@@ -619,24 +619,26 @@ class EventPushActionsWorkerStore(SQLBaseStore):
             txn.execute(sql, (stream_ordering,))
             return txn.fetchone()
 
-        result = await self.db.runInteraction("get_time_of_last_push_action_before", f)
+        result = await self.db_pool.runInteraction(
+            "get_time_of_last_push_action_before", f
+        )
         return result[0] if result else None
 
 
 class EventPushActionsStore(EventPushActionsWorkerStore):
     EPA_HIGHLIGHT_INDEX = "epa_highlight_index"
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(EventPushActionsStore, self).__init__(database, db_conn, hs)
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             self.EPA_HIGHLIGHT_INDEX,
             index_name="event_push_actions_u_highlight",
             table="event_push_actions",
             columns=["user_id", "stream_ordering"],
         )
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "event_push_actions_highlights_index",
             index_name="event_push_actions_highlights_index",
             table="event_push_actions",
@@ -678,9 +680,9 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
                 " LIMIT ?" % (before_clause,)
             )
             txn.execute(sql, args)
-            return self.db.cursor_to_dict(txn)
+            return self.db_pool.cursor_to_dict(txn)
 
-        push_actions = await self.db.runInteraction("get_push_actions_for_user", f)
+        push_actions = await self.db_pool.runInteraction("get_push_actions_for_user", f)
         for pa in push_actions:
             pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"])
         return push_actions
@@ -690,7 +692,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             txn.execute("SELECT MAX(stream_ordering) FROM event_push_actions")
             return txn.fetchone()
 
-        result = await self.db.runInteraction(
+        result = await self.db_pool.runInteraction(
             "get_latest_push_action_stream_ordering", f
         )
         return result[0] or 0
@@ -753,7 +755,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             while True:
                 logger.info("Rotating notifications")
 
-                caught_up = await self.db.runInteraction(
+                caught_up = await self.db_pool.runInteraction(
                     "_rotate_notifs", self._rotate_notifs_txn
                 )
                 if caught_up:
@@ -767,7 +769,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
         the archiving process has caught up or not.
         """
 
-        old_rotate_stream_ordering = self.db.simple_select_one_onecol_txn(
+        old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="event_push_summary_stream_ordering",
             keyvalues={},
@@ -803,7 +805,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
         return caught_up
 
     def _rotate_notifs_before_txn(self, txn, rotate_to_stream_ordering):
-        old_rotate_stream_ordering = self.db.simple_select_one_onecol_txn(
+        old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="event_push_summary_stream_ordering",
             keyvalues={},
@@ -835,7 +837,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
         # If the `old.user_id` above is NULL then we know there isn't already an
         # entry in the table, so we simply insert it. Otherwise we update the
         # existing table.
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="event_push_summary",
             values=[
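
EventPushActionsStore registers its indexes with the background updater rather than creating them at startup. A hedged sketch of that registration after the rename; the update and index names here are made up:

from synapse.storage._base import SQLBaseStore
from synapse.storage.database import DatabasePool


class ExamplePushStore(SQLBaseStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        super(ExamplePushStore, self).__init__(database, db_conn, hs)

        # The index is built lazily by the background updater, so a
        # long CREATE INDEX does not block server startup.
        self.db_pool.updates.register_background_index_update(
            "example_index_update",
            index_name="example_index",
            table="event_push_actions",
            columns=["user_id", "stream_ordering"],
        )
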
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/databases/main/events.py
index 0c9c02afa1..4d8a24ce4b 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -32,8 +32,8 @@ from synapse.events import EventBase  # noqa: F401
 from synapse.events.snapshot import EventContext  # noqa: F401
 from synapse.logging.utils import log_function
 from synapse.storage._base import db_to_json, make_in_list_sql_clause
-from synapse.storage.data_stores.main.search import SearchEntry
-from synapse.storage.database import Database, LoggingTransaction
+from synapse.storage.database import DatabasePool, LoggingTransaction
+from synapse.storage.databases.main.search import SearchEntry
 from synapse.storage.util.id_generators import StreamIdGenerator
 from synapse.types import StateMap, get_domain_from_id
 from synapse.util.frozenutils import frozendict_json_encoder
@@ -41,7 +41,7 @@ from synapse.util.iterutils import batch_iter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
-    from synapse.storage.data_stores.main import DataStore
+    from synapse.storage.databases.main import DataStore
 
 
 logger = logging.getLogger(__name__)
@@ -132,9 +132,11 @@ class PersistEventsStore:
     Note: This is not part of the `DataStore` mixin.
     """
 
-    def __init__(self, hs: "HomeServer", db: Database, main_data_store: "DataStore"):
+    def __init__(
+        self, hs: "HomeServer", db: DatabasePool, main_data_store: "DataStore"
+    ):
         self.hs = hs
-        self.db = db
+        self.db_pool = db
         self.store = main_data_store
         self.database_engine = db.engine
         self._clock = hs.get_clock()
@@ -207,7 +209,7 @@ class PersistEventsStore:
             for (event, context), stream in zip(events_and_contexts, stream_orderings):
                 event.internal_metadata.stream_ordering = stream
 
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "persist_events",
                 self._persist_events_txn,
                 events_and_contexts=events_and_contexts,
@@ -283,7 +285,7 @@ class PersistEventsStore:
             results.extend(r[0] for r in txn if not db_to_json(r[1]).get("soft_failed"))
 
         for chunk in batch_iter(event_ids, 100):
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
             )
 
@@ -347,7 +349,7 @@ class PersistEventsStore:
                         existing_prevs.add(prev_event_id)
 
         for chunk in batch_iter(event_ids, 100):
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
             )
 
@@ -421,7 +423,7 @@ class PersistEventsStore:
         # event's auth chain, but it's easier for now just to store them (and
         # it doesn't take much storage compared to storing the entire event
         # anyway).
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="event_auth",
             values=[
@@ -484,7 +486,7 @@ class PersistEventsStore:
                 """
                 txn.execute(sql, (stream_id, room_id))
 
-                self.db.simple_delete_txn(
+                self.db_pool.simple_delete_txn(
                     txn, table="current_state_events", keyvalues={"room_id": room_id},
                 )
             else:
@@ -632,7 +634,7 @@ class PersistEventsStore:
             creator = content.get("creator")
             room_version_id = content.get("room_version", RoomVersions.V1.identifier)
 
-            self.db.simple_upsert_txn(
+            self.db_pool.simple_upsert_txn(
                 txn,
                 table="rooms",
                 keyvalues={"room_id": room_id},
@@ -644,14 +646,14 @@ class PersistEventsStore:
         self, txn, new_forward_extremities, max_stream_order
     ):
         for room_id, new_extrem in new_forward_extremities.items():
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
             )
             txn.call_after(
                 self.store.get_latest_event_ids_in_room.invalidate, (room_id,)
             )
 
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="event_forward_extremities",
             values=[
@@ -664,7 +666,7 @@ class PersistEventsStore:
         # new stream_ordering to new forward extremities in the room.
         # This allows us to later efficiently look up the forward extremities
         # for a room before a given stream_ordering
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="stream_ordering_to_exterm",
             values=[
@@ -788,7 +790,7 @@ class PersistEventsStore:
                 # change in outlier status to our workers.
                 stream_order = event.internal_metadata.stream_ordering
                 state_group_id = context.state_group
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn,
                     table="ex_outlier_stream",
                     values={
@@ -826,7 +828,7 @@ class PersistEventsStore:
             d.pop("redacted_because", None)
             return d
 
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="event_json",
             values=[
@@ -843,7 +845,7 @@ class PersistEventsStore:
             ],
         )
 
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="events",
             values=[
@@ -873,7 +875,7 @@ class PersistEventsStore:
                 # If we're persisting an unredacted event we go and ensure
                 # that we mark any redactions that reference this event as
                 # requiring censoring.
-                self.db.simple_update_txn(
+                self.db_pool.simple_update_txn(
                     txn,
                     table="redactions",
                     keyvalues={"redacts": event.event_id},
@@ -1015,7 +1017,9 @@ class PersistEventsStore:
 
             state_values.append(vals)
 
-        self.db.simple_insert_many_txn(txn, table="state_events", values=state_values)
+        self.db_pool.simple_insert_many_txn(
+            txn, table="state_events", values=state_values
+        )
 
         # Prefill the event cache
         self._add_to_cache(txn, events_and_contexts)
@@ -1046,7 +1050,7 @@ class PersistEventsStore:
             )
 
             txn.execute(sql + clause, args)
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
             for row in rows:
                 event = ev_map[row["event_id"]]
                 if not row["rejects"] and not row["redacts"]:
@@ -1066,7 +1070,7 @@ class PersistEventsStore:
         # invalidate the cache for the redacted event
         txn.call_after(self.store._invalidate_get_event_cache, event.redacts)
 
-        self.db.simple_insert_txn(
+        self.db_pool.simple_insert_txn(
             txn,
             table="redactions",
             values={
@@ -1089,7 +1093,7 @@ class PersistEventsStore:
             room_id (str): The ID of the room the event was sent to.
             topological_ordering (int): The position of the event in the room's topology.
         """
-        return self.db.simple_insert_many_txn(
+        return self.db_pool.simple_insert_many_txn(
             txn=txn,
             table="event_labels",
             values=[
@@ -1111,7 +1115,7 @@ class PersistEventsStore:
             event_id (str): The event ID the expiry timestamp is associated with.
             expiry_ts (int): The timestamp at which to expire (delete) the event.
         """
-        return self.db.simple_insert_txn(
+        return self.db_pool.simple_insert_txn(
             txn=txn,
             table="event_expiry",
             values={"event_id": event_id, "expiry_ts": expiry_ts},
@@ -1135,12 +1139,14 @@ class PersistEventsStore:
                 }
             )
 
-        self.db.simple_insert_many_txn(txn, table="event_reference_hashes", values=vals)
+        self.db_pool.simple_insert_many_txn(
+            txn, table="event_reference_hashes", values=vals
+        )
 
     def _store_room_members_txn(self, txn, events, backfilled):
         """Store a room member in the database.
         """
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="room_memberships",
             values=[
@@ -1180,7 +1186,7 @@ class PersistEventsStore:
                 and event.internal_metadata.is_outlier()
                 and event.internal_metadata.is_out_of_band_membership()
             ):
-                self.db.simple_upsert_txn(
+                self.db_pool.simple_upsert_txn(
                     txn,
                     table="local_current_membership",
                     keyvalues={"room_id": event.room_id, "user_id": event.state_key},
@@ -1218,7 +1224,7 @@ class PersistEventsStore:
 
         aggregation_key = relation.get("key")
 
-        self.db.simple_insert_txn(
+        self.db_pool.simple_insert_txn(
             txn,
             table="event_relations",
             values={
@@ -1246,7 +1252,7 @@ class PersistEventsStore:
             redacted_event_id (str): The event that was redacted.
         """
 
-        self.db.simple_delete_txn(
+        self.db_pool.simple_delete_txn(
             txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
         )
 
@@ -1282,7 +1288,7 @@ class PersistEventsStore:
                 # Ignore the event if one of the values isn't an integer.
                 return
 
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn=txn,
                 table="room_retention",
                 values={
@@ -1363,7 +1369,7 @@ class PersistEventsStore:
             )
 
         for event, _ in events_and_contexts:
-            user_ids = self.db.simple_select_onecol_txn(
+            user_ids = self.db_pool.simple_select_onecol_txn(
                 txn,
                 table="event_push_actions_staging",
                 keyvalues={"event_id": event.event_id},
@@ -1395,7 +1401,7 @@ class PersistEventsStore:
         )
 
     def _store_rejections_txn(self, txn, event_id, reason):
-        self.db.simple_insert_txn(
+        self.db_pool.simple_insert_txn(
             txn,
             table="rejections",
             values={
@@ -1421,7 +1427,7 @@ class PersistEventsStore:
 
             state_groups[event.event_id] = context.state_group
 
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="event_to_state_groups",
             values=[
@@ -1443,7 +1449,7 @@ class PersistEventsStore:
         if min_depth is not None and depth >= min_depth:
             return
 
-        self.db.simple_upsert_txn(
+        self.db_pool.simple_upsert_txn(
             txn,
             table="room_depth",
             keyvalues={"room_id": room_id},
@@ -1455,7 +1461,7 @@ class PersistEventsStore:
         For the given event, update the event edges table and forward and
         backward extremities tables.
         """
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="event_edges",
             values=[
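
PersistEventsStore is the one class here that is not an SQLBaseStore subclass: it is handed the pool explicitly, so the rename shows up as an assignment rather than inherited state. A reduced sketch of that constructor shape (ExamplePersister is illustrative):

from synapse.storage.database import DatabasePool


class ExamplePersister:
    def __init__(self, hs, db: DatabasePool, main_data_store):
        self.hs = hs
        # The injected pool is kept under the new attribute name so the
        # rest of the class reads identically to the store mixins.
        self.db_pool = db
        self.store = main_data_store
        self.database_engine = db.engine
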
diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 663c94b24f..35a0e09e3c 100644
--- a/synapse/storage/data_stores/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -19,7 +19,7 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventContentFields
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 
 logger = logging.getLogger(__name__)
 
@@ -30,18 +30,18 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
     EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
     DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(EventsBackgroundUpdatesStore, self).__init__(database, db_conn, hs)
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
         )
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
             self._background_reindex_fields_sender,
         )
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "event_contains_url_index",
             index_name="event_contains_url_index",
             table="events",
@@ -52,7 +52,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
         # an event_id index on event_search is useful for the purge_history
         # api. Plus it means we get to enforce some integrity with a UNIQUE
         # clause
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "event_search_event_id_idx",
             index_name="event_search_event_id_idx",
             table="event_search",
@@ -61,16 +61,16 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
             psql_only=True,
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "redactions_received_ts", self._redactions_received_ts
         )
 
         # This index gets deleted in `event_fix_redactions_bytes` update
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "event_fix_redactions_bytes_create_index",
             index_name="redactions_censored_redacts",
             table="redactions",
@@ -78,15 +78,15 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
             where_clause="have_censored",
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "event_fix_redactions_bytes", self._event_fix_redactions_bytes
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "event_store_labels", self._event_store_labels
         )
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "redactions_have_censored_ts_idx",
             index_name="redactions_have_censored_ts",
             table="redactions",
@@ -149,18 +149,18 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
                 "rows_inserted": rows_inserted + len(rows),
             }
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
             )
 
             return len(rows)
 
-        result = yield self.db.runInteraction(
+        result = yield self.db_pool.runInteraction(
             self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
         )
 
         if not result:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME
             )
 
@@ -195,7 +195,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
 
             chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)]
             for chunk in chunks:
-                ev_rows = self.db.simple_select_many_txn(
+                ev_rows = self.db_pool.simple_select_many_txn(
                     txn,
                     table="event_json",
                     column="event_id",
@@ -228,18 +228,18 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
                 "rows_inserted": rows_inserted + len(rows_to_update),
             }
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
             )
 
             return len(rows_to_update)
 
-        result = yield self.db.runInteraction(
+        result = yield self.db_pool.runInteraction(
             self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
         )
 
         if not result:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 self.EVENT_ORIGIN_SERVER_TS_NAME
             )
 
@@ -374,7 +374,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
 
             to_delete.intersection_update(original_set)
 
-            deleted = self.db.simple_delete_many_txn(
+            deleted = self.db_pool.simple_delete_many_txn(
                 txn=txn,
                 table="event_forward_extremities",
                 column="event_id",
@@ -390,7 +390,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
 
             if deleted:
                 # We now need to invalidate the caches of these rooms
-                rows = self.db.simple_select_many_txn(
+                rows = self.db_pool.simple_select_many_txn(
                     txn,
                     table="events",
                     column="event_id",
@@ -404,7 +404,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
                         self.get_latest_event_ids_in_room.invalidate, (room_id,)
                     )
 
-            self.db.simple_delete_many_txn(
+            self.db_pool.simple_delete_many_txn(
                 txn=txn,
                 table="_extremities_to_check",
                 column="event_id",
@@ -414,19 +414,19 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
 
             return len(original_set)
 
-        num_handled = yield self.db.runInteraction(
+        num_handled = yield self.db_pool.runInteraction(
             "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn
         )
 
         if not num_handled:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 self.DELETE_SOFT_FAILED_EXTREMITIES
             )
 
             def _drop_table_txn(txn):
                 txn.execute("DROP TABLE _extremities_to_check")
 
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "_cleanup_extremities_bg_update_drop_table", _drop_table_txn
             )
 
@@ -474,18 +474,18 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
 
             txn.execute(sql, (self._clock.time_msec(), last_event_id, upper_event_id))
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, "redactions_received_ts", {"last_event_id": upper_event_id}
             )
 
             return len(rows)
 
-        count = yield self.db.runInteraction(
+        count = yield self.db_pool.runInteraction(
             "_redactions_received_ts", _redactions_received_ts_txn
         )
 
         if not count:
-            yield self.db.updates._end_background_update("redactions_received_ts")
+            yield self.db_pool.updates._end_background_update("redactions_received_ts")
 
         return count
 
@@ -511,11 +511,11 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
 
             txn.execute("DROP INDEX redactions_censored_redacts")
 
-        yield self.db.runInteraction(
+        yield self.db_pool.runInteraction(
             "_event_fix_redactions_bytes", _event_fix_redactions_bytes_txn
         )
 
-        yield self.db.updates._end_background_update("event_fix_redactions_bytes")
+        yield self.db_pool.updates._end_background_update("event_fix_redactions_bytes")
 
         return 1
 
@@ -543,7 +543,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
                 try:
                     event_json = db_to_json(event_json_raw)
 
-                    self.db.simple_insert_many_txn(
+                    self.db_pool.simple_insert_many_txn(
                         txn=txn,
                         table="event_labels",
                         values=[
@@ -569,17 +569,17 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
                 nbrows += 1
                 last_row_event_id = event_id
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, "event_store_labels", {"last_event_id": last_row_event_id}
             )
 
             return nbrows
 
-        num_rows = yield self.db.runInteraction(
+        num_rows = yield self.db_pool.runInteraction(
             desc="event_store_labels", func=_event_store_labels_txn
         )
 
         if not num_rows:
-            yield self.db.updates._end_background_update("event_store_labels")
+            yield self.db_pool.updates._end_background_update("event_store_labels")
 
         return num_rows
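
The background-update handlers in this file all share one loop: process a batch in a transaction, persist progress, and end the update once a batch comes back empty. A sketch of that contract after the rename; the handler, update name and query are illustrative, and the (progress, batch_size) signature is the background updater's usual one:

from synapse.storage._base import SQLBaseStore


class ExampleBgUpdateStore(SQLBaseStore):
    async def _example_bg_update(self, progress, batch_size):
        last_id = progress.get("last_event_id", "")

        def _txn(txn):
            txn.execute(
                "SELECT event_id FROM events"
                " WHERE event_id > ? ORDER BY event_id LIMIT ?",
                (last_id, batch_size),
            )
            rows = txn.fetchall()
            if rows:
                # Record how far we got so the update resumes here.
                self.db_pool.updates._background_update_progress_txn(
                    txn, "example_bg_update", {"last_event_id": rows[-1][0]}
                )
            return len(rows)

        num_rows = await self.db_pool.runInteraction("example_bg_update", _txn)
        if not num_rows:
            # An empty batch means everything has been processed.
            await self.db_pool.updates._end_background_update("example_bg_update")
        return num_rows
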
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index b03b259636..a7b7393f6e 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -40,7 +40,7 @@ from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams import BackfillStream
 from synapse.replication.tcp.streams.events import EventsStream
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.storage.types import Cursor
 from synapse.storage.util.id_generators import StreamIdGenerator
 from synapse.types import get_domain_from_id
@@ -80,7 +80,7 @@ class EventRedactBehaviour(Names):
 
 
 class EventsWorkerStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(EventsWorkerStore, self).__init__(database, db_conn, hs)
 
         if hs.config.worker.writers.events == hs.get_instance_name():
@@ -136,7 +136,7 @@ class EventsWorkerStore(SQLBaseStore):
             Deferred[int|None]: Timestamp in milliseconds, or None for events
             that were persisted before received_ts was implemented.
         """
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="events",
             keyvalues={"event_id": event_id},
             retcol="received_ts",
@@ -175,7 +175,7 @@ class EventsWorkerStore(SQLBaseStore):
 
             return ts
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_approximate_received_ts", _get_approximate_received_ts_txn
         )
 
@@ -543,7 +543,7 @@ class EventsWorkerStore(SQLBaseStore):
                     event_id for events, _ in event_list for event_id in events
                 }
 
-                row_dict = self.db.new_transaction(
+                row_dict = self.db_pool.new_transaction(
                     conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch
                 )
 
@@ -720,7 +720,7 @@ class EventsWorkerStore(SQLBaseStore):
 
         if should_start:
             run_as_background_process(
-                "fetch_events", self.db.runWithConnection, self._do_fetch
+                "fetch_events", self.db_pool.runWithConnection, self._do_fetch
             )
 
         logger.debug("Loading %d events: %s", len(events), events)
@@ -889,7 +889,7 @@ class EventsWorkerStore(SQLBaseStore):
         """Given a list of event ids, check if we have already processed and
         stored them as non-outliers.
         """
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="events",
             retcols=("event_id",),
             column="event_id",
@@ -924,7 +924,7 @@ class EventsWorkerStore(SQLBaseStore):
         # break the input up into chunks of 100
         input_iterator = iter(event_ids)
         for chunk in iter(lambda: list(itertools.islice(input_iterator, 100)), []):
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "have_seen_events", have_seen_events_txn, chunk
             )
         return results
@@ -953,7 +953,7 @@ class EventsWorkerStore(SQLBaseStore):
         Returns:
             Deferred[int]
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_total_state_event_counts",
             self._get_total_state_event_counts_txn,
             room_id,
@@ -978,7 +978,7 @@ class EventsWorkerStore(SQLBaseStore):
         Returns:
             Deferred[int]
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_current_state_event_counts",
             self._get_current_state_event_counts_txn,
             room_id,
@@ -1043,7 +1043,7 @@ class EventsWorkerStore(SQLBaseStore):
             txn.execute(sql, (last_id, current_id, limit))
             return txn.fetchall()
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_all_new_forward_event_rows", get_all_new_forward_event_rows
         )
 
@@ -1077,7 +1077,7 @@ class EventsWorkerStore(SQLBaseStore):
             txn.execute(sql, (last_id, current_id))
             return txn.fetchall()
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_ex_outlier_stream_rows", get_ex_outlier_stream_rows_txn
         )
 
@@ -1151,7 +1151,7 @@ class EventsWorkerStore(SQLBaseStore):
 
             return new_event_updates, upper_bound, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_new_backfill_event_rows", get_all_new_backfill_event_rows
         )
 
@@ -1199,7 +1199,7 @@ class EventsWorkerStore(SQLBaseStore):
         # we need to make sure that, for every stream id in the results, we get *all*
         # the rows with that stream id.
 
-        rows = await self.db.runInteraction(
+        rows = await self.db_pool.runInteraction(
             "get_all_updated_current_state_deltas",
             get_all_updated_current_state_deltas_txn,
         )  # type: List[Tuple]
@@ -1222,7 +1222,7 @@ class EventsWorkerStore(SQLBaseStore):
         # stream id. let's run the query again, without a row limit, but for
         # just one stream id.
         to_token += 1
-        rows = await self.db.runInteraction(
+        rows = await self.db_pool.runInteraction(
             "get_deltas_for_stream_id", get_deltas_for_stream_id_txn, to_token
         )
 
@@ -1317,7 +1317,7 @@ class EventsWorkerStore(SQLBaseStore):
                 backward_ex_outliers,
             )
 
-        return self.db.runInteraction("get_all_new_events", get_all_new_events_txn)
+        return self.db_pool.runInteraction("get_all_new_events", get_all_new_events_txn)
 
     async def is_event_after(self, event_id1, event_id2):
         """Returns True if event_id1 is after event_id2 in the stream
@@ -1328,7 +1328,7 @@ class EventsWorkerStore(SQLBaseStore):
 
     @cachedInlineCallbacks(max_entries=5000)
     def get_event_ordering(self, event_id):
-        res = yield self.db.simple_select_one(
+        res = yield self.db_pool.simple_select_one(
             table="events",
             retcols=["topological_ordering", "stream_ordering"],
             keyvalues={"event_id": event_id},
@@ -1360,7 +1360,7 @@ class EventsWorkerStore(SQLBaseStore):
 
             return txn.fetchone()
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
         )
 
@@ -1385,7 +1385,7 @@ class EventsWorkerStore(SQLBaseStore):
                 on_invalidate=cache_context.invalidate,
             )
 
-            return await self.db.runInteraction(
+            return await self.db_pool.runInteraction(
                 "get_unread_message_count_for_user",
                 self._get_unread_message_count_for_user_txn,
                 user_id,
@@ -1402,7 +1402,7 @@ class EventsWorkerStore(SQLBaseStore):
     ) -> int:
         if last_read_event_id:
             # Get the stream ordering for the last read event.
-            stream_ordering = self.db.simple_select_one_onecol_txn(
+            stream_ordering = self.db_pool.simple_select_one_onecol_txn(
                 txn=txn,
                 table="events",
                 keyvalues={"room_id": room_id, "event_id": last_read_event_id},
diff --git a/synapse/storage/data_stores/main/filtering.py b/synapse/storage/databases/main/filtering.py
index 342d6622a4..cae6bda80e 100644
--- a/synapse/storage/data_stores/main/filtering.py
+++ b/synapse/storage/databases/main/filtering.py
@@ -30,7 +30,7 @@ class FilteringStore(SQLBaseStore):
         except ValueError:
             raise SynapseError(400, "Invalid filter ID", Codes.INVALID_PARAM)
 
-        def_json = yield self.db.simple_select_one_onecol(
+        def_json = yield self.db_pool.simple_select_one_onecol(
             table="user_filters",
             keyvalues={"user_id": user_localpart, "filter_id": filter_id},
             retcol="filter_json",
@@ -71,4 +71,4 @@ class FilteringStore(SQLBaseStore):
 
             return filter_id
 
-        return self.db.runInteraction("add_user_filter", _do_txn)
+        return self.db_pool.runInteraction("add_user_filter", _do_txn)
diff --git a/synapse/storage/data_stores/main/group_server.py b/synapse/storage/databases/main/group_server.py
index 01ff561e1a..a98181f445 100644
--- a/synapse/storage/data_stores/main/group_server.py
+++ b/synapse/storage/databases/main/group_server.py
@@ -31,7 +31,7 @@ _DEFAULT_ROLE_ID = ""
 
 class GroupServerWorkerStore(SQLBaseStore):
     def get_group(self, group_id):
-        return self.db.simple_select_one(
+        return self.db_pool.simple_select_one(
             table="groups",
             keyvalues={"group_id": group_id},
             retcols=(
@@ -53,7 +53,7 @@ class GroupServerWorkerStore(SQLBaseStore):
         if not include_private:
             keyvalues["is_public"] = True
 
-        return self.db.simple_select_list(
+        return self.db_pool.simple_select_list(
             table="group_users",
             keyvalues=keyvalues,
             retcols=("user_id", "is_public", "is_admin"),
@@ -63,7 +63,7 @@ class GroupServerWorkerStore(SQLBaseStore):
     def get_invited_users_in_group(self, group_id):
         # TODO: Pagination
 
-        return self.db.simple_select_onecol(
+        return self.db_pool.simple_select_onecol(
             table="group_invites",
             keyvalues={"group_id": group_id},
             retcol="user_id",
@@ -117,7 +117,9 @@ class GroupServerWorkerStore(SQLBaseStore):
                 for room_id, is_public in txn
             ]
 
-        return self.db.runInteraction("get_rooms_in_group", _get_rooms_in_group_txn)
+        return self.db_pool.runInteraction(
+            "get_rooms_in_group", _get_rooms_in_group_txn
+        )
 
     def get_rooms_for_summary_by_category(
         self, group_id: str, include_private: bool = False,
@@ -205,13 +207,13 @@ class GroupServerWorkerStore(SQLBaseStore):
 
             return rooms, categories
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_rooms_for_summary", _get_rooms_for_summary_txn
         )
 
     @defer.inlineCallbacks
     def get_group_categories(self, group_id):
-        rows = yield self.db.simple_select_list(
+        rows = yield self.db_pool.simple_select_list(
             table="group_room_categories",
             keyvalues={"group_id": group_id},
             retcols=("category_id", "is_public", "profile"),
@@ -228,7 +230,7 @@ class GroupServerWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def get_group_category(self, group_id, category_id):
-        category = yield self.db.simple_select_one(
+        category = yield self.db_pool.simple_select_one(
             table="group_room_categories",
             keyvalues={"group_id": group_id, "category_id": category_id},
             retcols=("is_public", "profile"),
@@ -241,7 +243,7 @@ class GroupServerWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def get_group_roles(self, group_id):
-        rows = yield self.db.simple_select_list(
+        rows = yield self.db_pool.simple_select_list(
             table="group_roles",
             keyvalues={"group_id": group_id},
             retcols=("role_id", "is_public", "profile"),
@@ -258,7 +260,7 @@ class GroupServerWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def get_group_role(self, group_id, role_id):
-        role = yield self.db.simple_select_one(
+        role = yield self.db_pool.simple_select_one(
             table="group_roles",
             keyvalues={"group_id": group_id, "role_id": role_id},
             retcols=("is_public", "profile"),
@@ -277,7 +279,7 @@ class GroupServerWorkerStore(SQLBaseStore):
             Deferred[list[str]]: A twisted.Deferred containing a list of group ids
                 containing this room
         """
-        return self.db.simple_select_onecol(
+        return self.db_pool.simple_select_onecol(
             table="group_rooms",
             keyvalues={"room_id": room_id},
             retcol="group_id",
@@ -341,12 +343,12 @@ class GroupServerWorkerStore(SQLBaseStore):
 
             return users, roles
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_users_for_summary_by_role", _get_users_for_summary_txn
         )
 
     def is_user_in_group(self, user_id, group_id):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="group_users",
             keyvalues={"group_id": group_id, "user_id": user_id},
             retcol="user_id",
@@ -355,7 +357,7 @@ class GroupServerWorkerStore(SQLBaseStore):
         ).addCallback(lambda r: bool(r))
 
     def is_user_admin_in_group(self, group_id, user_id):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="group_users",
             keyvalues={"group_id": group_id, "user_id": user_id},
             retcol="is_admin",
@@ -366,7 +368,7 @@ class GroupServerWorkerStore(SQLBaseStore):
     def is_user_invited_to_local_group(self, group_id, user_id):
         """Has the group server invited a user?
         """
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="group_invites",
             keyvalues={"group_id": group_id, "user_id": user_id},
             retcol="user_id",
@@ -389,7 +391,7 @@ class GroupServerWorkerStore(SQLBaseStore):
         """
 
         def _get_users_membership_in_group_txn(txn):
-            row = self.db.simple_select_one_txn(
+            row = self.db_pool.simple_select_one_txn(
                 txn,
                 table="group_users",
                 keyvalues={"group_id": group_id, "user_id": user_id},
@@ -404,7 +406,7 @@ class GroupServerWorkerStore(SQLBaseStore):
                     "is_privileged": row["is_admin"],
                 }
 
-            row = self.db.simple_select_one_onecol_txn(
+            row = self.db_pool.simple_select_one_onecol_txn(
                 txn,
                 table="group_invites",
                 keyvalues={"group_id": group_id, "user_id": user_id},
@@ -417,14 +419,14 @@ class GroupServerWorkerStore(SQLBaseStore):
 
             return {}
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_users_membership_info_in_group", _get_users_membership_in_group_txn
         )
 
     def get_publicised_groups_for_user(self, user_id):
         """Get all groups a user is publicising
         """
-        return self.db.simple_select_onecol(
+        return self.db_pool.simple_select_onecol(
             table="local_group_membership",
             keyvalues={"user_id": user_id, "membership": "join", "is_publicised": True},
             retcol="group_id",
@@ -441,9 +443,9 @@ class GroupServerWorkerStore(SQLBaseStore):
                 WHERE valid_until_ms <= ?
             """
             txn.execute(sql, (valid_until_ms,))
-            return self.db.cursor_to_dict(txn)
+            return self.db_pool.cursor_to_dict(txn)
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_attestations_need_renewals", _get_attestations_need_renewals_txn
         )
 
@@ -452,7 +454,7 @@ class GroupServerWorkerStore(SQLBaseStore):
         """Get the attestation that proves the remote agrees that the user is
         in the group.
         """
-        row = yield self.db.simple_select_one(
+        row = yield self.db_pool.simple_select_one(
             table="group_attestations_remote",
             keyvalues={"group_id": group_id, "user_id": user_id},
             retcols=("valid_until_ms", "attestation_json"),
@@ -467,7 +469,7 @@ class GroupServerWorkerStore(SQLBaseStore):
         return None
 
     def get_joined_groups(self, user_id):
-        return self.db.simple_select_onecol(
+        return self.db_pool.simple_select_onecol(
             table="local_group_membership",
             keyvalues={"user_id": user_id, "membership": "join"},
             retcol="group_id",
@@ -494,7 +496,7 @@ class GroupServerWorkerStore(SQLBaseStore):
                 for row in txn
             ]
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_all_groups_for_user", _get_all_groups_for_user_txn
         )
 
@@ -524,7 +526,7 @@ class GroupServerWorkerStore(SQLBaseStore):
                 for group_id, membership, gtype, content_json in txn
             ]
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_groups_changes_for_user", _get_groups_changes_for_user_txn
         )
 
@@ -579,7 +581,7 @@ class GroupServerWorkerStore(SQLBaseStore):
 
             return updates, upto_token, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_groups_changes", _get_all_groups_changes_txn
         )
 
@@ -592,7 +594,7 @@ class GroupServerStore(GroupServerWorkerStore):
          * "invite"
          * "open"
         """
-        return self.db.simple_update_one(
+        return self.db_pool.simple_update_one(
             table="groups",
             keyvalues={"group_id": group_id},
             updatevalues={"join_policy": join_policy},
@@ -600,7 +602,7 @@ class GroupServerStore(GroupServerWorkerStore):
         )
 
     def add_room_to_summary(self, group_id, room_id, category_id, order, is_public):
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "add_room_to_summary",
             self._add_room_to_summary_txn,
             group_id,
@@ -624,7 +626,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 an order of 1 will put the room first. Otherwise, the room gets
                 added to the end.
         """
-        room_in_group = self.db.simple_select_one_onecol_txn(
+        room_in_group = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="group_rooms",
             keyvalues={"group_id": group_id, "room_id": room_id},
@@ -637,7 +639,7 @@ class GroupServerStore(GroupServerWorkerStore):
         if category_id is None:
             category_id = _DEFAULT_CATEGORY_ID
         else:
-            cat_exists = self.db.simple_select_one_onecol_txn(
+            cat_exists = self.db_pool.simple_select_one_onecol_txn(
                 txn,
                 table="group_room_categories",
                 keyvalues={"group_id": group_id, "category_id": category_id},
@@ -648,7 +650,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 raise SynapseError(400, "Category doesn't exist")
 
             # TODO: Check category is part of summary already
-            cat_exists = self.db.simple_select_one_onecol_txn(
+            cat_exists = self.db_pool.simple_select_one_onecol_txn(
                 txn,
                 table="group_summary_room_categories",
                 keyvalues={"group_id": group_id, "category_id": category_id},
@@ -668,7 +670,7 @@ class GroupServerStore(GroupServerWorkerStore):
                     (group_id, category_id, group_id, category_id),
                 )
 
-        existing = self.db.simple_select_one_txn(
+        existing = self.db_pool.simple_select_one_txn(
             txn,
             table="group_summary_rooms",
             keyvalues={
@@ -701,7 +703,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 to_update["room_order"] = order
             if is_public is not None:
                 to_update["is_public"] = is_public
-            self.db.simple_update_txn(
+            self.db_pool.simple_update_txn(
                 txn,
                 table="group_summary_rooms",
                 keyvalues={
@@ -715,7 +717,7 @@ class GroupServerStore(GroupServerWorkerStore):
             if is_public is None:
                 is_public = True
 
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="group_summary_rooms",
                 values={
@@ -731,7 +733,7 @@ class GroupServerStore(GroupServerWorkerStore):
         if category_id is None:
             category_id = _DEFAULT_CATEGORY_ID
 
-        return self.db.simple_delete(
+        return self.db_pool.simple_delete(
             table="group_summary_rooms",
             keyvalues={
                 "group_id": group_id,
@@ -757,7 +759,7 @@ class GroupServerStore(GroupServerWorkerStore):
         else:
             update_values["is_public"] = is_public
 
-        return self.db.simple_upsert(
+        return self.db_pool.simple_upsert(
             table="group_room_categories",
             keyvalues={"group_id": group_id, "category_id": category_id},
             values=update_values,
@@ -766,7 +768,7 @@ class GroupServerStore(GroupServerWorkerStore):
         )
 
     def remove_group_category(self, group_id, category_id):
-        return self.db.simple_delete(
+        return self.db_pool.simple_delete(
             table="group_room_categories",
             keyvalues={"group_id": group_id, "category_id": category_id},
             desc="remove_group_category",
@@ -788,7 +790,7 @@ class GroupServerStore(GroupServerWorkerStore):
         else:
             update_values["is_public"] = is_public
 
-        return self.db.simple_upsert(
+        return self.db_pool.simple_upsert(
             table="group_roles",
             keyvalues={"group_id": group_id, "role_id": role_id},
             values=update_values,
@@ -797,14 +799,14 @@ class GroupServerStore(GroupServerWorkerStore):
         )
 
     def remove_group_role(self, group_id, role_id):
-        return self.db.simple_delete(
+        return self.db_pool.simple_delete(
             table="group_roles",
             keyvalues={"group_id": group_id, "role_id": role_id},
             desc="remove_group_role",
         )
 
     def add_user_to_summary(self, group_id, user_id, role_id, order, is_public):
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "add_user_to_summary",
             self._add_user_to_summary_txn,
             group_id,
@@ -828,7 +830,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 an order of 1 will put the user first. Otherwise, the user gets
                 added to the end.
         """
-        user_in_group = self.db.simple_select_one_onecol_txn(
+        user_in_group = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="group_users",
             keyvalues={"group_id": group_id, "user_id": user_id},
@@ -841,7 +843,7 @@ class GroupServerStore(GroupServerWorkerStore):
         if role_id is None:
             role_id = _DEFAULT_ROLE_ID
         else:
-            role_exists = self.db.simple_select_one_onecol_txn(
+            role_exists = self.db_pool.simple_select_one_onecol_txn(
                 txn,
                 table="group_roles",
                 keyvalues={"group_id": group_id, "role_id": role_id},
@@ -852,7 +854,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 raise SynapseError(400, "Role doesn't exist")
 
             # TODO: Check role is part of the summary already
-            role_exists = self.db.simple_select_one_onecol_txn(
+            role_exists = self.db_pool.simple_select_one_onecol_txn(
                 txn,
                 table="group_summary_roles",
                 keyvalues={"group_id": group_id, "role_id": role_id},
@@ -872,7 +874,7 @@ class GroupServerStore(GroupServerWorkerStore):
                     (group_id, role_id, group_id, role_id),
                 )
 
-        existing = self.db.simple_select_one_txn(
+        existing = self.db_pool.simple_select_one_txn(
             txn,
             table="group_summary_users",
             keyvalues={"group_id": group_id, "user_id": user_id, "role_id": role_id},
@@ -901,7 +903,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 to_update["user_order"] = order
             if is_public is not None:
                 to_update["is_public"] = is_public
-            self.db.simple_update_txn(
+            self.db_pool.simple_update_txn(
                 txn,
                 table="group_summary_users",
                 keyvalues={
@@ -915,7 +917,7 @@ class GroupServerStore(GroupServerWorkerStore):
             if is_public is None:
                 is_public = True
 
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="group_summary_users",
                 values={
@@ -931,7 +933,7 @@ class GroupServerStore(GroupServerWorkerStore):
         if role_id is None:
             role_id = _DEFAULT_ROLE_ID
 
-        return self.db.simple_delete(
+        return self.db_pool.simple_delete(
             table="group_summary_users",
             keyvalues={"group_id": group_id, "role_id": role_id, "user_id": user_id},
             desc="remove_user_from_summary",
@@ -940,7 +942,7 @@ class GroupServerStore(GroupServerWorkerStore):
     def add_group_invite(self, group_id, user_id):
         """Record that the group server has invited a user
         """
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             table="group_invites",
             values={"group_id": group_id, "user_id": user_id},
             desc="add_group_invite",
@@ -970,7 +972,7 @@ class GroupServerStore(GroupServerWorkerStore):
         """
 
         def _add_user_to_group_txn(txn):
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="group_users",
                 values={
@@ -981,14 +983,14 @@ class GroupServerStore(GroupServerWorkerStore):
                 },
             )
 
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="group_invites",
                 keyvalues={"group_id": group_id, "user_id": user_id},
             )
 
             if local_attestation:
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn,
                     table="group_attestations_renewals",
                     values={
@@ -998,7 +1000,7 @@ class GroupServerStore(GroupServerWorkerStore):
                     },
                 )
             if remote_attestation:
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn,
                     table="group_attestations_remote",
                     values={
@@ -1009,49 +1011,49 @@ class GroupServerStore(GroupServerWorkerStore):
                     },
                 )
 
-        return self.db.runInteraction("add_user_to_group", _add_user_to_group_txn)
+        return self.db_pool.runInteraction("add_user_to_group", _add_user_to_group_txn)
 
     def remove_user_from_group(self, group_id, user_id):
         def _remove_user_from_group_txn(txn):
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="group_users",
                 keyvalues={"group_id": group_id, "user_id": user_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="group_invites",
                 keyvalues={"group_id": group_id, "user_id": user_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="group_attestations_renewals",
                 keyvalues={"group_id": group_id, "user_id": user_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="group_attestations_remote",
                 keyvalues={"group_id": group_id, "user_id": user_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="group_summary_users",
                 keyvalues={"group_id": group_id, "user_id": user_id},
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "remove_user_from_group", _remove_user_from_group_txn
         )
 
     def add_room_to_group(self, group_id, room_id, is_public):
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             table="group_rooms",
             values={"group_id": group_id, "room_id": room_id, "is_public": is_public},
             desc="add_room_to_group",
         )
 
     def update_room_in_group_visibility(self, group_id, room_id, is_public):
-        return self.db.simple_update(
+        return self.db_pool.simple_update(
             table="group_rooms",
             keyvalues={"group_id": group_id, "room_id": room_id},
             updatevalues={"is_public": is_public},
@@ -1060,26 +1062,26 @@ class GroupServerStore(GroupServerWorkerStore):
 
     def remove_room_from_group(self, group_id, room_id):
         def _remove_room_from_group_txn(txn):
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="group_rooms",
                 keyvalues={"group_id": group_id, "room_id": room_id},
             )
 
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="group_summary_rooms",
                 keyvalues={"group_id": group_id, "room_id": room_id},
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "remove_room_from_group", _remove_room_from_group_txn
         )
 
     def update_group_publicity(self, group_id, user_id, publicise):
         """Update whether the user is publicising their membership of the group
         """
-        return self.db.simple_update_one(
+        return self.db_pool.simple_update_one(
             table="local_group_membership",
             keyvalues={"group_id": group_id, "user_id": user_id},
             updatevalues={"is_publicised": publicise},
@@ -1115,12 +1117,12 @@ class GroupServerStore(GroupServerWorkerStore):
 
         def _register_user_group_membership_txn(txn, next_id):
             # TODO: Upsert?
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="local_group_membership",
                 keyvalues={"group_id": group_id, "user_id": user_id},
             )
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="local_group_membership",
                 values={
@@ -1133,7 +1135,7 @@ class GroupServerStore(GroupServerWorkerStore):
                 },
             )
 
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="local_group_updates",
                 values={
@@ -1152,7 +1154,7 @@ class GroupServerStore(GroupServerWorkerStore):
 
             if membership == "join":
                 if local_attestation:
-                    self.db.simple_insert_txn(
+                    self.db_pool.simple_insert_txn(
                         txn,
                         table="group_attestations_renewals",
                         values={
@@ -1162,7 +1164,7 @@ class GroupServerStore(GroupServerWorkerStore):
                         },
                     )
                 if remote_attestation:
-                    self.db.simple_insert_txn(
+                    self.db_pool.simple_insert_txn(
                         txn,
                         table="group_attestations_remote",
                         values={
@@ -1173,12 +1175,12 @@ class GroupServerStore(GroupServerWorkerStore):
                         },
                     )
             else:
-                self.db.simple_delete_txn(
+                self.db_pool.simple_delete_txn(
                     txn,
                     table="group_attestations_renewals",
                     keyvalues={"group_id": group_id, "user_id": user_id},
                 )
-                self.db.simple_delete_txn(
+                self.db_pool.simple_delete_txn(
                     txn,
                     table="group_attestations_remote",
                     keyvalues={"group_id": group_id, "user_id": user_id},
@@ -1187,7 +1189,7 @@ class GroupServerStore(GroupServerWorkerStore):
             return next_id
 
         with self._group_updates_id_gen.get_next() as next_id:
-            res = yield self.db.runInteraction(
+            res = yield self.db_pool.runInteraction(
                 "register_user_group_membership",
                 _register_user_group_membership_txn,
                 next_id,
@@ -1198,7 +1200,7 @@ class GroupServerStore(GroupServerWorkerStore):
     def create_group(
         self, group_id, user_id, name, avatar_url, short_description, long_description
     ):
-        yield self.db.simple_insert(
+        yield self.db_pool.simple_insert(
             table="groups",
             values={
                 "group_id": group_id,
@@ -1213,7 +1215,7 @@ class GroupServerStore(GroupServerWorkerStore):
 
     @defer.inlineCallbacks
     def update_group_profile(self, group_id, profile):
-        yield self.db.simple_update_one(
+        yield self.db_pool.simple_update_one(
             table="groups",
             keyvalues={"group_id": group_id},
             updatevalues=profile,
@@ -1223,7 +1225,7 @@ class GroupServerStore(GroupServerWorkerStore):
     def update_attestation_renewal(self, group_id, user_id, attestation):
         """Update an attestation that we have renewed
         """
-        return self.db.simple_update_one(
+        return self.db_pool.simple_update_one(
             table="group_attestations_renewals",
             keyvalues={"group_id": group_id, "user_id": user_id},
             updatevalues={"valid_until_ms": attestation["valid_until_ms"]},
@@ -1233,7 +1235,7 @@ class GroupServerStore(GroupServerWorkerStore):
     def update_remote_attestion(self, group_id, user_id, attestation):
         """Update an attestation that a remote has renewed
         """
-        return self.db.simple_update_one(
+        return self.db_pool.simple_update_one(
             table="group_attestations_remote",
             keyvalues={"group_id": group_id, "user_id": user_id},
             updatevalues={
@@ -1252,7 +1254,7 @@ class GroupServerStore(GroupServerWorkerStore):
             group_id (str)
             user_id (str)
         """
-        return self.db.simple_delete(
+        return self.db_pool.simple_delete(
             table="group_attestations_renewals",
             keyvalues={"group_id": group_id, "user_id": user_id},
             desc="remove_attestation_renewal",
@@ -1288,8 +1290,8 @@ class GroupServerStore(GroupServerWorkerStore):
             ]
 
             for table in tables:
-                self.db.simple_delete_txn(
+                self.db_pool.simple_delete_txn(
                     txn, table=table, keyvalues={"group_id": group_id}
                 )
 
-        return self.db.runInteraction("delete_group", _delete_group_txn)
+        return self.db_pool.runInteraction("delete_group", _delete_group_txn)
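
The group-server hunks repeat the transaction-scoped variant of the same rename: helpers ending in _txn, which take the open transaction as their first argument, are also reached through db_pool. A minimal sketch of that pattern, with the method, table, and key names invented for illustration:

    def remove_example(self, example_id):
        def _remove_example_txn(txn):
            # The *_txn helpers take the live transaction first and are
            # now reached through self.db_pool rather than self.db.
            self.db_pool.simple_delete_txn(
                txn, table="examples", keyvalues={"example_id": example_id}
            )
            self.db_pool.simple_delete_txn(
                txn, table="example_summaries", keyvalues={"example_id": example_id}
            )

        return self.db_pool.runInteraction("remove_example", _remove_example_txn)
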
diff --git a/synapse/storage/data_stores/main/keys.py b/synapse/storage/databases/main/keys.py
index 4e1642a27a..384e9c5eb0 100644
--- a/synapse/storage/data_stores/main/keys.py
+++ b/synapse/storage/databases/main/keys.py
@@ -86,7 +86,7 @@ class KeyStore(SQLBaseStore):
                 _get_keys(txn, batch)
             return keys
 
-        return self.db.runInteraction("get_server_verify_keys", _txn)
+        return self.db_pool.runInteraction("get_server_verify_keys", _txn)
 
     def store_server_verify_keys(self, from_server, ts_added_ms, verify_keys):
         """Stores NACL verification keys for remote servers.
@@ -121,9 +121,9 @@ class KeyStore(SQLBaseStore):
                 f((i,))
             return res
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "store_server_verify_keys",
-            self.db.simple_upsert_many_txn,
+            self.db_pool.simple_upsert_many_txn,
             table="server_signature_keys",
             key_names=("server_name", "key_id"),
             key_values=key_values,
@@ -151,7 +151,7 @@ class KeyStore(SQLBaseStore):
             ts_valid_until_ms (int): The time when this json stops being valid.
             key_json (bytes): The encoded JSON.
         """
-        return self.db.simple_upsert(
+        return self.db_pool.simple_upsert(
             table="server_keys_json",
             keyvalues={
                 "server_name": server_name,
@@ -190,7 +190,7 @@ class KeyStore(SQLBaseStore):
                     keyvalues["key_id"] = key_id
                 if from_server is not None:
                     keyvalues["from_server"] = from_server
-                rows = self.db.simple_select_list_txn(
+                rows = self.db_pool.simple_select_list_txn(
                     txn,
                     "server_keys_json",
                     keyvalues=keyvalues,
@@ -205,4 +205,6 @@ class KeyStore(SQLBaseStore):
                 results[(server_name, key_id, from_server)] = rows
             return results
 
-        return self.db.runInteraction("get_server_keys_json", _get_server_keys_json_txn)
+        return self.db_pool.runInteraction(
+            "get_server_keys_json", _get_server_keys_json_txn
+        )
diff --git a/synapse/storage/data_stores/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index 15bc13cbd0..80fc1cd009 100644
--- a/synapse/storage/data_stores/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -13,16 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 
 
 class MediaRepositoryBackgroundUpdateStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(MediaRepositoryBackgroundUpdateStore, self).__init__(
             database, db_conn, hs
         )
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             update_name="local_media_repository_url_idx",
             index_name="local_media_repository_url_idx",
             table="local_media_repository",
@@ -34,7 +34,7 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore):
 class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
     """Persistence for attachments and avatars"""
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(MediaRepositoryStore, self).__init__(database, db_conn, hs)
 
     def get_local_media(self, media_id):
@@ -42,7 +42,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         Returns:
             None if the media_id doesn't exist.
         """
-        return self.db.simple_select_one(
+        return self.db_pool.simple_select_one(
             "local_media_repository",
             {"media_id": media_id},
             (
@@ -67,7 +67,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         user_id,
         url_cache=None,
     ):
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             "local_media_repository",
             {
                 "media_id": media_id,
@@ -83,7 +83,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
 
     def mark_local_media_as_safe(self, media_id: str):
         """Mark a local media as safe from quarantining."""
-        return self.db.simple_update_one(
+        return self.db_pool.simple_update_one(
             table="local_media_repository",
             keyvalues={"media_id": media_id},
             updatevalues={"safe_from_quarantine": True},
@@ -136,12 +136,12 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
                 )
             )
 
-        return self.db.runInteraction("get_url_cache", get_url_cache_txn)
+        return self.db_pool.runInteraction("get_url_cache", get_url_cache_txn)
 
     def store_url_cache(
         self, url, response_code, etag, expires_ts, og, media_id, download_ts
     ):
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             "local_media_repository_url_cache",
             {
                 "url": url,
@@ -156,7 +156,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         )
 
     def get_local_media_thumbnails(self, media_id):
-        return self.db.simple_select_list(
+        return self.db_pool.simple_select_list(
             "local_media_repository_thumbnails",
             {"media_id": media_id},
             (
@@ -178,7 +178,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         thumbnail_method,
         thumbnail_length,
     ):
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             "local_media_repository_thumbnails",
             {
                 "media_id": media_id,
@@ -192,7 +192,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         )
 
     def get_cached_remote_media(self, origin, media_id):
-        return self.db.simple_select_one(
+        return self.db_pool.simple_select_one(
             "remote_media_cache",
             {"media_origin": origin, "media_id": media_id},
             (
@@ -217,7 +217,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         upload_name,
         filesystem_id,
     ):
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             "remote_media_cache",
             {
                 "media_origin": origin,
@@ -262,12 +262,12 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
 
             txn.executemany(sql, ((time_ms, media_id) for media_id in local_media))
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "update_cached_last_access_time", update_cache_txn
         )
 
     def get_remote_media_thumbnails(self, origin, media_id):
-        return self.db.simple_select_list(
+        return self.db_pool.simple_select_list(
             "remote_media_cache_thumbnails",
             {"media_origin": origin, "media_id": media_id},
             (
@@ -292,7 +292,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         thumbnail_method,
         thumbnail_length,
     ):
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             "remote_media_cache_thumbnails",
             {
                 "media_origin": origin,
@@ -314,24 +314,26 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             " WHERE last_access_ts < ?"
         )
 
-        return self.db.execute(
-            "get_remote_media_before", self.db.cursor_to_dict, sql, before_ts
+        return self.db_pool.execute(
+            "get_remote_media_before", self.db_pool.cursor_to_dict, sql, before_ts
         )
 
     def delete_remote_media(self, media_origin, media_id):
         def delete_remote_media_txn(txn):
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 "remote_media_cache",
                 keyvalues={"media_origin": media_origin, "media_id": media_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 "remote_media_cache_thumbnails",
                 keyvalues={"media_origin": media_origin, "media_id": media_id},
             )
 
-        return self.db.runInteraction("delete_remote_media", delete_remote_media_txn)
+        return self.db_pool.runInteraction(
+            "delete_remote_media", delete_remote_media_txn
+        )
 
     def get_expired_url_cache(self, now_ts):
         sql = (
@@ -345,7 +347,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             txn.execute(sql, (now_ts,))
             return [row[0] for row in txn]
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_expired_url_cache", _get_expired_url_cache_txn
         )
 
@@ -358,7 +360,9 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
         def _delete_url_cache_txn(txn):
             txn.executemany(sql, [(media_id,) for media_id in media_ids])
 
-        return await self.db.runInteraction("delete_url_cache", _delete_url_cache_txn)
+        return await self.db_pool.runInteraction(
+            "delete_url_cache", _delete_url_cache_txn
+        )
 
     def get_url_cache_media_before(self, before_ts):
         sql = (
@@ -372,7 +376,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
             txn.execute(sql, (before_ts,))
             return [row[0] for row in txn]
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_url_cache_media_before", _get_url_cache_media_before_txn
         )
 
@@ -389,6 +393,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
 
             txn.executemany(sql, [(media_id,) for media_id in media_ids])
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "delete_url_cache_media", _delete_url_cache_media_txn
         )
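
The constructor hunks in this file show the other half of the rename: the type annotation on the injected database moves from Database to DatabasePool, while the attribute the base class exposes becomes db_pool. A minimal sketch of a store written against the renamed API, with the class, table, and column names invented for illustration:

    from synapse.storage._base import SQLBaseStore
    from synapse.storage.database import DatabasePool


    class ExampleMediaStore(SQLBaseStore):
        def __init__(self, database: DatabasePool, db_conn, hs):
            super().__init__(database, db_conn, hs)

        def get_example_media(self, media_id):
            # Row-level helpers hang off the renamed pool attribute.
            return self.db_pool.simple_select_one(
                table="example_media",
                keyvalues={"media_id": media_id},
                retcols=("media_type", "media_length", "created_ts"),
                allow_none=True,
                desc="get_example_media",
            )
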
diff --git a/synapse/storage/data_stores/main/metrics.py b/synapse/storage/databases/main/metrics.py
index dad5bbc602..baa7a5092a 100644
--- a/synapse/storage/data_stores/main/metrics.py
+++ b/synapse/storage/databases/main/metrics.py
@@ -20,10 +20,10 @@ from twisted.internet import defer
 from synapse.metrics import BucketCollector
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.data_stores.main.event_push_actions import (
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.event_push_actions import (
     EventPushActionsWorkerStore,
 )
-from synapse.storage.database import Database
 
 
 class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
@@ -31,7 +31,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
     stats and prometheus metrics.
     """
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super().__init__(database, db_conn, hs)
 
         # Collect metrics on the number of forward extremities that exist.
@@ -66,7 +66,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
             )
             return txn.fetchall()
 
-        res = await self.db.runInteraction("read_forward_extremities", fetch)
+        res = await self.db_pool.runInteraction("read_forward_extremities", fetch)
         self._current_forward_extremities_amount = Counter([x[0] for x in res])
 
     @defer.inlineCallbacks
@@ -88,7 +88,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
             (count,) = txn.fetchone()
             return count
 
-        ret = yield self.db.runInteraction("count_messages", _count_messages)
+        ret = yield self.db_pool.runInteraction("count_messages", _count_messages)
         return ret
 
     @defer.inlineCallbacks
@@ -109,7 +109,9 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
             (count,) = txn.fetchone()
             return count
 
-        ret = yield self.db.runInteraction("count_daily_sent_messages", _count_messages)
+        ret = yield self.db_pool.runInteraction(
+            "count_daily_sent_messages", _count_messages
+        )
         return ret
 
     @defer.inlineCallbacks
@@ -124,5 +126,5 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
             (count,) = txn.fetchone()
             return count
 
-        ret = yield self.db.runInteraction("count_daily_active_rooms", _count)
+        ret = yield self.db_pool.runInteraction("count_daily_active_rooms", _count)
         return ret
diff --git a/synapse/storage/data_stores/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index e459cf49a0..02b01d9619 100644
--- a/synapse/storage/data_stores/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -18,7 +18,7 @@ from typing import List
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import Database, make_in_list_sql_clause
+from synapse.storage.database import DatabasePool, make_in_list_sql_clause
 from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
@@ -29,7 +29,7 @@ LAST_SEEN_GRANULARITY = 60 * 60 * 1000
 
 
 class MonthlyActiveUsersWorkerStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(MonthlyActiveUsersWorkerStore, self).__init__(database, db_conn, hs)
         self._clock = hs.get_clock()
         self.hs = hs
@@ -48,7 +48,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
             (count,) = txn.fetchone()
             return count
 
-        return self.db.runInteraction("count_users", _count_users)
+        return self.db_pool.runInteraction("count_users", _count_users)
 
     @cached(num_args=0)
     def get_monthly_active_count_by_service(self):
@@ -76,7 +76,9 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
             result = txn.fetchall()
             return dict(result)
 
-        return self.db.runInteraction("count_users_by_service", _count_users_by_service)
+        return self.db_pool.runInteraction(
+            "count_users_by_service", _count_users_by_service
+        )
 
     async def get_registered_reserved_users(self) -> List[str]:
         """Of the reserved threepids defined in config, retrieve those that are associated
@@ -109,7 +111,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
 
         """
 
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="monthly_active_users",
             keyvalues={"user_id": user_id},
             retcol="timestamp",
@@ -119,7 +121,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore):
 
 
 class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs)
 
         self._limit_usage_by_mau = hs.config.limit_usage_by_mau
@@ -128,7 +130,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
 
         # Do not add more reserved users than the total allowable number
         # cur = LoggingTransaction(
-        self.db.new_transaction(
+        self.db_pool.new_transaction(
             db_conn,
             "initialise_mau_threepids",
             [],
@@ -162,7 +164,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
                 is_support = self.is_support_user_txn(txn, user_id)
                 if not is_support:
                     # We do this manually here to avoid hitting #6791
-                    self.db.simple_upsert_txn(
+                    self.db_pool.simple_upsert_txn(
                         txn,
                         table="monthly_active_users",
                         keyvalues={"user_id": user_id},
@@ -246,7 +248,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
             self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ())
 
         reserved_users = await self.get_registered_reserved_users()
-        await self.db.runInteraction(
+        await self.db_pool.runInteraction(
             "reap_monthly_active_users", _reap_users, reserved_users
         )
 
@@ -273,7 +275,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
         if is_support:
             return
 
-        yield self.db.runInteraction(
+        yield self.db_pool.runInteraction(
             "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id
         )
 
@@ -303,7 +305,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore):
         # never be a big table and alternative approaches (batching multiple
         # upserts into a single txn) introduced a lot of extra complexity.
         # See https://github.com/matrix-org/synapse/issues/3854 for more
-        is_insert = self.db.simple_upsert_txn(
+        is_insert = self.db_pool.simple_upsert_txn(
             txn,
             table="monthly_active_users",
             keyvalues={"user_id": user_id},
diff --git a/synapse/storage/data_stores/main/openid.py b/synapse/storage/databases/main/openid.py
index cc21437e92..dcd1ff911a 100644
--- a/synapse/storage/data_stores/main/openid.py
+++ b/synapse/storage/databases/main/openid.py
@@ -3,7 +3,7 @@ from synapse.storage._base import SQLBaseStore
 
 class OpenIdStore(SQLBaseStore):
     def insert_open_id_token(self, token, ts_valid_until_ms, user_id):
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             table="open_id_tokens",
             values={
                 "token": token,
@@ -28,6 +28,6 @@ class OpenIdStore(SQLBaseStore):
             else:
                 return rows[0][0]
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_user_id_for_token", get_user_id_for_token_txn
         )
diff --git a/synapse/storage/data_stores/main/presence.py b/synapse/storage/databases/main/presence.py
index 7574612619..99e66dc6e9 100644
--- a/synapse/storage/data_stores/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -31,7 +31,7 @@ class PresenceStore(SQLBaseStore):
         )
 
         with stream_ordering_manager as stream_orderings:
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "update_presence",
                 self._update_presence_txn,
                 stream_orderings,
@@ -48,7 +48,7 @@ class PresenceStore(SQLBaseStore):
             txn.call_after(self._get_presence_for_user.invalidate, (state.user_id,))
 
         # Actually insert new rows
-        self.db.simple_insert_many_txn(
+        self.db_pool.simple_insert_many_txn(
             txn,
             table="presence_stream",
             values=[
@@ -124,7 +124,7 @@ class PresenceStore(SQLBaseStore):
 
             return updates, upper_bound, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_presence_updates", get_all_presence_updates_txn
         )
 
@@ -139,7 +139,7 @@ class PresenceStore(SQLBaseStore):
         inlineCallbacks=True,
     )
     def get_presence_for_users(self, user_ids):
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="presence_stream",
             column="user_id",
             iterable=user_ids,
@@ -165,7 +165,7 @@ class PresenceStore(SQLBaseStore):
         return self._presence_id_gen.get_current_token()
 
     def allow_presence_visible(self, observed_localpart, observer_userid):
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             table="presence_allow_inbound",
             values={
                 "observed_user_id": observed_localpart,
@@ -176,7 +176,7 @@ class PresenceStore(SQLBaseStore):
         )
 
     def disallow_presence_visible(self, observed_localpart, observer_userid):
-        return self.db.simple_delete_one(
+        return self.db_pool.simple_delete_one(
             table="presence_allow_inbound",
             keyvalues={
                 "observed_user_id": observed_localpart,
diff --git a/synapse/storage/data_stores/main/profile.py b/synapse/storage/databases/main/profile.py
index bfc9369f0b..4a4f2cb385 100644
--- a/synapse/storage/data_stores/main/profile.py
+++ b/synapse/storage/databases/main/profile.py
@@ -17,14 +17,14 @@ from twisted.internet import defer
 
 from synapse.api.errors import StoreError
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.data_stores.main.roommember import ProfileInfo
+from synapse.storage.databases.main.roommember import ProfileInfo
 
 
 class ProfileWorkerStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_profileinfo(self, user_localpart):
         try:
-            profile = yield self.db.simple_select_one(
+            profile = yield self.db_pool.simple_select_one(
                 table="profiles",
                 keyvalues={"user_id": user_localpart},
                 retcols=("displayname", "avatar_url"),
@@ -42,7 +42,7 @@ class ProfileWorkerStore(SQLBaseStore):
         )
 
     def get_profile_displayname(self, user_localpart):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="profiles",
             keyvalues={"user_id": user_localpart},
             retcol="displayname",
@@ -50,7 +50,7 @@ class ProfileWorkerStore(SQLBaseStore):
         )
 
     def get_profile_avatar_url(self, user_localpart):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="profiles",
             keyvalues={"user_id": user_localpart},
             retcol="avatar_url",
@@ -58,7 +58,7 @@ class ProfileWorkerStore(SQLBaseStore):
         )
 
     def get_from_remote_profile_cache(self, user_id):
-        return self.db.simple_select_one(
+        return self.db_pool.simple_select_one(
             table="remote_profile_cache",
             keyvalues={"user_id": user_id},
             retcols=("displayname", "avatar_url"),
@@ -67,12 +67,12 @@ class ProfileWorkerStore(SQLBaseStore):
         )
 
     def create_profile(self, user_localpart):
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             table="profiles", values={"user_id": user_localpart}, desc="create_profile"
         )
 
     def set_profile_displayname(self, user_localpart, new_displayname):
-        return self.db.simple_update_one(
+        return self.db_pool.simple_update_one(
             table="profiles",
             keyvalues={"user_id": user_localpart},
             updatevalues={"displayname": new_displayname},
@@ -80,7 +80,7 @@ class ProfileWorkerStore(SQLBaseStore):
         )
 
     def set_profile_avatar_url(self, user_localpart, new_avatar_url):
-        return self.db.simple_update_one(
+        return self.db_pool.simple_update_one(
             table="profiles",
             keyvalues={"user_id": user_localpart},
             updatevalues={"avatar_url": new_avatar_url},
@@ -95,7 +95,7 @@ class ProfileStore(ProfileWorkerStore):
         This should only be called when `is_subscribed_remote_profile_for_user`
         would return true for the user.
         """
-        return self.db.simple_upsert(
+        return self.db_pool.simple_upsert(
             table="remote_profile_cache",
             keyvalues={"user_id": user_id},
             values={
@@ -107,7 +107,7 @@ class ProfileStore(ProfileWorkerStore):
         )
 
     def update_remote_profile_cache(self, user_id, displayname, avatar_url):
-        return self.db.simple_update(
+        return self.db_pool.simple_update(
             table="remote_profile_cache",
             keyvalues={"user_id": user_id},
             updatevalues={
@@ -125,7 +125,7 @@ class ProfileStore(ProfileWorkerStore):
         """
         subscribed = yield self.is_subscribed_remote_profile_for_user(user_id)
         if not subscribed:
-            yield self.db.simple_delete(
+            yield self.db_pool.simple_delete(
                 table="remote_profile_cache",
                 keyvalues={"user_id": user_id},
                 desc="delete_remote_profile_cache",
@@ -144,9 +144,9 @@ class ProfileStore(ProfileWorkerStore):
 
             txn.execute(sql, (last_checked,))
 
-            return self.db.cursor_to_dict(txn)
+            return self.db_pool.cursor_to_dict(txn)
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_remote_profile_cache_entries_that_expire",
             _get_remote_profile_cache_entries_that_expire_txn,
         )
@@ -155,7 +155,7 @@ class ProfileStore(ProfileWorkerStore):
     def is_subscribed_remote_profile_for_user(self, user_id):
         """Check whether we are interested in a remote user's profile.
         """
-        res = yield self.db.simple_select_one_onecol(
+        res = yield self.db_pool.simple_select_one_onecol(
             table="group_users",
             keyvalues={"user_id": user_id},
             retcol="user_id",
@@ -166,7 +166,7 @@ class ProfileStore(ProfileWorkerStore):
         if res:
             return True
 
-        res = yield self.db.simple_select_one_onecol(
+        res = yield self.db_pool.simple_select_one_onecol(
             table="group_invites",
             keyvalues={"user_id": user_id},
             retcol="user_id",
diff --git a/synapse/storage/data_stores/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index b53fe35c33..3526b6fd66 100644
--- a/synapse/storage/data_stores/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -18,7 +18,7 @@ from typing import Any, Tuple
 
 from synapse.api.errors import SynapseError
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.data_stores.main.state import StateGroupWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
 from synapse.types import RoomStreamToken
 
 logger = logging.getLogger(__name__)
@@ -43,7 +43,7 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
             deleted events.
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "purge_history",
             self._purge_history_txn,
             room_id,
@@ -293,7 +293,7 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
             Deferred[List[int]]: The list of state groups to delete.
         """
 
-        return self.db.runInteraction("purge_room", self._purge_room_txn, room_id)
+        return self.db_pool.runInteraction("purge_room", self._purge_room_txn, room_id)
 
     def _purge_room_txn(self, txn, room_id):
         # First we fetch all the state groups that should be deleted, before
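
`runInteraction`, now reached through `db_pool`, runs a named transaction
function and commits or rolls back the whole thing as a unit. A simplified
synchronous sketch of the contract (the real method is asynchronous and runs
on a connection pool; the description string is only a label for logging and
metrics):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE events (event_id TEXT, room_id TEXT)")
conn.execute("INSERT INTO events VALUES ('$1', '!room:x'), ('$2', '!other:x')")


def run_interaction(desc, func, *args):
    """Toy runInteraction: one transaction per call; ``desc`` names it."""
    cur = conn.cursor()
    try:
        result = func(cur, *args)
        conn.commit()
        return result
    except Exception:
        conn.rollback()
        raise


def _purge_room_txn(txn, room_id):
    txn.execute("DELETE FROM events WHERE room_id = ?", (room_id,))
    return txn.rowcount


deleted = run_interaction("purge_room", _purge_room_txn, "!room:x")
print("purged", deleted, "events")  # purged 1 events
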
diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index 6b650f8ba8..5fd899326a 100644
--- a/synapse/storage/data_stores/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -25,12 +25,12 @@ from twisted.internet import defer
 from synapse.push.baserules import list_with_base_rules
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.storage.data_stores.main.appservice import ApplicationServiceWorkerStore
-from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
-from synapse.storage.data_stores.main.pusher import PusherWorkerStore
-from synapse.storage.data_stores.main.receipts import ReceiptsWorkerStore
-from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.appservice import ApplicationServiceWorkerStore
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.pusher import PusherWorkerStore
+from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
 from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
 from synapse.storage.util.id_generators import ChainedIdGenerator
 from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
@@ -79,7 +79,7 @@ class PushRulesWorkerStore(
     # the abstract methods being implemented.
     __metaclass__ = abc.ABCMeta
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(PushRulesWorkerStore, self).__init__(database, db_conn, hs)
 
         if hs.config.worker.worker_app is None:
@@ -91,7 +91,7 @@ class PushRulesWorkerStore(
                 db_conn, "push_rules_stream", "stream_id"
             )
 
-        push_rules_prefill, push_rules_id = self.db.get_cache_dict(
+        push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict(
             db_conn,
             "push_rules_stream",
             entity_column="user_id",
@@ -118,7 +118,7 @@ class PushRulesWorkerStore(
 
     @cachedInlineCallbacks(max_entries=5000)
     def get_push_rules_for_user(self, user_id):
-        rows = yield self.db.simple_select_list(
+        rows = yield self.db_pool.simple_select_list(
             table="push_rules",
             keyvalues={"user_name": user_id},
             retcols=(
@@ -144,7 +144,7 @@ class PushRulesWorkerStore(
 
     @cachedInlineCallbacks(max_entries=5000)
     def get_push_rules_enabled_for_user(self, user_id):
-        results = yield self.db.simple_select_list(
+        results = yield self.db_pool.simple_select_list(
             table="push_rules_enable",
             keyvalues={"user_name": user_id},
             retcols=("user_name", "rule_id", "enabled"),
@@ -166,7 +166,7 @@ class PushRulesWorkerStore(
                 (count,) = txn.fetchone()
                 return bool(count)
 
-            return self.db.runInteraction(
+            return self.db_pool.runInteraction(
                 "have_push_rules_changed", have_push_rules_changed_txn
             )
 
@@ -182,7 +182,7 @@ class PushRulesWorkerStore(
 
         results = {user_id: [] for user_id in user_ids}
 
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="push_rules",
             column="user_name",
             iterable=user_ids,
@@ -344,7 +344,7 @@ class PushRulesWorkerStore(
 
         results = {user_id: {} for user_id in user_ids}
 
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="push_rules_enable",
             column="user_name",
             iterable=user_ids,
@@ -402,7 +402,7 @@ class PushRulesWorkerStore(
 
             return updates, upper_bound, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_push_rule_updates", get_all_push_rule_updates_txn
         )
 
@@ -424,7 +424,7 @@ class PushRuleStore(PushRulesWorkerStore):
         with self._push_rules_stream_id_gen.get_next() as ids:
             stream_id, event_stream_ordering = ids
             if before or after:
-                yield self.db.runInteraction(
+                yield self.db_pool.runInteraction(
                     "_add_push_rule_relative_txn",
                     self._add_push_rule_relative_txn,
                     stream_id,
@@ -438,7 +438,7 @@ class PushRuleStore(PushRulesWorkerStore):
                     after,
                 )
             else:
-                yield self.db.runInteraction(
+                yield self.db_pool.runInteraction(
                     "_add_push_rule_highest_priority_txn",
                     self._add_push_rule_highest_priority_txn,
                     stream_id,
@@ -469,7 +469,7 @@ class PushRuleStore(PushRulesWorkerStore):
 
         relative_to_rule = before or after
 
-        res = self.db.simple_select_one_txn(
+        res = self.db_pool.simple_select_one_txn(
             txn,
             table="push_rules",
             keyvalues={"user_name": user_id, "rule_id": relative_to_rule},
@@ -592,7 +592,7 @@ class PushRuleStore(PushRulesWorkerStore):
             # We didn't update a row with the given rule_id so insert one
             push_rule_id = self._push_rule_id_gen.get_next()
 
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="push_rules",
                 values={
@@ -635,7 +635,7 @@ class PushRuleStore(PushRulesWorkerStore):
         """
 
         def delete_push_rule_txn(txn, stream_id, event_stream_ordering):
-            self.db.simple_delete_one_txn(
+            self.db_pool.simple_delete_one_txn(
                 txn, "push_rules", {"user_name": user_id, "rule_id": rule_id}
             )
 
@@ -645,7 +645,7 @@ class PushRuleStore(PushRulesWorkerStore):
 
         with self._push_rules_stream_id_gen.get_next() as ids:
             stream_id, event_stream_ordering = ids
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "delete_push_rule",
                 delete_push_rule_txn,
                 stream_id,
@@ -656,7 +656,7 @@ class PushRuleStore(PushRulesWorkerStore):
     def set_push_rule_enabled(self, user_id, rule_id, enabled):
         with self._push_rules_stream_id_gen.get_next() as ids:
             stream_id, event_stream_ordering = ids
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "_set_push_rule_enabled_txn",
                 self._set_push_rule_enabled_txn,
                 stream_id,
@@ -670,7 +670,7 @@ class PushRuleStore(PushRulesWorkerStore):
         self, txn, stream_id, event_stream_ordering, user_id, rule_id, enabled
     ):
         new_id = self._push_rules_enable_id_gen.get_next()
-        self.db.simple_upsert_txn(
+        self.db_pool.simple_upsert_txn(
             txn,
             "push_rules_enable",
             {"user_name": user_id, "rule_id": rule_id},
@@ -710,7 +710,7 @@ class PushRuleStore(PushRulesWorkerStore):
                     update_stream=False,
                 )
             else:
-                self.db.simple_update_one_txn(
+                self.db_pool.simple_update_one_txn(
                     txn,
                     "push_rules",
                     {"user_name": user_id, "rule_id": rule_id},
@@ -729,7 +729,7 @@ class PushRuleStore(PushRulesWorkerStore):
 
         with self._push_rules_stream_id_gen.get_next() as ids:
             stream_id, event_stream_ordering = ids
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "set_push_rule_actions",
                 set_push_rule_actions_txn,
                 stream_id,
@@ -749,7 +749,7 @@ class PushRuleStore(PushRulesWorkerStore):
         if data is not None:
             values.update(data)
 
-        self.db.simple_insert_txn(txn, "push_rules_stream", values=values)
+        self.db_pool.simple_insert_txn(txn, "push_rules_stream", values=values)
 
         txn.call_after(self.get_push_rules_for_user.invalidate, (user_id,))
         txn.call_after(self.get_push_rules_enabled_for_user.invalidate, (user_id,))
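
The `txn.call_after(...)` lines kept at the end of this hunk defer cache
invalidation until the transaction has actually committed, so a rolled-back
write never evicts a still-valid cache entry. A toy sketch of that ordering,
with a hypothetical `Txn` wrapper standing in for Synapse's transaction
object:

class Txn:
    """Hypothetical transaction wrapper that queues post-commit callbacks."""

    def __init__(self):
        self._after = []

    def call_after(self, fn, *args):
        self._after.append((fn, args))

    def commit(self):
        # Only once the write is durable do the queued invalidations run.
        for fn, args in self._after:
            fn(*args)


push_rules_cache = {("@alice:x",): ["old rules"]}

txn = Txn()
txn.call_after(push_rules_cache.pop, ("@alice:x",))
assert ("@alice:x",) in push_rules_cache  # not yet: txn hasn't committed
txn.commit()
assert ("@alice:x",) not in push_rules_cache
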
diff --git a/synapse/storage/data_stores/main/pusher.py b/synapse/storage/databases/main/pusher.py
index e18f1ca87c..b5200fbe79 100644
--- a/synapse/storage/data_stores/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -50,7 +50,7 @@ class PusherWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def user_has_pusher(self, user_id):
-        ret = yield self.db.simple_select_one_onecol(
+        ret = yield self.db_pool.simple_select_one_onecol(
             "pushers", {"user_name": user_id}, "id", allow_none=True
         )
         return ret is not None
@@ -63,7 +63,7 @@ class PusherWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def get_pushers_by(self, keyvalues):
-        ret = yield self.db.simple_select_list(
+        ret = yield self.db_pool.simple_select_list(
             "pushers",
             keyvalues,
             [
@@ -91,11 +91,11 @@ class PusherWorkerStore(SQLBaseStore):
     def get_all_pushers(self):
         def get_pushers(txn):
             txn.execute("SELECT * FROM pushers")
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
 
             return self._decode_pushers_rows(rows)
 
-        rows = yield self.db.runInteraction("get_all_pushers", get_pushers)
+        rows = yield self.db_pool.runInteraction("get_all_pushers", get_pushers)
         return rows
 
     async def get_all_updated_pushers_rows(
@@ -160,7 +160,7 @@ class PusherWorkerStore(SQLBaseStore):
 
             return updates, upper_bound, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_updated_pushers_rows", get_all_updated_pushers_rows_txn
         )
 
@@ -176,7 +176,7 @@ class PusherWorkerStore(SQLBaseStore):
         inlineCallbacks=True,
     )
     def get_if_users_have_pushers(self, user_ids):
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="pushers",
             column="user_name",
             iterable=user_ids,
@@ -193,7 +193,7 @@ class PusherWorkerStore(SQLBaseStore):
     def update_pusher_last_stream_ordering(
         self, app_id, pushkey, user_id, last_stream_ordering
     ):
-        yield self.db.simple_update_one(
+        yield self.db_pool.simple_update_one(
             "pushers",
             {"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
             {"last_stream_ordering": last_stream_ordering},
@@ -216,7 +216,7 @@ class PusherWorkerStore(SQLBaseStore):
         Returns:
             Deferred[bool]: True if the pusher still exists; False if it has been deleted.
         """
-        updated = yield self.db.simple_update(
+        updated = yield self.db_pool.simple_update(
             table="pushers",
             keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
             updatevalues={
@@ -230,7 +230,7 @@ class PusherWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def update_pusher_failing_since(self, app_id, pushkey, user_id, failing_since):
-        yield self.db.simple_update(
+        yield self.db_pool.simple_update(
             table="pushers",
             keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
             updatevalues={"failing_since": failing_since},
@@ -239,7 +239,7 @@ class PusherWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def get_throttle_params_by_room(self, pusher_id):
-        res = yield self.db.simple_select_list(
+        res = yield self.db_pool.simple_select_list(
             "pusher_throttle",
             {"pusher": pusher_id},
             ["room_id", "last_sent_ts", "throttle_ms"],
@@ -259,7 +259,7 @@ class PusherWorkerStore(SQLBaseStore):
     def set_throttle_params(self, pusher_id, room_id, params):
         # no need to lock because `pusher_throttle` has a primary key on
         # (pusher, room_id) so simple_upsert will retry
-        yield self.db.simple_upsert(
+        yield self.db_pool.simple_upsert(
             "pusher_throttle",
             {"pusher": pusher_id, "room_id": room_id},
             params,
@@ -291,7 +291,7 @@ class PusherStore(PusherWorkerStore):
         with self._pushers_id_gen.get_next() as stream_id:
             # no need to lock because `pushers` has a unique key on
             # (app_id, pushkey, user_name) so simple_upsert will retry
-            yield self.db.simple_upsert(
+            yield self.db_pool.simple_upsert(
                 table="pushers",
                 keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
                 values={
@@ -316,7 +316,7 @@ class PusherStore(PusherWorkerStore):
 
             if user_has_pusher is not True:
                 # invalidate, since the user might not have had a pusher before
-                yield self.db.runInteraction(
+                yield self.db_pool.runInteraction(
                     "add_pusher",
                     self._invalidate_cache_and_stream,
                     self.get_if_user_has_pusher,
@@ -330,7 +330,7 @@ class PusherStore(PusherWorkerStore):
                 txn, self.get_if_user_has_pusher, (user_id,)
             )
 
-            self.db.simple_delete_one_txn(
+            self.db_pool.simple_delete_one_txn(
                 txn,
                 "pushers",
                 {"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
@@ -339,7 +339,7 @@ class PusherStore(PusherWorkerStore):
             # it's possible for us to end up with duplicate rows for
             # (app_id, pushkey, user_id) at different stream_ids, but that
             # doesn't really matter.
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="deleted_pushers",
                 values={
@@ -351,4 +351,6 @@ class PusherStore(PusherWorkerStore):
             )
 
         with self._pushers_id_gen.get_next() as stream_id:
-            yield self.db.runInteraction("delete_pusher", delete_pusher_txn, stream_id)
+            yield self.db_pool.runInteraction(
+                "delete_pusher", delete_pusher_txn, stream_id
+            )
diff --git a/synapse/storage/data_stores/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 1d723f2d34..6255977c92 100644
--- a/synapse/storage/data_stores/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -23,7 +23,7 @@ from canonicaljson import json
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.storage.util.id_generators import StreamIdGenerator
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
@@ -41,7 +41,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
     # the abstract methods being implemented.
     __metaclass__ = abc.ABCMeta
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(ReceiptsWorkerStore, self).__init__(database, db_conn, hs)
 
         self._receipts_stream_cache = StreamChangeCache(
@@ -64,7 +64,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
     @cached(num_args=2)
     def get_receipts_for_room(self, room_id, receipt_type):
-        return self.db.simple_select_list(
+        return self.db_pool.simple_select_list(
             table="receipts_linearized",
             keyvalues={"room_id": room_id, "receipt_type": receipt_type},
             retcols=("user_id", "event_id"),
@@ -73,7 +73,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
     @cached(num_args=3)
     def get_last_receipt_event_id_for_user(self, user_id, room_id, receipt_type):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="receipts_linearized",
             keyvalues={
                 "room_id": room_id,
@@ -87,7 +87,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
     @cachedInlineCallbacks(num_args=2)
     def get_receipts_for_user(self, user_id, receipt_type):
-        rows = yield self.db.simple_select_list(
+        rows = yield self.db_pool.simple_select_list(
             table="receipts_linearized",
             keyvalues={"user_id": user_id, "receipt_type": receipt_type},
             retcols=("room_id", "event_id"),
@@ -111,7 +111,9 @@ class ReceiptsWorkerStore(SQLBaseStore):
             txn.execute(sql, (user_id,))
             return txn.fetchall()
 
-        rows = yield self.db.runInteraction("get_receipts_for_user_with_orderings", f)
+        rows = yield self.db_pool.runInteraction(
+            "get_receipts_for_user_with_orderings", f
+        )
         return {
             row[0]: {
                 "event_id": row[1],
@@ -190,11 +192,11 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
                 txn.execute(sql, (room_id, to_key))
 
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
 
             return rows
 
-        rows = yield self.db.runInteraction("get_linearized_receipts_for_room", f)
+        rows = yield self.db_pool.runInteraction("get_linearized_receipts_for_room", f)
 
         if not rows:
             return []
@@ -240,9 +242,9 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
                 txn.execute(sql + clause, [to_key] + list(args))
 
-            return self.db.cursor_to_dict(txn)
+            return self.db_pool.cursor_to_dict(txn)
 
-        txn_results = yield self.db.runInteraction(
+        txn_results = yield self.db_pool.runInteraction(
             "_get_linearized_receipts_for_rooms", f
         )
 
@@ -288,7 +290,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
             return [r[0] for r in txn]
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_users_sent_receipts_between", _get_users_sent_receipts_between_txn
         )
 
@@ -340,7 +342,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
             return updates, upper_bound, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_updated_receipts", get_all_updated_receipts_txn
         )
 
@@ -371,7 +373,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
 
 class ReceiptsStore(ReceiptsWorkerStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         # We instantiate this first as the ReceiptsWorkerStore constructor
         # needs to be able to call get_max_receipt_stream_id
         self._receipts_id_gen = StreamIdGenerator(
@@ -393,7 +395,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
             otherwise, the rx timestamp of the event that the RR corresponds to
                 (or 0 if the event is unknown)
         """
-        res = self.db.simple_select_one_txn(
+        res = self.db_pool.simple_select_one_txn(
             txn,
             table="events",
             retcols=["stream_ordering", "received_ts"],
@@ -446,7 +448,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
             (user_id, room_id, receipt_type),
         )
 
-        self.db.simple_upsert_txn(
+        self.db_pool.simple_upsert_txn(
             txn,
             table="receipts_linearized",
             keyvalues={
@@ -506,13 +508,13 @@ class ReceiptsStore(ReceiptsWorkerStore):
                 else:
                     raise RuntimeError("Unrecognized event_ids: %r" % (event_ids,))
 
-            linearized_event_id = yield self.db.runInteraction(
+            linearized_event_id = yield self.db_pool.runInteraction(
                 "insert_receipt_conv", graph_to_linear
             )
 
         stream_id_manager = self._receipts_id_gen.get_next()
         with stream_id_manager as stream_id:
-            event_ts = yield self.db.runInteraction(
+            event_ts = yield self.db_pool.runInteraction(
                 "insert_linearized_receipt",
                 self.insert_linearized_receipt_txn,
                 room_id,
@@ -541,7 +543,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
         return stream_id, max_persisted_id
 
     def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data):
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "insert_graph_receipt",
             self.insert_graph_receipt_txn,
             room_id,
@@ -567,7 +569,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
             self._get_linearized_receipts_for_room.invalidate_many, (room_id,)
         )
 
-        self.db.simple_delete_txn(
+        self.db_pool.simple_delete_txn(
             txn,
             table="receipts_graph",
             keyvalues={
@@ -576,7 +578,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
                 "user_id": user_id,
             },
         )
-        self.db.simple_insert_txn(
+        self.db_pool.simple_insert_txn(
             txn,
             table="receipts_graph",
             values={
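
Several hunks in this file keep the `with self._receipts_id_gen.get_next() as
stream_id:` idiom: a stream ID is allocated up front and the write happens
inside the block. A much-simplified sketch of the shape (toy generator; the
real `StreamIdGenerator` also tracks in-flight writers so that readers only
advance past completed IDs):

from contextlib import contextmanager


class StreamIdGen:
    """Toy monotonic stream-id allocator."""

    def __init__(self, start=0):
        self._current = start

    @contextmanager
    def get_next(self):
        self._current += 1
        # In the real generator the new id only becomes visible to readers
        # once the with-block (i.e. the write it wraps) has finished.
        yield self._current

    def get_current_token(self):
        return self._current


gen = StreamIdGen()
with gen.get_next() as stream_id:
    print("persisting receipt at stream id", stream_id)  # 1
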
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/databases/main/registration.py
index 27d2c5028c..f618629e09 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -26,7 +26,7 @@ from synapse.api.constants import UserTypes
 from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.storage.types import Cursor
 from synapse.storage.util.sequence import build_sequence_generator
 from synapse.types import UserID
@@ -38,7 +38,7 @@ logger = logging.getLogger(__name__)
 
 
 class RegistrationWorkerStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RegistrationWorkerStore, self).__init__(database, db_conn, hs)
 
         self.config = hs.config
@@ -50,7 +50,7 @@ class RegistrationWorkerStore(SQLBaseStore):
 
     @cached()
     def get_user_by_id(self, user_id):
-        return self.db.simple_select_one(
+        return self.db_pool.simple_select_one(
             table="users",
             keyvalues={"name": user_id},
             retcols=[
@@ -101,7 +101,7 @@ class RegistrationWorkerStore(SQLBaseStore):
                 including the keys `name`, `is_guest`, `device_id`, `token_id`,
                 `valid_until_ms`.
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_user_by_access_token", self._query_for_auth, token
         )
 
@@ -116,7 +116,7 @@ class RegistrationWorkerStore(SQLBaseStore):
                 otherwise int representation of the timestamp (as a number of
                 milliseconds since epoch).
         """
-        res = yield self.db.simple_select_one_onecol(
+        res = yield self.db_pool.simple_select_one_onecol(
             table="account_validity",
             keyvalues={"user_id": user_id},
             retcol="expiration_ts_ms",
@@ -144,7 +144,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         """
 
         def set_account_validity_for_user_txn(txn):
-            self.db.simple_update_txn(
+            self.db_pool.simple_update_txn(
                 txn=txn,
                 table="account_validity",
                 keyvalues={"user_id": user_id},
@@ -158,7 +158,7 @@ class RegistrationWorkerStore(SQLBaseStore):
                 txn, self.get_expiration_ts_for_user, (user_id,)
             )
 
-        yield self.db.runInteraction(
+        yield self.db_pool.runInteraction(
             "set_account_validity_for_user", set_account_validity_for_user_txn
         )
 
@@ -174,7 +174,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Raises:
             StoreError: The provided token is already set for another user.
         """
-        yield self.db.simple_update_one(
+        yield self.db_pool.simple_update_one(
             table="account_validity",
             keyvalues={"user_id": user_id},
             updatevalues={"renewal_token": renewal_token},
@@ -191,7 +191,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns:
             defer.Deferred[str]: The ID of the user to which the token belongs.
         """
-        res = yield self.db.simple_select_one_onecol(
+        res = yield self.db_pool.simple_select_one_onecol(
             table="account_validity",
             keyvalues={"renewal_token": renewal_token},
             retcol="user_id",
@@ -210,7 +210,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns:
             defer.Deferred[str]: The renewal token associated with this user ID.
         """
-        res = yield self.db.simple_select_one_onecol(
+        res = yield self.db_pool.simple_select_one_onecol(
             table="account_validity",
             keyvalues={"user_id": user_id},
             retcol="renewal_token",
@@ -236,9 +236,9 @@ class RegistrationWorkerStore(SQLBaseStore):
             )
             values = [False, now_ms, renew_at]
             txn.execute(sql, values)
-            return self.db.cursor_to_dict(txn)
+            return self.db_pool.cursor_to_dict(txn)
 
-        res = yield self.db.runInteraction(
+        res = yield self.db_pool.runInteraction(
             "get_users_expiring_soon",
             select_users_txn,
             self.clock.time_msec(),
@@ -257,7 +257,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             email_sent (bool): Flag which indicates whether a renewal email has been sent
                 to this user.
         """
-        yield self.db.simple_update_one(
+        yield self.db_pool.simple_update_one(
             table="account_validity",
             keyvalues={"user_id": user_id},
             updatevalues={"email_sent": email_sent},
@@ -272,7 +272,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Args:
             user_id (str): ID of the user to remove from the account validity table.
         """
-        yield self.db.simple_delete_one(
+        yield self.db_pool.simple_delete_one(
             table="account_validity",
             keyvalues={"user_id": user_id},
             desc="delete_account_validity_for_user",
@@ -287,7 +287,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns (bool):
             true iff the user is a server admin, false otherwise.
         """
-        res = await self.db.simple_select_one_onecol(
+        res = await self.db_pool.simple_select_one_onecol(
             table="users",
             keyvalues={"name": user.to_string()},
             retcol="admin",
@@ -307,14 +307,14 @@ class RegistrationWorkerStore(SQLBaseStore):
         """
 
         def set_server_admin_txn(txn):
-            self.db.simple_update_one_txn(
+            self.db_pool.simple_update_one_txn(
                 txn, "users", {"name": user.to_string()}, {"admin": 1 if admin else 0}
             )
             self._invalidate_cache_and_stream(
                 txn, self.get_user_by_id, (user.to_string(),)
             )
 
-        return self.db.runInteraction("set_server_admin", set_server_admin_txn)
+        return self.db_pool.runInteraction("set_server_admin", set_server_admin_txn)
 
     def _query_for_auth(self, txn, token):
         sql = (
@@ -326,7 +326,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         )
 
         txn.execute(sql, (token,))
-        rows = self.db.cursor_to_dict(txn)
+        rows = self.db_pool.cursor_to_dict(txn)
         if rows:
             return rows[0]
 
@@ -342,7 +342,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns:
             Deferred[bool]: True if the user's 'user_type' is null or an empty string
         """
-        res = yield self.db.runInteraction(
+        res = yield self.db_pool.runInteraction(
             "is_real_user", self.is_real_user_txn, user_id
         )
         return res
@@ -357,12 +357,12 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns:
             Deferred[bool]: True if user is of type UserTypes.SUPPORT
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "is_support_user", self.is_support_user_txn, user_id
         )
 
     def is_real_user_txn(self, txn, user_id):
-        res = self.db.simple_select_one_onecol_txn(
+        res = self.db_pool.simple_select_one_onecol_txn(
             txn=txn,
             table="users",
             keyvalues={"name": user_id},
@@ -372,7 +372,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         return res is None
 
     def is_support_user_txn(self, txn, user_id):
-        res = self.db.simple_select_one_onecol_txn(
+        res = self.db_pool.simple_select_one_onecol_txn(
             txn=txn,
             table="users",
             keyvalues={"name": user_id},
@@ -391,7 +391,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             txn.execute(sql, (user_id,))
             return dict(txn)
 
-        return self.db.runInteraction("get_users_by_id_case_insensitive", f)
+        return self.db_pool.runInteraction("get_users_by_id_case_insensitive", f)
 
     async def get_user_by_external_id(
         self, auth_provider: str, external_id: str
@@ -405,7 +405,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns:
             str|None: the mxid of the user, or None if they are not known
         """
-        return await self.db.simple_select_one_onecol(
+        return await self.db_pool.simple_select_one_onecol(
             table="user_external_ids",
             keyvalues={"auth_provider": auth_provider, "external_id": external_id},
             retcol="user_id",
@@ -419,12 +419,12 @@ class RegistrationWorkerStore(SQLBaseStore):
 
         def _count_users(txn):
             txn.execute("SELECT COUNT(*) AS users FROM users")
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
             if rows:
                 return rows[0]["users"]
             return 0
 
-        ret = yield self.db.runInteraction("count_users", _count_users)
+        ret = yield self.db_pool.runInteraction("count_users", _count_users)
         return ret
 
     def count_daily_user_type(self):
@@ -456,7 +456,9 @@ class RegistrationWorkerStore(SQLBaseStore):
                 results[row[0]] = row[1]
             return results
 
-        return self.db.runInteraction("count_daily_user_type", _count_daily_user_type)
+        return self.db_pool.runInteraction(
+            "count_daily_user_type", _count_daily_user_type
+        )
 
     @defer.inlineCallbacks
     def count_nonbridged_users(self):
@@ -470,7 +472,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             (count,) = txn.fetchone()
             return count
 
-        ret = yield self.db.runInteraction("count_users", _count_users)
+        ret = yield self.db_pool.runInteraction("count_users", _count_users)
         return ret
 
     @defer.inlineCallbacks
@@ -479,12 +481,12 @@ class RegistrationWorkerStore(SQLBaseStore):
 
         def _count_users(txn):
             txn.execute("SELECT COUNT(*) AS users FROM users where user_type is null")
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
             if rows:
                 return rows[0]["users"]
             return 0
 
-        ret = yield self.db.runInteraction("count_real_users", _count_users)
+        ret = yield self.db_pool.runInteraction("count_real_users", _count_users)
         return ret
 
     async def generate_user_id(self) -> str:
@@ -492,7 +494,7 @@ class RegistrationWorkerStore(SQLBaseStore):
 
         Returns: a (hopefully) free localpart
         """
-        next_id = await self.db.runInteraction(
+        next_id = await self.db_pool.runInteraction(
             "generate_user_id", self._user_id_seq.get_next_id_txn
         )
 
@@ -508,7 +510,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns:
             The user ID or None if no user id/threepid mapping exists
         """
-        user_id = await self.db.runInteraction(
+        user_id = await self.db_pool.runInteraction(
             "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, medium, address
         )
         return user_id
@@ -524,7 +526,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns:
             str|None: user id or None if no user id/threepid mapping exists
         """
-        ret = self.db.simple_select_one_txn(
+        ret = self.db_pool.simple_select_one_txn(
             txn,
             "user_threepids",
             {"medium": medium, "address": address},
@@ -537,7 +539,7 @@ class RegistrationWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
-        yield self.db.simple_upsert(
+        yield self.db_pool.simple_upsert(
             "user_threepids",
             {"medium": medium, "address": address},
             {"user_id": user_id, "validated_at": validated_at, "added_at": added_at},
@@ -545,7 +547,7 @@ class RegistrationWorkerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def user_get_threepids(self, user_id):
-        ret = yield self.db.simple_select_list(
+        ret = yield self.db_pool.simple_select_list(
             "user_threepids",
             {"user_id": user_id},
             ["medium", "address", "validated_at", "added_at"],
@@ -554,7 +556,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         return ret
 
     def user_delete_threepid(self, user_id, medium, address):
-        return self.db.simple_delete(
+        return self.db_pool.simple_delete(
             "user_threepids",
             keyvalues={"user_id": user_id, "medium": medium, "address": address},
             desc="user_delete_threepid",
@@ -567,7 +569,7 @@ class RegistrationWorkerStore(SQLBaseStore):
              user_id: The user id to delete all threepids of
 
         """
-        return self.db.simple_delete(
+        return self.db_pool.simple_delete(
             "user_threepids",
             keyvalues={"user_id": user_id},
             desc="user_delete_threepids",
@@ -589,7 +591,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         """
         # We need to use an upsert, in case the user had already bound the
         # threepid
-        return self.db.simple_upsert(
+        return self.db_pool.simple_upsert(
             table="user_threepid_id_server",
             keyvalues={
                 "user_id": user_id,
@@ -615,7 +617,7 @@ class RegistrationWorkerStore(SQLBaseStore):
                 medium (str): The medium of the threepid (e.g "email")
                 address (str): The address of the threepid (e.g "bob@example.com")
         """
-        return self.db.simple_select_list(
+        return self.db_pool.simple_select_list(
             table="user_threepid_id_server",
             keyvalues={"user_id": user_id},
             retcols=["medium", "address"],
@@ -636,7 +638,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns:
             Deferred
         """
-        return self.db.simple_delete(
+        return self.db_pool.simple_delete(
             table="user_threepid_id_server",
             keyvalues={
                 "user_id": user_id,
@@ -659,7 +661,7 @@ class RegistrationWorkerStore(SQLBaseStore):
         Returns:
             Deferred[list[str]]: Resolves to a list of identity servers
         """
-        return self.db.simple_select_onecol(
+        return self.db_pool.simple_select_onecol(
             table="user_threepid_id_server",
             keyvalues={"user_id": user_id, "medium": medium, "address": address},
             retcol="id_server",
@@ -677,7 +679,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             defer.Deferred(bool): The requested value.
         """
 
-        res = yield self.db.simple_select_one_onecol(
+        res = yield self.db_pool.simple_select_one_onecol(
             table="users",
             keyvalues={"name": user_id},
             retcol="deactivated",
@@ -744,13 +746,13 @@ class RegistrationWorkerStore(SQLBaseStore):
             sql += " LIMIT 1"
 
             txn.execute(sql, list(keyvalues.values()))
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
             if not rows:
                 return None
 
             return rows[0]
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_threepid_validation_session", get_threepid_validation_session_txn
         )
 
@@ -764,37 +766,37 @@ class RegistrationWorkerStore(SQLBaseStore):
         """
 
         def delete_threepid_session_txn(txn):
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="threepid_validation_token",
                 keyvalues={"session_id": session_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="threepid_validation_session",
                 keyvalues={"session_id": session_id},
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "delete_threepid_session", delete_threepid_session_txn
         )
 
 
 class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RegistrationBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
         self.clock = hs.get_clock()
         self.config = hs.config
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "access_tokens_device_index",
             index_name="access_tokens_device_id",
             table="access_tokens",
             columns=["user_id", "device_id"],
         )
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "users_creation_ts",
             index_name="users_creation_ts",
             table="users",
@@ -804,13 +806,15 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
         # we no longer use refresh tokens, but it's possible that some people
         # might have a background update queued to build this index. Just
         # clear the background update.
-        self.db.updates.register_noop_background_update("refresh_tokens_device_index")
+        self.db_pool.updates.register_noop_background_update(
+            "refresh_tokens_device_index"
+        )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "user_threepids_grandfather", self._bg_user_threepids_grandfather
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "users_set_deactivated_flag", self._background_update_set_deactivated_flag
         )
 
@@ -843,7 +847,7 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
                 (last_user, batch_size),
             )
 
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
 
             if not rows:
                 return True, 0
@@ -857,7 +861,7 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
 
             logger.info("Marked %d rows as deactivated", rows_processed_nb)
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, "users_set_deactivated_flag", {"user_id": rows[-1]["name"]}
             )
 
@@ -866,12 +870,14 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
             else:
                 return False, len(rows)
 
-        end, nb_processed = yield self.db.runInteraction(
+        end, nb_processed = yield self.db_pool.runInteraction(
             "users_set_deactivated_flag", _background_update_set_deactivated_flag_txn
         )
 
         if end:
-            yield self.db.updates._end_background_update("users_set_deactivated_flag")
+            yield self.db_pool.updates._end_background_update(
+                "users_set_deactivated_flag"
+            )
 
         return nb_processed
 
@@ -897,17 +903,17 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
             txn.executemany(sql, [(id_server,) for id_server in id_servers])
 
         if id_servers:
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "_bg_user_threepids_grandfather", _bg_user_threepids_grandfather_txn
             )
 
-        yield self.db.updates._end_background_update("user_threepids_grandfather")
+        yield self.db_pool.updates._end_background_update("user_threepids_grandfather")
 
         return 1
 
 
 class RegistrationStore(RegistrationBackgroundUpdateStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RegistrationStore, self).__init__(database, db_conn, hs)
 
         self._account_validity = hs.config.account_validity
@@ -947,7 +953,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         """
         next_id = self._access_tokens_id_gen.get_next()
 
-        yield self.db.simple_insert(
+        yield self.db_pool.simple_insert(
             "access_tokens",
             {
                 "id": next_id,
@@ -992,7 +998,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         Returns:
             Deferred
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "register_user",
             self._register_user,
             user_id,
@@ -1026,7 +1032,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
                 # Ensure that the guest user actually exists
                 # ``allow_none=False`` makes this raise an exception
                 # if the row isn't in the database.
-                self.db.simple_select_one_txn(
+                self.db_pool.simple_select_one_txn(
                     txn,
                     "users",
                     keyvalues={"name": user_id, "is_guest": 1},
@@ -1034,7 +1040,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
                     allow_none=False,
                 )
 
-                self.db.simple_update_one_txn(
+                self.db_pool.simple_update_one_txn(
                     txn,
                     "users",
                     keyvalues={"name": user_id, "is_guest": 1},
@@ -1048,7 +1054,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
                     },
                 )
             else:
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn,
                     "users",
                     values={
@@ -1103,7 +1109,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             external_id: id on that system
             user_id: complete mxid that it is mapped to
         """
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             table="user_external_ids",
             values={
                 "auth_provider": auth_provider,
@@ -1121,12 +1127,12 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         """
 
         def user_set_password_hash_txn(txn):
-            self.db.simple_update_one_txn(
+            self.db_pool.simple_update_one_txn(
                 txn, "users", {"name": user_id}, {"password_hash": password_hash}
             )
             self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "user_set_password_hash", user_set_password_hash_txn
         )
 
@@ -1143,7 +1149,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         """
 
         def f(txn):
-            self.db.simple_update_one_txn(
+            self.db_pool.simple_update_one_txn(
                 txn,
                 table="users",
                 keyvalues={"name": user_id},
@@ -1151,7 +1157,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             )
             self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
 
-        return self.db.runInteraction("user_set_consent_version", f)
+        return self.db_pool.runInteraction("user_set_consent_version", f)
 
     def user_set_consent_server_notice_sent(self, user_id, consent_version):
         """Updates the user table to record that we have sent the user a server
@@ -1167,7 +1173,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         """
 
         def f(txn):
-            self.db.simple_update_one_txn(
+            self.db_pool.simple_update_one_txn(
                 txn,
                 table="users",
                 keyvalues={"name": user_id},
@@ -1175,7 +1181,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             )
             self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
 
-        return self.db.runInteraction("user_set_consent_server_notice_sent", f)
+        return self.db_pool.runInteraction("user_set_consent_server_notice_sent", f)
 
     def user_delete_access_tokens(self, user_id, except_token_id=None, device_id=None):
         """
@@ -1221,11 +1227,11 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
 
             return tokens_and_devices
 
-        return self.db.runInteraction("user_delete_access_tokens", f)
+        return self.db_pool.runInteraction("user_delete_access_tokens", f)
 
     def delete_access_token(self, access_token):
         def f(txn):
-            self.db.simple_delete_one_txn(
+            self.db_pool.simple_delete_one_txn(
                 txn, table="access_tokens", keyvalues={"token": access_token}
             )
 
@@ -1233,11 +1239,11 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
                 txn, self.get_user_by_access_token, (access_token,)
             )
 
-        return self.db.runInteraction("delete_access_token", f)
+        return self.db_pool.runInteraction("delete_access_token", f)
 
     @cachedInlineCallbacks()
     def is_guest(self, user_id):
-        res = yield self.db.simple_select_one_onecol(
+        res = yield self.db_pool.simple_select_one_onecol(
             table="users",
             keyvalues={"name": user_id},
             retcol="is_guest",
@@ -1252,7 +1258,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         Adds a user to the table of users who need to be parted from all the rooms they're
         in
         """
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             "users_pending_deactivation",
             values={"user_id": user_id},
             desc="add_user_pending_deactivation",
@@ -1265,7 +1271,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         """
         # XXX: This should be simple_delete_one but we failed to put a unique index on
         # the table, so somehow duplicate entries have ended up in it.
-        return self.db.simple_delete(
+        return self.db_pool.simple_delete(
             "users_pending_deactivation",
             keyvalues={"user_id": user_id},
             desc="del_user_pending_deactivation",
@@ -1276,7 +1282,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         Gets one user from the table of users waiting to be parted from all the rooms
         they're in.
         """
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             "users_pending_deactivation",
             keyvalues={},
             retcol="user_id",
@@ -1306,7 +1312,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
 
         # Run everything inside a single transaction so it executes atomically
         def validate_threepid_session_txn(txn):
-            row = self.db.simple_select_one_txn(
+            row = self.db_pool.simple_select_one_txn(
                 txn,
                 table="threepid_validation_session",
                 keyvalues={"session_id": session_id},
@@ -1324,7 +1330,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
                     400, "This client_secret does not match the provided session_id"
                 )
 
-            row = self.db.simple_select_one_txn(
+            row = self.db_pool.simple_select_one_txn(
                 txn,
                 table="threepid_validation_token",
                 keyvalues={"session_id": session_id, "token": token},
@@ -1349,7 +1355,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
                 )
 
             # Looks good. Validate the session
-            self.db.simple_update_txn(
+            self.db_pool.simple_update_txn(
                 txn,
                 table="threepid_validation_session",
                 keyvalues={"session_id": session_id},
@@ -1359,7 +1365,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             return next_link
 
         # Return next_link if it exists
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "validate_threepid_session_txn", validate_threepid_session_txn
         )
 
@@ -1392,7 +1398,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         if validated_at:
             insertion_values["validated_at"] = validated_at
 
-        return self.db.simple_upsert(
+        return self.db_pool.simple_upsert(
             table="threepid_validation_session",
             keyvalues={"session_id": session_id},
             values={"last_send_attempt": send_attempt},
@@ -1430,7 +1436,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
 
         def start_or_continue_validation_session_txn(txn):
             # Create or update a validation session
-            self.db.simple_upsert_txn(
+            self.db_pool.simple_upsert_txn(
                 txn,
                 table="threepid_validation_session",
                 keyvalues={"session_id": session_id},
@@ -1443,7 +1449,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             )
 
             # Create a new validation token with this session ID
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="threepid_validation_token",
                 values={
@@ -1454,7 +1460,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
                 },
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "start_or_continue_validation_session",
             start_or_continue_validation_session_txn,
         )
@@ -1469,7 +1475,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             """
             return txn.execute(sql, (ts,))
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "cull_expired_threepid_validation_tokens",
             cull_expired_threepid_validation_tokens_txn,
             self.clock.time_msec(),
@@ -1484,7 +1490,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             deactivated (bool): The value to set for `deactivated`.
         """
 
-        yield self.db.runInteraction(
+        yield self.db_pool.runInteraction(
             "set_user_deactivated_status",
             self.set_user_deactivated_status_txn,
             user_id,
@@ -1492,7 +1498,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
         )
 
     def set_user_deactivated_status_txn(self, txn, user_id, deactivated):
-        self.db.simple_update_one_txn(
+        self.db_pool.simple_update_one_txn(
             txn=txn,
             table="users",
             keyvalues={"name": user_id},
@@ -1520,14 +1526,14 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             )
             txn.execute(sql, [])
 
-            res = self.db.cursor_to_dict(txn)
+            res = self.db_pool.cursor_to_dict(txn)
             if res:
                 for user in res:
                     self.set_expiration_date_for_user_txn(
                         txn, user["name"], use_delta=True
                     )
 
-        yield self.db.runInteraction(
+        yield self.db_pool.runInteraction(
             "get_users_with_no_expiration_date",
             select_users_with_no_expiration_date_txn,
         )
@@ -1551,7 +1557,7 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
                 expiration_ts,
             )
 
-        self.db.simple_upsert_txn(
+        self.db_pool.simple_upsert_txn(
             txn,
             "account_validity",
             keyvalues={"user_id": user_id},
diff --git a/synapse/storage/data_stores/main/rejections.py b/synapse/storage/databases/main/rejections.py
index 27e5a2084a..cf9ba51205 100644
--- a/synapse/storage/data_stores/main/rejections.py
+++ b/synapse/storage/databases/main/rejections.py
@@ -22,7 +22,7 @@ logger = logging.getLogger(__name__)
 
 class RejectionsStore(SQLBaseStore):
     def get_rejection_reason(self, event_id):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="rejections",
             retcol="reason",
             keyvalues={"event_id": event_id},
diff --git a/synapse/storage/data_stores/main/relations.py b/synapse/storage/databases/main/relations.py
index 7d477f8d01..b81f1449b7 100644
--- a/synapse/storage/data_stores/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -19,7 +19,7 @@ import attr
 
 from synapse.api.constants import RelationTypes
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.data_stores.main.stream import generate_pagination_where_clause
+from synapse.storage.databases.main.stream import generate_pagination_where_clause
 from synapse.storage.relations import (
     AggregationPaginationToken,
     PaginationChunk,
@@ -129,7 +129,7 @@ class RelationsWorkerStore(SQLBaseStore):
                 chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_recent_references_for_event", _get_recent_references_for_event_txn
         )
 
@@ -223,7 +223,7 @@ class RelationsWorkerStore(SQLBaseStore):
                 chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn
         )
 
@@ -268,7 +268,7 @@ class RelationsWorkerStore(SQLBaseStore):
             if row:
                 return row[0]
 
-        edit_id = yield self.db.runInteraction(
+        edit_id = yield self.db_pool.runInteraction(
             "get_applicable_edit", _get_applicable_edit_txn
         )
 
@@ -318,7 +318,7 @@ class RelationsWorkerStore(SQLBaseStore):
 
             return bool(txn.fetchone())
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_if_user_has_annotated_event", _get_if_user_has_annotated_event
         )
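
Besides the attribute rename, this file shows the second half of the refactor: the package `synapse.storage.data_stores.main` becomes `synapse.storage.databases.main`, taking cross-store imports such as `generate_pagination_where_clause` with it. For context, a generic sketch of the keyset-pagination clause such a helper builds; the direction flags and column are assumptions, not the imported function's real signature.

    def pagination_where_clause(direction, column, from_bound, to_bound):
        # Keyset pagination: a backwards page ("b") reads rows strictly
        # below the `from` bound and at-or-above the `to` bound on the
        # ordering column; forwards ("f") mirrors that.
        clauses, args = [], []
        if direction == "b":
            if from_bound is not None:
                clauses.append("{} < ?".format(column))
                args.append(from_bound)
            if to_bound is not None:
                clauses.append("{} >= ?".format(column))
                args.append(to_bound)
        else:
            if from_bound is not None:
                clauses.append("{} > ?".format(column))
                args.append(from_bound)
            if to_bound is not None:
                clauses.append("{} <= ?".format(column))
                args.append(to_bound)
        return " AND ".join(clauses), args

    where, args = pagination_where_clause("b", "stream_ordering", 100, 50)
    # -> "stream_ordering < ? AND stream_ordering >= ?", [100, 50]
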
 
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/databases/main/room.py
index ab48052cdc..f4008e6221 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -27,8 +27,8 @@ from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
 from synapse.api.room_versions import RoomVersion, RoomVersions
 from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.storage.data_stores.main.search import SearchStore
-from synapse.storage.database import Database, LoggingTransaction
+from synapse.storage.database import DatabasePool, LoggingTransaction
+from synapse.storage.databases.main.search import SearchStore
 from synapse.types import ThirdPartyInstanceID
 from synapse.util.caches.descriptors import cached
 
@@ -73,7 +73,7 @@ class RoomSortOrder(Enum):
 
 
 class RoomWorkerStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RoomWorkerStore, self).__init__(database, db_conn, hs)
 
         self.config = hs.config
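
The constructor hunks swap the `Database` type hint for `DatabasePool` while keeping the `(database, db_conn, hs)` signature. A minimal sketch of why the signature matters: each store in the mixin hierarchy forwards the same arguments up, so the pool lands on the shared base exactly once. All class names below are hypothetical stand-ins.

    class DatabasePool:
        """Hypothetical stand-in for the renamed pool class (was `Database`)."""

    class SQLBaseStore:
        def __init__(self, database: DatabasePool, db_conn, hs):
            self.db_pool = database  # previously exposed as `self.db`
            self.hs = hs

    class RoomWorkerStore(SQLBaseStore):
        def __init__(self, database: DatabasePool, db_conn, hs):
            # Every store keeps the same three-argument signature and
            # forwards it up, so the pool reaches the shared base exactly
            # once even in diamond hierarchies like RoomStore's.
            super().__init__(database, db_conn, hs)
            self.config = hs.config

    class FakeHomeserver:
        config = {}

    store = RoomWorkerStore(DatabasePool(), db_conn=None, hs=FakeHomeserver())
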
@@ -86,7 +86,7 @@ class RoomWorkerStore(SQLBaseStore):
         Returns:
             A dict containing the room information, or None if the room is unknown.
         """
-        return self.db.simple_select_one(
+        return self.db_pool.simple_select_one(
             table="rooms",
             keyvalues={"room_id": room_id},
             retcols=("room_id", "is_public", "creator"),
@@ -118,7 +118,7 @@ class RoomWorkerStore(SQLBaseStore):
             txn.execute(sql, [room_id])
             # If the query returned no rows, return None rather than raising.
             try:
-                res = self.db.cursor_to_dict(txn)[0]
+                res = self.db_pool.cursor_to_dict(txn)[0]
             except IndexError:
                 return None
 
@@ -126,12 +126,12 @@ class RoomWorkerStore(SQLBaseStore):
             res["public"] = bool(res["public"])
             return res
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_room_with_stats", get_room_with_stats_txn, room_id
         )
 
     def get_public_room_ids(self):
-        return self.db.simple_select_onecol(
+        return self.db_pool.simple_select_onecol(
             table="rooms",
             keyvalues={"is_public": True},
             retcol="room_id",
@@ -188,7 +188,9 @@ class RoomWorkerStore(SQLBaseStore):
             txn.execute(sql, query_args)
             return txn.fetchone()[0]
 
-        return self.db.runInteraction("count_public_rooms", _count_public_rooms_txn)
+        return self.db_pool.runInteraction(
+            "count_public_rooms", _count_public_rooms_txn
+        )
 
     async def get_largest_public_rooms(
         self,
@@ -320,21 +322,21 @@ class RoomWorkerStore(SQLBaseStore):
         def _get_largest_public_rooms_txn(txn):
             txn.execute(sql, query_args)
 
-            results = self.db.cursor_to_dict(txn)
+            results = self.db_pool.cursor_to_dict(txn)
 
             if not forwards:
                 results.reverse()
 
             return results
 
-        ret_val = await self.db.runInteraction(
+        ret_val = await self.db_pool.runInteraction(
             "get_largest_public_rooms", _get_largest_public_rooms_txn
         )
         return ret_val
 
     @cached(max_entries=10000)
     def is_room_blocked(self, room_id):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="blocked_rooms",
             keyvalues={"room_id": room_id},
             retcol="1",
@@ -502,7 +504,7 @@ class RoomWorkerStore(SQLBaseStore):
             room_count = txn.fetchone()
             return rooms, room_count[0]
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_rooms_paginate", _get_rooms_paginate_txn,
         )
 
@@ -519,7 +521,7 @@ class RoomWorkerStore(SQLBaseStore):
             of RatelimitOverride are None or 0 then ratelimiting has been
             disabled for that user entirely.
         """
-        row = await self.db.simple_select_one(
+        row = await self.db_pool.simple_select_one(
             table="ratelimit_override",
             keyvalues={"user_id": user_id},
             retcols=("messages_per_second", "burst_count"),
@@ -561,9 +563,9 @@ class RoomWorkerStore(SQLBaseStore):
                 (room_id,),
             )
 
-            return self.db.cursor_to_dict(txn)
+            return self.db_pool.cursor_to_dict(txn)
 
-        ret = await self.db.runInteraction(
+        ret = await self.db_pool.runInteraction(
             "get_retention_policy_for_room", get_retention_policy_for_room_txn,
         )
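
`cursor_to_dict` (renamed along with everything else on the pool) converts a DB-API cursor's positional rows into dicts keyed by column name. A sketch of that conversion using `cursor.description` from stdlib sqlite3; the schema is made up for the example.

    import sqlite3

    def cursor_to_dict(cursor):
        # Map every remaining row to {column_name: value} using the
        # DB-API cursor.description metadata.
        names = [d[0] for d in cursor.description]
        return [dict(zip(names, row)) for row in cursor.fetchall()]

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE room_retention (room_id TEXT, max_lifetime INT)")
    conn.execute("INSERT INTO room_retention VALUES ('!a:example.org', 86400000)")
    cur = conn.execute("SELECT room_id, max_lifetime FROM room_retention")
    print(cursor_to_dict(cur))
    # [{'room_id': '!a:example.org', 'max_lifetime': 86400000}]
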
 
@@ -613,7 +615,7 @@ class RoomWorkerStore(SQLBaseStore):
 
             return local_media_mxcs, remote_media_mxcs
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_media_ids_in_room", _get_media_mxcs_in_room_txn
         )
 
@@ -630,7 +632,7 @@ class RoomWorkerStore(SQLBaseStore):
                 txn, local_mxcs, remote_mxcs, quarantined_by
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "quarantine_media_in_room", _quarantine_media_in_room_txn
         )
 
@@ -714,7 +716,7 @@ class RoomWorkerStore(SQLBaseStore):
                 txn, local_mxcs, remote_mxcs, quarantined_by
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "quarantine_media_by_user", _quarantine_media_by_id_txn
         )
 
@@ -730,7 +732,7 @@ class RoomWorkerStore(SQLBaseStore):
             local_media_ids = self._get_media_ids_by_user_txn(txn, user_id)
             return self._quarantine_media_txn(txn, local_media_ids, [], quarantined_by)
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "quarantine_media_by_user", _quarantine_media_by_user_txn
         )
 
@@ -848,7 +850,7 @@ class RoomWorkerStore(SQLBaseStore):
 
             return updates, upto_token, limited
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_all_new_public_rooms", get_all_new_public_rooms
         )
 
@@ -857,21 +859,21 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
     REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
     ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column"
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RoomBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
         self.config = hs.config
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "insert_room_retention", self._background_insert_retention,
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.REMOVE_TOMESTONED_ROOMS_BG_UPDATE,
             self._remove_tombstoned_rooms_from_directory,
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.ADD_ROOMS_ROOM_VERSION_COLUMN,
             self._background_add_rooms_room_version_column,
         )
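
The constructor registers named background updates; each handler is then invoked repeatedly with a batch size until it signals completion. A toy registry sketching that life cycle; unlike the real updater, the handler here simply returns True when done, and all names are illustrative.

    import asyncio

    class BackgroundUpdater:
        # Toy registry: run each named update until its handler reports done.
        def __init__(self):
            self._handlers = {}

        def register_background_update_handler(self, name, handler):
            self._handlers[name] = handler

        async def run_all(self, batch_size=100):
            for name, handler in self._handlers.items():
                done = False
                while not done:
                    # In this sketch a handler returns True once finished.
                    done = await handler(batch_size)

    async def insert_room_retention(batch_size):
        print("processing up to", batch_size, "rows")
        return True  # one pass is enough for the sketch

    updater = BackgroundUpdater()
    updater.register_background_update_handler(
        "insert_room_retention", insert_room_retention
    )
    asyncio.run(updater.run_all())
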
@@ -900,7 +902,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
                 (last_room, batch_size),
             )
 
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
 
             if not rows:
                 return True
@@ -912,7 +914,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
                     ev = db_to_json(row["json"])
                     retention_policy = ev["content"]
 
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn=txn,
                     table="room_retention",
                     values={
@@ -925,7 +927,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
 
             logger.info("Inserted %d rows into room_retention", len(rows))
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]}
             )
 
@@ -934,12 +936,12 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
             else:
                 return False
 
-        end = await self.db.runInteraction(
+        end = await self.db_pool.runInteraction(
             "insert_room_retention", _background_insert_retention_txn,
         )
 
         if end:
-            await self.db.updates._end_background_update("insert_room_retention")
+            await self.db_pool.updates._end_background_update("insert_room_retention")
 
         return batch_size
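
`_background_insert_retention` has the standard batched-update shape: read up to `batch_size` rows past the last checkpoint, process them, persist the new checkpoint alongside the writes, and finish when a batch comes back empty. A compact sqlite3 sketch of that loop; the `progress` table stands in for the real progress store.

    import sqlite3

    def run_batch(conn, last_room_id, batch_size):
        # Process one batch past the checkpoint; return the new checkpoint,
        # or None when there is nothing left and the update can end.
        rows = conn.execute(
            "SELECT room_id FROM rooms WHERE room_id > ? "
            "ORDER BY room_id LIMIT ?",
            (last_room_id, batch_size),
        ).fetchall()
        if not rows:
            return None
        for (room_id,) in rows:
            conn.execute(
                "INSERT INTO room_retention (room_id) VALUES (?)", (room_id,)
            )
        checkpoint = rows[-1][0]
        # Persisting progress next to the writes makes restarts safe.
        conn.execute(
            "UPDATE progress SET last_room_id = ? WHERE name = ?",
            (checkpoint, "insert_room_retention"),
        )
        conn.commit()
        return checkpoint

    conn = sqlite3.connect(":memory:")
    conn.executescript(
        "CREATE TABLE rooms (room_id TEXT PRIMARY KEY);"
        "CREATE TABLE room_retention (room_id TEXT);"
        "CREATE TABLE progress (name TEXT, last_room_id TEXT);"
        "INSERT INTO progress VALUES ('insert_room_retention', '');"
        "INSERT INTO rooms VALUES ('!a'), ('!b'), ('!c');"
    )
    checkpoint = ""
    while checkpoint is not None:
        checkpoint = run_batch(conn, checkpoint, batch_size=2)
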
 
@@ -983,7 +985,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
                 # mainly for paranoia as much badness would happen if we don't
                 # insert the row and then try and get the room version for the
                 # room.
-                self.db.simple_upsert_txn(
+                self.db_pool.simple_upsert_txn(
                     txn,
                     table="rooms",
                     keyvalues={"room_id": room_id},
@@ -992,19 +994,19 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
                 )
                 new_last_room_id = room_id
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, self.ADD_ROOMS_ROOM_VERSION_COLUMN, {"room_id": new_last_room_id}
             )
 
             return False
 
-        end = await self.db.runInteraction(
+        end = await self.db_pool.runInteraction(
             "_background_add_rooms_room_version_column",
             _background_add_rooms_room_version_column_txn,
         )
 
         if end:
-            await self.db.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 self.ADD_ROOMS_ROOM_VERSION_COLUMN
             )
 
@@ -1038,12 +1040,12 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
 
             return [row[0] for row in txn]
 
-        rooms = await self.db.runInteraction(
+        rooms = await self.db_pool.runInteraction(
             "get_tombstoned_directory_rooms", _get_rooms
         )
 
         if not rooms:
-            await self.db.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 self.REMOVE_TOMESTONED_ROOMS_BG_UPDATE
             )
             return 0
@@ -1052,7 +1054,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
             logger.info("Removing tombstoned room %s from the directory", room_id)
             await self.set_room_is_public(room_id, False)
 
-        await self.db.updates._background_update_progress(
+        await self.db_pool.updates._background_update_progress(
             self.REMOVE_TOMESTONED_ROOMS_BG_UPDATE, {"room_id": rooms[-1]}
         )
 
@@ -1068,7 +1070,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
 
 
 class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RoomStore, self).__init__(database, db_conn, hs)
 
         self.config = hs.config
@@ -1079,7 +1081,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
         Called when we join a room over federation, and overwrites any room version
         currently in the table.
         """
-        await self.db.simple_upsert(
+        await self.db_pool.simple_upsert(
             desc="upsert_room_on_join",
             table="rooms",
             keyvalues={"room_id": room_id},
@@ -1111,7 +1113,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
         try:
 
             def store_room_txn(txn, next_id):
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn,
                     "rooms",
                     {
@@ -1122,7 +1124,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                     },
                 )
                 if is_public:
-                    self.db.simple_insert_txn(
+                    self.db_pool.simple_insert_txn(
                         txn,
                         table="public_room_list_stream",
                         values={
@@ -1133,7 +1135,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                     )
 
             with self._public_room_id_gen.get_next() as next_id:
-                await self.db.runInteraction("store_room_txn", store_room_txn, next_id)
+                await self.db_pool.runInteraction(
+                    "store_room_txn", store_room_txn, next_id
+                )
         except Exception as e:
             logger.error("store_room with room_id=%s failed: %s", room_id, e)
             raise StoreError(500, "Problem creating room.")
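
`store_room` allocates its stream id from `_public_room_id_gen.get_next()`, a context manager, and runs the insert while the id is checked out. A toy generator sketching that checkout pattern; the real stream-id generators also track in-flight ids across workers, which this stand-in does not.

    import itertools
    import threading
    from contextlib import contextmanager

    class StreamIdGenerator:
        # Toy id source: ids are handed out under a context manager so the
        # row that uses an id is written while the id is checked out.
        def __init__(self):
            self._lock = threading.Lock()
            self._counter = itertools.count(1)

        @contextmanager
        def get_next(self):
            with self._lock:
                next_id = next(self._counter)
            yield next_id

    gen = StreamIdGenerator()
    with gen.get_next() as next_id:
        # The public_room_list_stream insert would run here, while the
        # freshly allocated id is held.
        print("writing stream row with id", next_id)
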
@@ -1143,7 +1147,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
         When we receive an invite over federation, store the room version if we
         don't already know it.
         """
-        await self.db.simple_upsert(
+        await self.db_pool.simple_upsert(
             desc="maybe_store_room_on_invite",
             table="rooms",
             keyvalues={"room_id": room_id},
@@ -1160,14 +1164,14 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
 
     async def set_room_is_public(self, room_id, is_public):
         def set_room_is_public_txn(txn, next_id):
-            self.db.simple_update_one_txn(
+            self.db_pool.simple_update_one_txn(
                 txn,
                 table="rooms",
                 keyvalues={"room_id": room_id},
                 updatevalues={"is_public": is_public},
             )
 
-            entries = self.db.simple_select_list_txn(
+            entries = self.db_pool.simple_select_list_txn(
                 txn,
                 table="public_room_list_stream",
                 keyvalues={
@@ -1185,7 +1189,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                 add_to_stream = bool(entries[-1]["visibility"]) != is_public
 
             if add_to_stream:
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn,
                     table="public_room_list_stream",
                     values={
@@ -1198,7 +1202,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                 )
 
         with self._public_room_id_gen.get_next() as next_id:
-            await self.db.runInteraction(
+            await self.db_pool.runInteraction(
                 "set_room_is_public", set_room_is_public_txn, next_id
             )
         self.hs.get_notifier().on_new_replication_data()
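
`set_room_is_public` appends a `public_room_list_stream` row only when visibility actually changed relative to the latest stream entry for the room. A small sketch of that change test, matching the comparison visible in the hunk; the entry shape is assumed.

    def visibility_changed(entries, is_public):
        # `entries` are the room's prior stream rows in stream order. A new
        # row is needed when there are none yet (and the room is now public)
        # or when the latest recorded visibility differs from the new value,
        # matching the `add_to_stream` comparison in the hunk above.
        if not entries:
            return is_public
        return bool(entries[-1]["visibility"]) != is_public

    assert visibility_changed([], True) is True
    assert visibility_changed([{"visibility": 1}], True) is False
    assert visibility_changed([{"visibility": 0}], True) is True
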
@@ -1224,7 +1228,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
         def set_room_is_public_appservice_txn(txn, next_id):
             if is_public:
                 try:
-                    self.db.simple_insert_txn(
+                    self.db_pool.simple_insert_txn(
                         txn,
                         table="appservice_room_list",
                         values={
@@ -1237,7 +1241,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                     # We've already inserted, nothing to do.
                     return
             else:
-                self.db.simple_delete_txn(
+                self.db_pool.simple_delete_txn(
                     txn,
                     table="appservice_room_list",
                     keyvalues={
@@ -1247,7 +1251,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                     },
                 )
 
-            entries = self.db.simple_select_list_txn(
+            entries = self.db_pool.simple_select_list_txn(
                 txn,
                 table="public_room_list_stream",
                 keyvalues={
@@ -1265,7 +1269,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                 add_to_stream = bool(entries[-1]["visibility"]) != is_public
 
             if add_to_stream:
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn,
                     table="public_room_list_stream",
                     values={
@@ -1278,7 +1282,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
                 )
 
         with self._public_room_id_gen.get_next() as next_id:
-            await self.db.runInteraction(
+            await self.db_pool.runInteraction(
                 "set_room_is_public_appservice",
                 set_room_is_public_appservice_txn,
                 next_id,
@@ -1295,13 +1299,13 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
             row = txn.fetchone()
             return row[0] or 0
 
-        return self.db.runInteraction("get_rooms", f)
+        return self.db_pool.runInteraction("get_rooms", f)
 
     def add_event_report(
         self, room_id, event_id, user_id, reason, content, received_ts
     ):
         next_id = self._event_reports_id_gen.get_next()
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             table="event_reports",
             values={
                 "id": next_id,
@@ -1325,14 +1329,14 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
             room_id: Room to block
             user_id: Who blocked it
         """
-        await self.db.simple_upsert(
+        await self.db_pool.simple_upsert(
             table="blocked_rooms",
             keyvalues={"room_id": room_id},
             values={},
             insertion_values={"user_id": user_id},
             desc="block_room",
         )
-        await self.db.runInteraction(
+        await self.db_pool.runInteraction(
             "block_room_invalidation",
             self._invalidate_cache_and_stream,
             self.is_room_blocked,
@@ -1388,7 +1392,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
 
             txn.execute(sql, args)
 
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
             rooms_dict = {}
 
             for row in rows:
@@ -1404,7 +1408,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
 
                 txn.execute(sql)
 
-                rows = self.db.cursor_to_dict(txn)
+                rows = self.db_pool.cursor_to_dict(txn)
 
                 # If a room isn't already in the dict (i.e. it doesn't have a retention
                 # policy in its state), add it with a null policy.
@@ -1417,7 +1421,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
 
             return rooms_dict
 
-        rooms = await self.db.runInteraction(
+        rooms = await self.db_pool.runInteraction(
             "get_rooms_for_retention_period_in_range",
             get_rooms_for_retention_period_in_range_txn,
         )
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/databases/main/roommember.py
index a92e401e88..7c5be251bd 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -28,8 +28,8 @@ from synapse.storage._base import (
     db_to_json,
     make_in_list_sql_clause,
 )
-from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.engines import Sqlite3Engine
 from synapse.storage.roommember import (
     GetRoomsForUserWithStreamOrdering,
@@ -51,7 +51,7 @@ _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME = "current_state_events_membership"
 
 
 class RoomMemberWorkerStore(EventsWorkerStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RoomMemberWorkerStore, self).__init__(database, db_conn, hs)
 
         # Is the current_state_events.membership up to date? Or is the
@@ -116,7 +116,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             txn.execute(query)
             return list(txn)[0][0]
 
-        count = yield self.db.runInteraction("get_known_servers", _transact)
+        count = yield self.db_pool.runInteraction("get_known_servers", _transact)
 
         # We always know about ourselves, even if we have nothing in
         # room_memberships (for example, the server is new).
@@ -128,7 +128,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         membership column is up to date
         """
 
-        pending_update = self.db.simple_select_one_txn(
+        pending_update = self.db_pool.simple_select_one_txn(
             txn,
             table="background_updates",
             keyvalues={"update_name": _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME},
@@ -144,14 +144,14 @@ class RoomMemberWorkerStore(EventsWorkerStore):
                 15.0,
                 run_as_background_process,
                 "_check_safe_current_state_events_membership_updated",
-                self.db.runInteraction,
+                self.db_pool.runInteraction,
                 "_check_safe_current_state_events_membership_updated",
                 self._check_safe_current_state_events_membership_updated_txn,
             )
 
     @cached(max_entries=100000, iterable=True)
     def get_users_in_room(self, room_id):
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_users_in_room", self.get_users_in_room_txn, room_id
         )
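
`get_users_in_room` pairs the pooled interaction with the `@cached` descriptor, so repeated lookups for a room are served from memory until something invalidates them (as `forget` does further down via `_invalidate_cache_and_stream`). A sketch of just the memoization core using `functools.lru_cache`; the real descriptor adds invalidation and replication, which this does not.

    from functools import lru_cache

    class Store:
        def __init__(self, users_by_room):
            self._users_by_room = users_by_room
            # lru_cache plays the role of the @cached descriptor here:
            # repeat lookups for the same room never reach the backing data.
            self.get_users_in_room = lru_cache(maxsize=10000)(
                self._get_users_in_room
            )

        def _get_users_in_room(self, room_id):
            print("cache miss for", room_id)
            return tuple(self._users_by_room.get(room_id, ()))

    store = Store({"!a:example.org": ("@u1:example.org", "@u2:example.org")})
    store.get_users_in_room("!a:example.org")  # miss: reads the backing data
    store.get_users_in_room("!a:example.org")  # hit: served from the cache
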
 
@@ -259,7 +259,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
             return res
 
-        return self.db.runInteraction("get_room_summary", _get_room_summary_txn)
+        return self.db_pool.runInteraction("get_room_summary", _get_room_summary_txn)
 
     def _get_user_counts_in_room_txn(self, txn, room_id):
         """
@@ -332,7 +332,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         if not membership_list:
             return defer.succeed(None)
 
-        rooms = yield self.db.runInteraction(
+        rooms = yield self.db_pool.runInteraction(
             "get_rooms_for_local_user_where_membership_is",
             self._get_rooms_for_local_user_where_membership_is_txn,
             user_id,
@@ -369,7 +369,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         )
 
         txn.execute(sql, (user_id, *args))
-        results = [RoomsForUser(**r) for r in self.db.cursor_to_dict(txn)]
+        results = [RoomsForUser(**r) for r in self.db_pool.cursor_to_dict(txn)]
 
         return results
 
@@ -388,7 +388,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             the rooms the user is in currently, along with the stream ordering
             of the most recent join for that user and room.
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_rooms_for_user_with_stream_ordering",
             self._get_rooms_for_user_with_stream_ordering_txn,
             user_id,
@@ -453,7 +453,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
             return {row[0] for row in txn}
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "get_users_server_still_shares_room_with",
             _get_users_server_still_shares_room_with_txn,
         )
@@ -624,7 +624,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             to `user_id` and ProfileInfo (or None if not join event).
         """
 
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="room_memberships",
             column="event_id",
             iterable=event_ids,
@@ -664,7 +664,9 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         # the returned user actually has the correct domain.
         like_clause = "%:" + host
 
-        rows = yield self.db.execute("is_host_joined", None, sql, room_id, like_clause)
+        rows = yield self.db_pool.execute(
+            "is_host_joined", None, sql, room_id, like_clause
+        )
 
         if not rows:
             return False
@@ -704,7 +706,9 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         # the returned user actually has the correct domain.
         like_clause = "%:" + host
 
-        rows = yield self.db.execute("was_host_joined", None, sql, room_id, like_clause)
+        rows = yield self.db_pool.execute(
+            "was_host_joined", None, sql, room_id, like_clause
+        )
 
         if not rows:
             return False
@@ -774,7 +778,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             rows = txn.fetchall()
             return rows[0][0]
 
-        count = yield self.db.runInteraction("did_forget_membership", f)
+        count = yield self.db_pool.runInteraction("did_forget_membership", f)
         return count == 0
 
     @cached()
@@ -811,7 +815,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             txn.execute(sql, (user_id,))
             return {row[0] for row in txn if row[1] == 0}
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn
         )
 
@@ -826,7 +830,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             Deferred[set[str]]: Set of room IDs.
         """
 
-        room_ids = yield self.db.simple_select_onecol(
+        room_ids = yield self.db_pool.simple_select_onecol(
             table="room_memberships",
             keyvalues={"membership": Membership.JOIN, "user_id": user_id},
             retcol="room_id",
@@ -841,7 +845,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         """Get user_id and membership of a set of event IDs.
         """
 
-        return self.db.simple_select_many_batch(
+        return self.db_pool.simple_select_many_batch(
             table="room_memberships",
             column="event_id",
             iterable=member_event_ids,
@@ -877,23 +881,23 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
             return bool(txn.fetchone())
 
-        return await self.db.runInteraction(
+        return await self.db_pool.runInteraction(
             "is_local_host_in_room_ignoring_users",
             _is_local_host_in_room_ignoring_users_txn,
         )
 
 
 class RoomMemberBackgroundUpdateStore(SQLBaseStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RoomMemberBackgroundUpdateStore, self).__init__(database, db_conn, hs)
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             _MEMBERSHIP_PROFILE_UPDATE_NAME, self._background_add_membership_profile
         )
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME,
             self._background_current_state_membership,
         )
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             "room_membership_forgotten_idx",
             index_name="room_memberships_user_room_forgotten",
             table="room_memberships",
@@ -926,7 +930,7 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
 
             txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
 
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
             if not rows:
                 return 0
 
@@ -961,18 +965,18 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
                 "max_stream_id_exclusive": min_stream_id,
             }
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, _MEMBERSHIP_PROFILE_UPDATE_NAME, progress
             )
 
             return len(rows)
 
-        result = yield self.db.runInteraction(
+        result = yield self.db_pool.runInteraction(
             _MEMBERSHIP_PROFILE_UPDATE_NAME, add_membership_profile_txn
         )
 
         if not result:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 _MEMBERSHIP_PROFILE_UPDATE_NAME
             )
 
@@ -1013,7 +1017,7 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
 
                 last_processed_room = next_room
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn,
                 _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME,
                 {"last_processed_room": last_processed_room},
@@ -1025,14 +1029,14 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
         # string, which will compare before all room IDs correctly.
         last_processed_room = progress.get("last_processed_room", "")
 
-        row_count, finished = yield self.db.runInteraction(
+        row_count, finished = yield self.db_pool.runInteraction(
             "_background_current_state_membership_update",
             _background_current_state_membership_txn,
             last_processed_room,
         )
 
         if finished:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME
             )
 
@@ -1040,7 +1044,7 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
 
 
 class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(RoomMemberStore, self).__init__(database, db_conn, hs)
 
     def forget(self, user_id, room_id):
@@ -1064,7 +1068,7 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
                 txn, self.get_forgotten_rooms_for_user, (user_id,)
             )
 
-        return self.db.runInteraction("forget_membership", f)
+        return self.db_pool.runInteraction("forget_membership", f)
 
 
 class _JoinedHostsCache(object):
diff --git a/synapse/storage/data_stores/main/schema/delta/12/v12.sql b/synapse/storage/databases/main/schema/delta/12/v12.sql
index 5964c5aaac..5964c5aaac 100644
--- a/synapse/storage/data_stores/main/schema/delta/12/v12.sql
+++ b/synapse/storage/databases/main/schema/delta/12/v12.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/13/v13.sql b/synapse/storage/databases/main/schema/delta/13/v13.sql
index f8649e5d99..f8649e5d99 100644
--- a/synapse/storage/data_stores/main/schema/delta/13/v13.sql
+++ b/synapse/storage/databases/main/schema/delta/13/v13.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/14/v14.sql b/synapse/storage/databases/main/schema/delta/14/v14.sql
index a831920da6..a831920da6 100644
--- a/synapse/storage/data_stores/main/schema/delta/14/v14.sql
+++ b/synapse/storage/databases/main/schema/delta/14/v14.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/15/appservice_txns.sql b/synapse/storage/databases/main/schema/delta/15/appservice_txns.sql
index e4f5e76aec..e4f5e76aec 100644
--- a/synapse/storage/data_stores/main/schema/delta/15/appservice_txns.sql
+++ b/synapse/storage/databases/main/schema/delta/15/appservice_txns.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/15/presence_indices.sql b/synapse/storage/databases/main/schema/delta/15/presence_indices.sql
index 6b8d0f1ca7..6b8d0f1ca7 100644
--- a/synapse/storage/data_stores/main/schema/delta/15/presence_indices.sql
+++ b/synapse/storage/databases/main/schema/delta/15/presence_indices.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/15/v15.sql b/synapse/storage/databases/main/schema/delta/15/v15.sql
index 9523d2bcc3..9523d2bcc3 100644
--- a/synapse/storage/data_stores/main/schema/delta/15/v15.sql
+++ b/synapse/storage/databases/main/schema/delta/15/v15.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/16/events_order_index.sql b/synapse/storage/databases/main/schema/delta/16/events_order_index.sql
index a48f215170..a48f215170 100644
--- a/synapse/storage/data_stores/main/schema/delta/16/events_order_index.sql
+++ b/synapse/storage/databases/main/schema/delta/16/events_order_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/16/remote_media_cache_index.sql b/synapse/storage/databases/main/schema/delta/16/remote_media_cache_index.sql
index 7a15265cb1..7a15265cb1 100644
--- a/synapse/storage/data_stores/main/schema/delta/16/remote_media_cache_index.sql
+++ b/synapse/storage/databases/main/schema/delta/16/remote_media_cache_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/16/remove_duplicates.sql b/synapse/storage/databases/main/schema/delta/16/remove_duplicates.sql
index 65c97b5e2f..65c97b5e2f 100644
--- a/synapse/storage/data_stores/main/schema/delta/16/remove_duplicates.sql
+++ b/synapse/storage/databases/main/schema/delta/16/remove_duplicates.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/16/room_alias_index.sql b/synapse/storage/databases/main/schema/delta/16/room_alias_index.sql
index f82486132b..f82486132b 100644
--- a/synapse/storage/data_stores/main/schema/delta/16/room_alias_index.sql
+++ b/synapse/storage/databases/main/schema/delta/16/room_alias_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/16/unique_constraints.sql b/synapse/storage/databases/main/schema/delta/16/unique_constraints.sql
index 5b8de52c33..5b8de52c33 100644
--- a/synapse/storage/data_stores/main/schema/delta/16/unique_constraints.sql
+++ b/synapse/storage/databases/main/schema/delta/16/unique_constraints.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/16/users.sql b/synapse/storage/databases/main/schema/delta/16/users.sql
index cd0709250d..cd0709250d 100644
--- a/synapse/storage/data_stores/main/schema/delta/16/users.sql
+++ b/synapse/storage/databases/main/schema/delta/16/users.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/17/drop_indexes.sql b/synapse/storage/databases/main/schema/delta/17/drop_indexes.sql
index 7c9a90e27f..7c9a90e27f 100644
--- a/synapse/storage/data_stores/main/schema/delta/17/drop_indexes.sql
+++ b/synapse/storage/databases/main/schema/delta/17/drop_indexes.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/17/server_keys.sql b/synapse/storage/databases/main/schema/delta/17/server_keys.sql
index 70b247a06b..70b247a06b 100644
--- a/synapse/storage/data_stores/main/schema/delta/17/server_keys.sql
+++ b/synapse/storage/databases/main/schema/delta/17/server_keys.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/17/user_threepids.sql b/synapse/storage/databases/main/schema/delta/17/user_threepids.sql
index c17715ac80..c17715ac80 100644
--- a/synapse/storage/data_stores/main/schema/delta/17/user_threepids.sql
+++ b/synapse/storage/databases/main/schema/delta/17/user_threepids.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/databases/main/schema/delta/18/server_keys_bigger_ints.sql
index 6e0871c92b..6e0871c92b 100644
--- a/synapse/storage/data_stores/main/schema/delta/18/server_keys_bigger_ints.sql
+++ b/synapse/storage/databases/main/schema/delta/18/server_keys_bigger_ints.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/19/event_index.sql b/synapse/storage/databases/main/schema/delta/19/event_index.sql
index 18b97b4332..18b97b4332 100644
--- a/synapse/storage/data_stores/main/schema/delta/19/event_index.sql
+++ b/synapse/storage/databases/main/schema/delta/19/event_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/20/dummy.sql b/synapse/storage/databases/main/schema/delta/20/dummy.sql
index e0ac49d1ec..e0ac49d1ec 100644
--- a/synapse/storage/data_stores/main/schema/delta/20/dummy.sql
+++ b/synapse/storage/databases/main/schema/delta/20/dummy.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/20/pushers.py b/synapse/storage/databases/main/schema/delta/20/pushers.py
index 3edfcfd783..3edfcfd783 100644
--- a/synapse/storage/data_stores/main/schema/delta/20/pushers.py
+++ b/synapse/storage/databases/main/schema/delta/20/pushers.py
diff --git a/synapse/storage/data_stores/main/schema/delta/21/end_to_end_keys.sql b/synapse/storage/databases/main/schema/delta/21/end_to_end_keys.sql
index 4c2fb20b77..4c2fb20b77 100644
--- a/synapse/storage/data_stores/main/schema/delta/21/end_to_end_keys.sql
+++ b/synapse/storage/databases/main/schema/delta/21/end_to_end_keys.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/21/receipts.sql b/synapse/storage/databases/main/schema/delta/21/receipts.sql
index d070845477..d070845477 100644
--- a/synapse/storage/data_stores/main/schema/delta/21/receipts.sql
+++ b/synapse/storage/databases/main/schema/delta/21/receipts.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/22/receipts_index.sql b/synapse/storage/databases/main/schema/delta/22/receipts_index.sql
index bfc0b3bcaa..bfc0b3bcaa 100644
--- a/synapse/storage/data_stores/main/schema/delta/22/receipts_index.sql
+++ b/synapse/storage/databases/main/schema/delta/22/receipts_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/22/user_threepids_unique.sql b/synapse/storage/databases/main/schema/delta/22/user_threepids_unique.sql
index 87edfa454c..87edfa454c 100644
--- a/synapse/storage/data_stores/main/schema/delta/22/user_threepids_unique.sql
+++ b/synapse/storage/databases/main/schema/delta/22/user_threepids_unique.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/24/stats_reporting.sql b/synapse/storage/databases/main/schema/delta/24/stats_reporting.sql
index acea7483bd..acea7483bd 100644
--- a/synapse/storage/data_stores/main/schema/delta/24/stats_reporting.sql
+++ b/synapse/storage/databases/main/schema/delta/24/stats_reporting.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/25/fts.py b/synapse/storage/databases/main/schema/delta/25/fts.py
index ee675e71ff..ee675e71ff 100644
--- a/synapse/storage/data_stores/main/schema/delta/25/fts.py
+++ b/synapse/storage/databases/main/schema/delta/25/fts.py
diff --git a/synapse/storage/data_stores/main/schema/delta/25/guest_access.sql b/synapse/storage/databases/main/schema/delta/25/guest_access.sql
index 1ea389b471..1ea389b471 100644
--- a/synapse/storage/data_stores/main/schema/delta/25/guest_access.sql
+++ b/synapse/storage/databases/main/schema/delta/25/guest_access.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/25/history_visibility.sql b/synapse/storage/databases/main/schema/delta/25/history_visibility.sql
index f468fc1897..f468fc1897 100644
--- a/synapse/storage/data_stores/main/schema/delta/25/history_visibility.sql
+++ b/synapse/storage/databases/main/schema/delta/25/history_visibility.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/25/tags.sql b/synapse/storage/databases/main/schema/delta/25/tags.sql
index 7a32ce68e4..7a32ce68e4 100644
--- a/synapse/storage/data_stores/main/schema/delta/25/tags.sql
+++ b/synapse/storage/databases/main/schema/delta/25/tags.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/26/account_data.sql b/synapse/storage/databases/main/schema/delta/26/account_data.sql
index e395de2b5e..e395de2b5e 100644
--- a/synapse/storage/data_stores/main/schema/delta/26/account_data.sql
+++ b/synapse/storage/databases/main/schema/delta/26/account_data.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/27/account_data.sql b/synapse/storage/databases/main/schema/delta/27/account_data.sql
index bf0558b5b3..bf0558b5b3 100644
--- a/synapse/storage/data_stores/main/schema/delta/27/account_data.sql
+++ b/synapse/storage/databases/main/schema/delta/27/account_data.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/27/forgotten_memberships.sql b/synapse/storage/databases/main/schema/delta/27/forgotten_memberships.sql
index e2094f37fe..e2094f37fe 100644
--- a/synapse/storage/data_stores/main/schema/delta/27/forgotten_memberships.sql
+++ b/synapse/storage/databases/main/schema/delta/27/forgotten_memberships.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/27/ts.py b/synapse/storage/databases/main/schema/delta/27/ts.py
index b7972cfa8e..b7972cfa8e 100644
--- a/synapse/storage/data_stores/main/schema/delta/27/ts.py
+++ b/synapse/storage/databases/main/schema/delta/27/ts.py
diff --git a/synapse/storage/data_stores/main/schema/delta/28/event_push_actions.sql b/synapse/storage/databases/main/schema/delta/28/event_push_actions.sql
index 4d519849df..4d519849df 100644
--- a/synapse/storage/data_stores/main/schema/delta/28/event_push_actions.sql
+++ b/synapse/storage/databases/main/schema/delta/28/event_push_actions.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/28/events_room_stream.sql b/synapse/storage/databases/main/schema/delta/28/events_room_stream.sql
index 36609475f1..36609475f1 100644
--- a/synapse/storage/data_stores/main/schema/delta/28/events_room_stream.sql
+++ b/synapse/storage/databases/main/schema/delta/28/events_room_stream.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/28/public_roms_index.sql b/synapse/storage/databases/main/schema/delta/28/public_roms_index.sql
index 6c1fd68c5b..6c1fd68c5b 100644
--- a/synapse/storage/data_stores/main/schema/delta/28/public_roms_index.sql
+++ b/synapse/storage/databases/main/schema/delta/28/public_roms_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/28/receipts_user_id_index.sql b/synapse/storage/databases/main/schema/delta/28/receipts_user_id_index.sql
index cb84c69baa..cb84c69baa 100644
--- a/synapse/storage/data_stores/main/schema/delta/28/receipts_user_id_index.sql
+++ b/synapse/storage/databases/main/schema/delta/28/receipts_user_id_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/28/upgrade_times.sql b/synapse/storage/databases/main/schema/delta/28/upgrade_times.sql
index 3e4a9ab455..3e4a9ab455 100644
--- a/synapse/storage/data_stores/main/schema/delta/28/upgrade_times.sql
+++ b/synapse/storage/databases/main/schema/delta/28/upgrade_times.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/28/users_is_guest.sql b/synapse/storage/databases/main/schema/delta/28/users_is_guest.sql
index 21d2b420bf..21d2b420bf 100644
--- a/synapse/storage/data_stores/main/schema/delta/28/users_is_guest.sql
+++ b/synapse/storage/databases/main/schema/delta/28/users_is_guest.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/29/push_actions.sql b/synapse/storage/databases/main/schema/delta/29/push_actions.sql
index 84b21cf813..84b21cf813 100644
--- a/synapse/storage/data_stores/main/schema/delta/29/push_actions.sql
+++ b/synapse/storage/databases/main/schema/delta/29/push_actions.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/30/alias_creator.sql b/synapse/storage/databases/main/schema/delta/30/alias_creator.sql
index c9d0dde638..c9d0dde638 100644
--- a/synapse/storage/data_stores/main/schema/delta/30/alias_creator.sql
+++ b/synapse/storage/databases/main/schema/delta/30/alias_creator.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/30/as_users.py b/synapse/storage/databases/main/schema/delta/30/as_users.py
index b42c02710a..b42c02710a 100644
--- a/synapse/storage/data_stores/main/schema/delta/30/as_users.py
+++ b/synapse/storage/databases/main/schema/delta/30/as_users.py
diff --git a/synapse/storage/data_stores/main/schema/delta/30/deleted_pushers.sql b/synapse/storage/databases/main/schema/delta/30/deleted_pushers.sql
index 712c454aa1..712c454aa1 100644
--- a/synapse/storage/data_stores/main/schema/delta/30/deleted_pushers.sql
+++ b/synapse/storage/databases/main/schema/delta/30/deleted_pushers.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/30/presence_stream.sql b/synapse/storage/databases/main/schema/delta/30/presence_stream.sql
index 606bbb037d..606bbb037d 100644
--- a/synapse/storage/data_stores/main/schema/delta/30/presence_stream.sql
+++ b/synapse/storage/databases/main/schema/delta/30/presence_stream.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/30/public_rooms.sql b/synapse/storage/databases/main/schema/delta/30/public_rooms.sql
index f09db4faa6..f09db4faa6 100644
--- a/synapse/storage/data_stores/main/schema/delta/30/public_rooms.sql
+++ b/synapse/storage/databases/main/schema/delta/30/public_rooms.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/30/push_rule_stream.sql b/synapse/storage/databases/main/schema/delta/30/push_rule_stream.sql
index 735aa8d5f6..735aa8d5f6 100644
--- a/synapse/storage/data_stores/main/schema/delta/30/push_rule_stream.sql
+++ b/synapse/storage/databases/main/schema/delta/30/push_rule_stream.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/30/threepid_guest_access_tokens.sql b/synapse/storage/databases/main/schema/delta/30/threepid_guest_access_tokens.sql
index 0dd2f1360c..0dd2f1360c 100644
--- a/synapse/storage/data_stores/main/schema/delta/30/threepid_guest_access_tokens.sql
+++ b/synapse/storage/databases/main/schema/delta/30/threepid_guest_access_tokens.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/31/invites.sql b/synapse/storage/databases/main/schema/delta/31/invites.sql
index 2c57846d5a..2c57846d5a 100644
--- a/synapse/storage/data_stores/main/schema/delta/31/invites.sql
+++ b/synapse/storage/databases/main/schema/delta/31/invites.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/31/local_media_repository_url_cache.sql b/synapse/storage/databases/main/schema/delta/31/local_media_repository_url_cache.sql
index 9efb4280eb..9efb4280eb 100644
--- a/synapse/storage/data_stores/main/schema/delta/31/local_media_repository_url_cache.sql
+++ b/synapse/storage/databases/main/schema/delta/31/local_media_repository_url_cache.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/31/pushers.py b/synapse/storage/databases/main/schema/delta/31/pushers.py
index 9bb504aad5..9bb504aad5 100644
--- a/synapse/storage/data_stores/main/schema/delta/31/pushers.py
+++ b/synapse/storage/databases/main/schema/delta/31/pushers.py
diff --git a/synapse/storage/data_stores/main/schema/delta/31/pushers_index.sql b/synapse/storage/databases/main/schema/delta/31/pushers_index.sql
index a82add88fd..a82add88fd 100644
--- a/synapse/storage/data_stores/main/schema/delta/31/pushers_index.sql
+++ b/synapse/storage/databases/main/schema/delta/31/pushers_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/31/search_update.py b/synapse/storage/databases/main/schema/delta/31/search_update.py
index 63b757ade6..63b757ade6 100644
--- a/synapse/storage/data_stores/main/schema/delta/31/search_update.py
+++ b/synapse/storage/databases/main/schema/delta/31/search_update.py
diff --git a/synapse/storage/data_stores/main/schema/delta/32/events.sql b/synapse/storage/databases/main/schema/delta/32/events.sql
index 1dd0f9e170..1dd0f9e170 100644
--- a/synapse/storage/data_stores/main/schema/delta/32/events.sql
+++ b/synapse/storage/databases/main/schema/delta/32/events.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/32/openid.sql b/synapse/storage/databases/main/schema/delta/32/openid.sql
index 36f37b11c8..36f37b11c8 100644
--- a/synapse/storage/data_stores/main/schema/delta/32/openid.sql
+++ b/synapse/storage/databases/main/schema/delta/32/openid.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/32/pusher_throttle.sql b/synapse/storage/databases/main/schema/delta/32/pusher_throttle.sql
index d86d30c13c..d86d30c13c 100644
--- a/synapse/storage/data_stores/main/schema/delta/32/pusher_throttle.sql
+++ b/synapse/storage/databases/main/schema/delta/32/pusher_throttle.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/32/remove_indices.sql b/synapse/storage/databases/main/schema/delta/32/remove_indices.sql
index 2de50d408c..2de50d408c 100644
--- a/synapse/storage/data_stores/main/schema/delta/32/remove_indices.sql
+++ b/synapse/storage/databases/main/schema/delta/32/remove_indices.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/32/reports.sql b/synapse/storage/databases/main/schema/delta/32/reports.sql
index d13609776f..d13609776f 100644
--- a/synapse/storage/data_stores/main/schema/delta/32/reports.sql
+++ b/synapse/storage/databases/main/schema/delta/32/reports.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/33/access_tokens_device_index.sql b/synapse/storage/databases/main/schema/delta/33/access_tokens_device_index.sql
index 61ad3fe3e8..61ad3fe3e8 100644
--- a/synapse/storage/data_stores/main/schema/delta/33/access_tokens_device_index.sql
+++ b/synapse/storage/databases/main/schema/delta/33/access_tokens_device_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/33/devices.sql b/synapse/storage/databases/main/schema/delta/33/devices.sql
index eca7268d82..eca7268d82 100644
--- a/synapse/storage/data_stores/main/schema/delta/33/devices.sql
+++ b/synapse/storage/databases/main/schema/delta/33/devices.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/33/devices_for_e2e_keys.sql b/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys.sql
index aa4a3b9f2f..aa4a3b9f2f 100644
--- a/synapse/storage/data_stores/main/schema/delta/33/devices_for_e2e_keys.sql
+++ b/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql b/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
index 6671573398..6671573398 100644
--- a/synapse/storage/data_stores/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
+++ b/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/33/event_fields.py b/synapse/storage/databases/main/schema/delta/33/event_fields.py
index a3e81eeac7..a3e81eeac7 100644
--- a/synapse/storage/data_stores/main/schema/delta/33/event_fields.py
+++ b/synapse/storage/databases/main/schema/delta/33/event_fields.py
diff --git a/synapse/storage/data_stores/main/schema/delta/33/remote_media_ts.py b/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py
index a26057dfb6..a26057dfb6 100644
--- a/synapse/storage/data_stores/main/schema/delta/33/remote_media_ts.py
+++ b/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py
diff --git a/synapse/storage/data_stores/main/schema/delta/33/user_ips_index.sql b/synapse/storage/databases/main/schema/delta/33/user_ips_index.sql
index 473f75a78e..473f75a78e 100644
--- a/synapse/storage/data_stores/main/schema/delta/33/user_ips_index.sql
+++ b/synapse/storage/databases/main/schema/delta/33/user_ips_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/34/appservice_stream.sql b/synapse/storage/databases/main/schema/delta/34/appservice_stream.sql
index 69e16eda0f..69e16eda0f 100644
--- a/synapse/storage/data_stores/main/schema/delta/34/appservice_stream.sql
+++ b/synapse/storage/databases/main/schema/delta/34/appservice_stream.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/34/cache_stream.py b/synapse/storage/databases/main/schema/delta/34/cache_stream.py
index cf09e43e2b..cf09e43e2b 100644
--- a/synapse/storage/data_stores/main/schema/delta/34/cache_stream.py
+++ b/synapse/storage/databases/main/schema/delta/34/cache_stream.py
diff --git a/synapse/storage/data_stores/main/schema/delta/34/device_inbox.sql b/synapse/storage/databases/main/schema/delta/34/device_inbox.sql
index e68844c74a..e68844c74a 100644
--- a/synapse/storage/data_stores/main/schema/delta/34/device_inbox.sql
+++ b/synapse/storage/databases/main/schema/delta/34/device_inbox.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/34/push_display_name_rename.sql b/synapse/storage/databases/main/schema/delta/34/push_display_name_rename.sql
index 0d9fe1a99a..0d9fe1a99a 100644
--- a/synapse/storage/data_stores/main/schema/delta/34/push_display_name_rename.sql
+++ b/synapse/storage/databases/main/schema/delta/34/push_display_name_rename.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/34/received_txn_purge.py b/synapse/storage/databases/main/schema/delta/34/received_txn_purge.py
index 67d505e68b..67d505e68b 100644
--- a/synapse/storage/data_stores/main/schema/delta/34/received_txn_purge.py
+++ b/synapse/storage/databases/main/schema/delta/34/received_txn_purge.py
diff --git a/synapse/storage/data_stores/main/schema/delta/35/contains_url.sql b/synapse/storage/databases/main/schema/delta/35/contains_url.sql
index 6cd123027b..6cd123027b 100644
--- a/synapse/storage/data_stores/main/schema/delta/35/contains_url.sql
+++ b/synapse/storage/databases/main/schema/delta/35/contains_url.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/35/device_outbox.sql b/synapse/storage/databases/main/schema/delta/35/device_outbox.sql
index 17e6c43105..17e6c43105 100644
--- a/synapse/storage/data_stores/main/schema/delta/35/device_outbox.sql
+++ b/synapse/storage/databases/main/schema/delta/35/device_outbox.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/35/device_stream_id.sql b/synapse/storage/databases/main/schema/delta/35/device_stream_id.sql
index 7ab7d942e2..7ab7d942e2 100644
--- a/synapse/storage/data_stores/main/schema/delta/35/device_stream_id.sql
+++ b/synapse/storage/databases/main/schema/delta/35/device_stream_id.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/35/event_push_actions_index.sql b/synapse/storage/databases/main/schema/delta/35/event_push_actions_index.sql
index 2e836d8e9c..2e836d8e9c 100644
--- a/synapse/storage/data_stores/main/schema/delta/35/event_push_actions_index.sql
+++ b/synapse/storage/databases/main/schema/delta/35/event_push_actions_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/35/public_room_list_change_stream.sql b/synapse/storage/databases/main/schema/delta/35/public_room_list_change_stream.sql
index dd2bf2e28a..dd2bf2e28a 100644
--- a/synapse/storage/data_stores/main/schema/delta/35/public_room_list_change_stream.sql
+++ b/synapse/storage/databases/main/schema/delta/35/public_room_list_change_stream.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/35/stream_order_to_extrem.sql b/synapse/storage/databases/main/schema/delta/35/stream_order_to_extrem.sql
index 2b945d8a57..2b945d8a57 100644
--- a/synapse/storage/data_stores/main/schema/delta/35/stream_order_to_extrem.sql
+++ b/synapse/storage/databases/main/schema/delta/35/stream_order_to_extrem.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/36/readd_public_rooms.sql b/synapse/storage/databases/main/schema/delta/36/readd_public_rooms.sql
index 90d8fd18f9..90d8fd18f9 100644
--- a/synapse/storage/data_stores/main/schema/delta/36/readd_public_rooms.sql
+++ b/synapse/storage/databases/main/schema/delta/36/readd_public_rooms.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/37/remove_auth_idx.py b/synapse/storage/databases/main/schema/delta/37/remove_auth_idx.py
index a377884169..a377884169 100644
--- a/synapse/storage/data_stores/main/schema/delta/37/remove_auth_idx.py
+++ b/synapse/storage/databases/main/schema/delta/37/remove_auth_idx.py
diff --git a/synapse/storage/data_stores/main/schema/delta/37/user_threepids.sql b/synapse/storage/databases/main/schema/delta/37/user_threepids.sql
index cf7a90dd10..cf7a90dd10 100644
--- a/synapse/storage/data_stores/main/schema/delta/37/user_threepids.sql
+++ b/synapse/storage/databases/main/schema/delta/37/user_threepids.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/38/postgres_fts_gist.sql b/synapse/storage/databases/main/schema/delta/38/postgres_fts_gist.sql
index 515e6b8e84..515e6b8e84 100644
--- a/synapse/storage/data_stores/main/schema/delta/38/postgres_fts_gist.sql
+++ b/synapse/storage/databases/main/schema/delta/38/postgres_fts_gist.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/39/appservice_room_list.sql b/synapse/storage/databases/main/schema/delta/39/appservice_room_list.sql
index 74bdc49073..74bdc49073 100644
--- a/synapse/storage/data_stores/main/schema/delta/39/appservice_room_list.sql
+++ b/synapse/storage/databases/main/schema/delta/39/appservice_room_list.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/39/device_federation_stream_idx.sql b/synapse/storage/databases/main/schema/delta/39/device_federation_stream_idx.sql
index 00be801e90..00be801e90 100644
--- a/synapse/storage/data_stores/main/schema/delta/39/device_federation_stream_idx.sql
+++ b/synapse/storage/databases/main/schema/delta/39/device_federation_stream_idx.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/39/event_push_index.sql b/synapse/storage/databases/main/schema/delta/39/event_push_index.sql
index de2ad93e5c..de2ad93e5c 100644
--- a/synapse/storage/data_stores/main/schema/delta/39/event_push_index.sql
+++ b/synapse/storage/databases/main/schema/delta/39/event_push_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/39/federation_out_position.sql b/synapse/storage/databases/main/schema/delta/39/federation_out_position.sql
index 5af814290b..5af814290b 100644
--- a/synapse/storage/data_stores/main/schema/delta/39/federation_out_position.sql
+++ b/synapse/storage/databases/main/schema/delta/39/federation_out_position.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/39/membership_profile.sql b/synapse/storage/databases/main/schema/delta/39/membership_profile.sql
index 1bf911c8ab..1bf911c8ab 100644
--- a/synapse/storage/data_stores/main/schema/delta/39/membership_profile.sql
+++ b/synapse/storage/databases/main/schema/delta/39/membership_profile.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/40/current_state_idx.sql b/synapse/storage/databases/main/schema/delta/40/current_state_idx.sql
index 7ffa189f39..7ffa189f39 100644
--- a/synapse/storage/data_stores/main/schema/delta/40/current_state_idx.sql
+++ b/synapse/storage/databases/main/schema/delta/40/current_state_idx.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/40/device_inbox.sql b/synapse/storage/databases/main/schema/delta/40/device_inbox.sql
index b9fe1f0480..b9fe1f0480 100644
--- a/synapse/storage/data_stores/main/schema/delta/40/device_inbox.sql
+++ b/synapse/storage/databases/main/schema/delta/40/device_inbox.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/40/device_list_streams.sql b/synapse/storage/databases/main/schema/delta/40/device_list_streams.sql
index dd6dcb65f1..dd6dcb65f1 100644
--- a/synapse/storage/data_stores/main/schema/delta/40/device_list_streams.sql
+++ b/synapse/storage/databases/main/schema/delta/40/device_list_streams.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/40/event_push_summary.sql b/synapse/storage/databases/main/schema/delta/40/event_push_summary.sql
index 3918f0b794..3918f0b794 100644
--- a/synapse/storage/data_stores/main/schema/delta/40/event_push_summary.sql
+++ b/synapse/storage/databases/main/schema/delta/40/event_push_summary.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/40/pushers.sql b/synapse/storage/databases/main/schema/delta/40/pushers.sql
index 054a223f14..054a223f14 100644
--- a/synapse/storage/data_stores/main/schema/delta/40/pushers.sql
+++ b/synapse/storage/databases/main/schema/delta/40/pushers.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/41/device_list_stream_idx.sql b/synapse/storage/databases/main/schema/delta/41/device_list_stream_idx.sql
index b7bee8b692..b7bee8b692 100644
--- a/synapse/storage/data_stores/main/schema/delta/41/device_list_stream_idx.sql
+++ b/synapse/storage/databases/main/schema/delta/41/device_list_stream_idx.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/41/device_outbound_index.sql b/synapse/storage/databases/main/schema/delta/41/device_outbound_index.sql
index 62f0b9892b..62f0b9892b 100644
--- a/synapse/storage/data_stores/main/schema/delta/41/device_outbound_index.sql
+++ b/synapse/storage/databases/main/schema/delta/41/device_outbound_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/41/event_search_event_id_idx.sql b/synapse/storage/databases/main/schema/delta/41/event_search_event_id_idx.sql
index 5d9cfecf36..5d9cfecf36 100644
--- a/synapse/storage/data_stores/main/schema/delta/41/event_search_event_id_idx.sql
+++ b/synapse/storage/databases/main/schema/delta/41/event_search_event_id_idx.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/41/ratelimit.sql b/synapse/storage/databases/main/schema/delta/41/ratelimit.sql
index a194bf0238..a194bf0238 100644
--- a/synapse/storage/data_stores/main/schema/delta/41/ratelimit.sql
+++ b/synapse/storage/databases/main/schema/delta/41/ratelimit.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/42/current_state_delta.sql b/synapse/storage/databases/main/schema/delta/42/current_state_delta.sql
index d28851aff8..d28851aff8 100644
--- a/synapse/storage/data_stores/main/schema/delta/42/current_state_delta.sql
+++ b/synapse/storage/databases/main/schema/delta/42/current_state_delta.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/42/device_list_last_id.sql b/synapse/storage/databases/main/schema/delta/42/device_list_last_id.sql
index 9ab8c14fa3..9ab8c14fa3 100644
--- a/synapse/storage/data_stores/main/schema/delta/42/device_list_last_id.sql
+++ b/synapse/storage/databases/main/schema/delta/42/device_list_last_id.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/42/event_auth_state_only.sql b/synapse/storage/databases/main/schema/delta/42/event_auth_state_only.sql
index b8821ac759..b8821ac759 100644
--- a/synapse/storage/data_stores/main/schema/delta/42/event_auth_state_only.sql
+++ b/synapse/storage/databases/main/schema/delta/42/event_auth_state_only.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/42/user_dir.py b/synapse/storage/databases/main/schema/delta/42/user_dir.py
index 506f326f4d..506f326f4d 100644
--- a/synapse/storage/data_stores/main/schema/delta/42/user_dir.py
+++ b/synapse/storage/databases/main/schema/delta/42/user_dir.py
diff --git a/synapse/storage/data_stores/main/schema/delta/43/blocked_rooms.sql b/synapse/storage/databases/main/schema/delta/43/blocked_rooms.sql
index 0e3cd143ff..0e3cd143ff 100644
--- a/synapse/storage/data_stores/main/schema/delta/43/blocked_rooms.sql
+++ b/synapse/storage/databases/main/schema/delta/43/blocked_rooms.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/43/quarantine_media.sql b/synapse/storage/databases/main/schema/delta/43/quarantine_media.sql
index 630907ec4f..630907ec4f 100644
--- a/synapse/storage/data_stores/main/schema/delta/43/quarantine_media.sql
+++ b/synapse/storage/databases/main/schema/delta/43/quarantine_media.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/43/url_cache.sql b/synapse/storage/databases/main/schema/delta/43/url_cache.sql
index 45ebe020da..45ebe020da 100644
--- a/synapse/storage/data_stores/main/schema/delta/43/url_cache.sql
+++ b/synapse/storage/databases/main/schema/delta/43/url_cache.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/43/user_share.sql b/synapse/storage/databases/main/schema/delta/43/user_share.sql
index ee7062abe4..ee7062abe4 100644
--- a/synapse/storage/data_stores/main/schema/delta/43/user_share.sql
+++ b/synapse/storage/databases/main/schema/delta/43/user_share.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/44/expire_url_cache.sql b/synapse/storage/databases/main/schema/delta/44/expire_url_cache.sql
index b12f9b2ebf..b12f9b2ebf 100644
--- a/synapse/storage/data_stores/main/schema/delta/44/expire_url_cache.sql
+++ b/synapse/storage/databases/main/schema/delta/44/expire_url_cache.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/45/group_server.sql b/synapse/storage/databases/main/schema/delta/45/group_server.sql
index b2333848a0..b2333848a0 100644
--- a/synapse/storage/data_stores/main/schema/delta/45/group_server.sql
+++ b/synapse/storage/databases/main/schema/delta/45/group_server.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/45/profile_cache.sql b/synapse/storage/databases/main/schema/delta/45/profile_cache.sql
index e5ddc84df0..e5ddc84df0 100644
--- a/synapse/storage/data_stores/main/schema/delta/45/profile_cache.sql
+++ b/synapse/storage/databases/main/schema/delta/45/profile_cache.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/46/drop_refresh_tokens.sql b/synapse/storage/databases/main/schema/delta/46/drop_refresh_tokens.sql
index 68c48a89a9..68c48a89a9 100644
--- a/synapse/storage/data_stores/main/schema/delta/46/drop_refresh_tokens.sql
+++ b/synapse/storage/databases/main/schema/delta/46/drop_refresh_tokens.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/46/drop_unique_deleted_pushers.sql b/synapse/storage/databases/main/schema/delta/46/drop_unique_deleted_pushers.sql
index bb307889c1..bb307889c1 100644
--- a/synapse/storage/data_stores/main/schema/delta/46/drop_unique_deleted_pushers.sql
+++ b/synapse/storage/databases/main/schema/delta/46/drop_unique_deleted_pushers.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/46/group_server.sql b/synapse/storage/databases/main/schema/delta/46/group_server.sql
index 097679bc9a..097679bc9a 100644
--- a/synapse/storage/data_stores/main/schema/delta/46/group_server.sql
+++ b/synapse/storage/databases/main/schema/delta/46/group_server.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/46/local_media_repository_url_idx.sql b/synapse/storage/databases/main/schema/delta/46/local_media_repository_url_idx.sql
index bbfc7f5d1a..bbfc7f5d1a 100644
--- a/synapse/storage/data_stores/main/schema/delta/46/local_media_repository_url_idx.sql
+++ b/synapse/storage/databases/main/schema/delta/46/local_media_repository_url_idx.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/46/user_dir_null_room_ids.sql b/synapse/storage/databases/main/schema/delta/46/user_dir_null_room_ids.sql
index cb0d5a2576..cb0d5a2576 100644
--- a/synapse/storage/data_stores/main/schema/delta/46/user_dir_null_room_ids.sql
+++ b/synapse/storage/databases/main/schema/delta/46/user_dir_null_room_ids.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/46/user_dir_typos.sql b/synapse/storage/databases/main/schema/delta/46/user_dir_typos.sql
index d9505f8da1..d9505f8da1 100644
--- a/synapse/storage/data_stores/main/schema/delta/46/user_dir_typos.sql
+++ b/synapse/storage/databases/main/schema/delta/46/user_dir_typos.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/47/last_access_media.sql b/synapse/storage/databases/main/schema/delta/47/last_access_media.sql
index f505fb22b5..f505fb22b5 100644
--- a/synapse/storage/data_stores/main/schema/delta/47/last_access_media.sql
+++ b/synapse/storage/databases/main/schema/delta/47/last_access_media.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/47/postgres_fts_gin.sql b/synapse/storage/databases/main/schema/delta/47/postgres_fts_gin.sql
index 31d7a817eb..31d7a817eb 100644
--- a/synapse/storage/data_stores/main/schema/delta/47/postgres_fts_gin.sql
+++ b/synapse/storage/databases/main/schema/delta/47/postgres_fts_gin.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/47/push_actions_staging.sql b/synapse/storage/databases/main/schema/delta/47/push_actions_staging.sql
index edccf4a96f..edccf4a96f 100644
--- a/synapse/storage/data_stores/main/schema/delta/47/push_actions_staging.sql
+++ b/synapse/storage/databases/main/schema/delta/47/push_actions_staging.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/48/add_user_consent.sql b/synapse/storage/databases/main/schema/delta/48/add_user_consent.sql
index 5237491506..5237491506 100644
--- a/synapse/storage/data_stores/main/schema/delta/48/add_user_consent.sql
+++ b/synapse/storage/databases/main/schema/delta/48/add_user_consent.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/48/add_user_ips_last_seen_index.sql b/synapse/storage/databases/main/schema/delta/48/add_user_ips_last_seen_index.sql
index 9248b0b24a..9248b0b24a 100644
--- a/synapse/storage/data_stores/main/schema/delta/48/add_user_ips_last_seen_index.sql
+++ b/synapse/storage/databases/main/schema/delta/48/add_user_ips_last_seen_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/48/deactivated_users.sql b/synapse/storage/databases/main/schema/delta/48/deactivated_users.sql
index e9013a6969..e9013a6969 100644
--- a/synapse/storage/data_stores/main/schema/delta/48/deactivated_users.sql
+++ b/synapse/storage/databases/main/schema/delta/48/deactivated_users.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/48/group_unique_indexes.py b/synapse/storage/databases/main/schema/delta/48/group_unique_indexes.py
index 49f5f2c003..49f5f2c003 100644
--- a/synapse/storage/data_stores/main/schema/delta/48/group_unique_indexes.py
+++ b/synapse/storage/databases/main/schema/delta/48/group_unique_indexes.py
diff --git a/synapse/storage/data_stores/main/schema/delta/48/groups_joinable.sql b/synapse/storage/databases/main/schema/delta/48/groups_joinable.sql
index ce26eaf0c9..ce26eaf0c9 100644
--- a/synapse/storage/data_stores/main/schema/delta/48/groups_joinable.sql
+++ b/synapse/storage/databases/main/schema/delta/48/groups_joinable.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/49/add_user_consent_server_notice_sent.sql b/synapse/storage/databases/main/schema/delta/49/add_user_consent_server_notice_sent.sql
index 14dcf18d73..14dcf18d73 100644
--- a/synapse/storage/data_stores/main/schema/delta/49/add_user_consent_server_notice_sent.sql
+++ b/synapse/storage/databases/main/schema/delta/49/add_user_consent_server_notice_sent.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/49/add_user_daily_visits.sql b/synapse/storage/databases/main/schema/delta/49/add_user_daily_visits.sql
index 3dd478196f..3dd478196f 100644
--- a/synapse/storage/data_stores/main/schema/delta/49/add_user_daily_visits.sql
+++ b/synapse/storage/databases/main/schema/delta/49/add_user_daily_visits.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/49/add_user_ips_last_seen_only_index.sql b/synapse/storage/databases/main/schema/delta/49/add_user_ips_last_seen_only_index.sql
index 3a4ed59b5b..3a4ed59b5b 100644
--- a/synapse/storage/data_stores/main/schema/delta/49/add_user_ips_last_seen_only_index.sql
+++ b/synapse/storage/databases/main/schema/delta/49/add_user_ips_last_seen_only_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/50/add_creation_ts_users_index.sql b/synapse/storage/databases/main/schema/delta/50/add_creation_ts_users_index.sql
index c93ae47532..c93ae47532 100644
--- a/synapse/storage/data_stores/main/schema/delta/50/add_creation_ts_users_index.sql
+++ b/synapse/storage/databases/main/schema/delta/50/add_creation_ts_users_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/50/erasure_store.sql b/synapse/storage/databases/main/schema/delta/50/erasure_store.sql
index 5d8641a9ab..5d8641a9ab 100644
--- a/synapse/storage/data_stores/main/schema/delta/50/erasure_store.sql
+++ b/synapse/storage/databases/main/schema/delta/50/erasure_store.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/50/make_event_content_nullable.py b/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py
index b1684a8441..b1684a8441 100644
--- a/synapse/storage/data_stores/main/schema/delta/50/make_event_content_nullable.py
+++ b/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py
diff --git a/synapse/storage/data_stores/main/schema/delta/51/e2e_room_keys.sql b/synapse/storage/databases/main/schema/delta/51/e2e_room_keys.sql
index c0e66a697d..c0e66a697d 100644
--- a/synapse/storage/data_stores/main/schema/delta/51/e2e_room_keys.sql
+++ b/synapse/storage/databases/main/schema/delta/51/e2e_room_keys.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/51/monthly_active_users.sql b/synapse/storage/databases/main/schema/delta/51/monthly_active_users.sql
index c9d537d5a3..c9d537d5a3 100644
--- a/synapse/storage/data_stores/main/schema/delta/51/monthly_active_users.sql
+++ b/synapse/storage/databases/main/schema/delta/51/monthly_active_users.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/databases/main/schema/delta/52/add_event_to_state_group_index.sql
index 91e03d13e1..91e03d13e1 100644
--- a/synapse/storage/data_stores/main/schema/delta/52/add_event_to_state_group_index.sql
+++ b/synapse/storage/databases/main/schema/delta/52/add_event_to_state_group_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/52/device_list_streams_unique_idx.sql b/synapse/storage/databases/main/schema/delta/52/device_list_streams_unique_idx.sql
index bfa49e6f92..bfa49e6f92 100644
--- a/synapse/storage/data_stores/main/schema/delta/52/device_list_streams_unique_idx.sql
+++ b/synapse/storage/databases/main/schema/delta/52/device_list_streams_unique_idx.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/52/e2e_room_keys.sql b/synapse/storage/databases/main/schema/delta/52/e2e_room_keys.sql
index db687cccae..db687cccae 100644
--- a/synapse/storage/data_stores/main/schema/delta/52/e2e_room_keys.sql
+++ b/synapse/storage/databases/main/schema/delta/52/e2e_room_keys.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/53/add_user_type_to_users.sql b/synapse/storage/databases/main/schema/delta/53/add_user_type_to_users.sql
index 88ec2f83e5..88ec2f83e5 100644
--- a/synapse/storage/data_stores/main/schema/delta/53/add_user_type_to_users.sql
+++ b/synapse/storage/databases/main/schema/delta/53/add_user_type_to_users.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/53/drop_sent_transactions.sql b/synapse/storage/databases/main/schema/delta/53/drop_sent_transactions.sql
index e372f5a44a..e372f5a44a 100644
--- a/synapse/storage/data_stores/main/schema/delta/53/drop_sent_transactions.sql
+++ b/synapse/storage/databases/main/schema/delta/53/drop_sent_transactions.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/53/event_format_version.sql b/synapse/storage/databases/main/schema/delta/53/event_format_version.sql
index 1d977c2834..1d977c2834 100644
--- a/synapse/storage/data_stores/main/schema/delta/53/event_format_version.sql
+++ b/synapse/storage/databases/main/schema/delta/53/event_format_version.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/53/user_dir_populate.sql b/synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql
index ffcc896b58..ffcc896b58 100644
--- a/synapse/storage/data_stores/main/schema/delta/53/user_dir_populate.sql
+++ b/synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/53/user_ips_index.sql b/synapse/storage/databases/main/schema/delta/53/user_ips_index.sql
index b812c5794f..b812c5794f 100644
--- a/synapse/storage/data_stores/main/schema/delta/53/user_ips_index.sql
+++ b/synapse/storage/databases/main/schema/delta/53/user_ips_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/53/user_share.sql b/synapse/storage/databases/main/schema/delta/53/user_share.sql
index 5831b1a6f8..5831b1a6f8 100644
--- a/synapse/storage/data_stores/main/schema/delta/53/user_share.sql
+++ b/synapse/storage/databases/main/schema/delta/53/user_share.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/53/user_threepid_id.sql b/synapse/storage/databases/main/schema/delta/53/user_threepid_id.sql
index 80c2c573b6..80c2c573b6 100644
--- a/synapse/storage/data_stores/main/schema/delta/53/user_threepid_id.sql
+++ b/synapse/storage/databases/main/schema/delta/53/user_threepid_id.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/53/users_in_public_rooms.sql b/synapse/storage/databases/main/schema/delta/53/users_in_public_rooms.sql
index f7827ca6d2..f7827ca6d2 100644
--- a/synapse/storage/data_stores/main/schema/delta/53/users_in_public_rooms.sql
+++ b/synapse/storage/databases/main/schema/delta/53/users_in_public_rooms.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/54/account_validity_with_renewal.sql b/synapse/storage/databases/main/schema/delta/54/account_validity_with_renewal.sql
index 0adb2ad55e..0adb2ad55e 100644
--- a/synapse/storage/data_stores/main/schema/delta/54/account_validity_with_renewal.sql
+++ b/synapse/storage/databases/main/schema/delta/54/account_validity_with_renewal.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/54/add_validity_to_server_keys.sql b/synapse/storage/databases/main/schema/delta/54/add_validity_to_server_keys.sql
index c01aa9d2d9..c01aa9d2d9 100644
--- a/synapse/storage/data_stores/main/schema/delta/54/add_validity_to_server_keys.sql
+++ b/synapse/storage/databases/main/schema/delta/54/add_validity_to_server_keys.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/databases/main/schema/delta/54/delete_forward_extremities.sql
index b062ec840c..b062ec840c 100644
--- a/synapse/storage/data_stores/main/schema/delta/54/delete_forward_extremities.sql
+++ b/synapse/storage/databases/main/schema/delta/54/delete_forward_extremities.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/54/drop_legacy_tables.sql b/synapse/storage/databases/main/schema/delta/54/drop_legacy_tables.sql
index dbbe682697..dbbe682697 100644
--- a/synapse/storage/data_stores/main/schema/delta/54/drop_legacy_tables.sql
+++ b/synapse/storage/databases/main/schema/delta/54/drop_legacy_tables.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/54/drop_presence_list.sql b/synapse/storage/databases/main/schema/delta/54/drop_presence_list.sql
index e6ee70c623..e6ee70c623 100644
--- a/synapse/storage/data_stores/main/schema/delta/54/drop_presence_list.sql
+++ b/synapse/storage/databases/main/schema/delta/54/drop_presence_list.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/54/relations.sql b/synapse/storage/databases/main/schema/delta/54/relations.sql
index 134862b870..134862b870 100644
--- a/synapse/storage/data_stores/main/schema/delta/54/relations.sql
+++ b/synapse/storage/databases/main/schema/delta/54/relations.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/54/stats.sql b/synapse/storage/databases/main/schema/delta/54/stats.sql
index 652e58308e..652e58308e 100644
--- a/synapse/storage/data_stores/main/schema/delta/54/stats.sql
+++ b/synapse/storage/databases/main/schema/delta/54/stats.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/54/stats2.sql b/synapse/storage/databases/main/schema/delta/54/stats2.sql
index 3b2d48447f..3b2d48447f 100644
--- a/synapse/storage/data_stores/main/schema/delta/54/stats2.sql
+++ b/synapse/storage/databases/main/schema/delta/54/stats2.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/55/access_token_expiry.sql b/synapse/storage/databases/main/schema/delta/55/access_token_expiry.sql
index 4590604bfd..4590604bfd 100644
--- a/synapse/storage/data_stores/main/schema/delta/55/access_token_expiry.sql
+++ b/synapse/storage/databases/main/schema/delta/55/access_token_expiry.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/55/track_threepid_validations.sql b/synapse/storage/databases/main/schema/delta/55/track_threepid_validations.sql
index a8eced2e0a..a8eced2e0a 100644
--- a/synapse/storage/data_stores/main/schema/delta/55/track_threepid_validations.sql
+++ b/synapse/storage/databases/main/schema/delta/55/track_threepid_validations.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/55/users_alter_deactivated.sql b/synapse/storage/databases/main/schema/delta/55/users_alter_deactivated.sql
index dabdde489b..dabdde489b 100644
--- a/synapse/storage/data_stores/main/schema/delta/55/users_alter_deactivated.sql
+++ b/synapse/storage/databases/main/schema/delta/55/users_alter_deactivated.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/add_spans_to_device_lists.sql b/synapse/storage/databases/main/schema/delta/56/add_spans_to_device_lists.sql
index 41807eb1e7..41807eb1e7 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/add_spans_to_device_lists.sql
+++ b/synapse/storage/databases/main/schema/delta/56/add_spans_to_device_lists.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/current_state_events_membership.sql b/synapse/storage/databases/main/schema/delta/56/current_state_events_membership.sql
index 473018676f..473018676f 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/current_state_events_membership.sql
+++ b/synapse/storage/databases/main/schema/delta/56/current_state_events_membership.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/current_state_events_membership_mk2.sql b/synapse/storage/databases/main/schema/delta/56/current_state_events_membership_mk2.sql
index 3133d42d4a..3133d42d4a 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/current_state_events_membership_mk2.sql
+++ b/synapse/storage/databases/main/schema/delta/56/current_state_events_membership_mk2.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/delete_keys_from_deleted_backups.sql b/synapse/storage/databases/main/schema/delta/56/delete_keys_from_deleted_backups.sql
index 1d2ddb1b1a..1d2ddb1b1a 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/delete_keys_from_deleted_backups.sql
+++ b/synapse/storage/databases/main/schema/delta/56/delete_keys_from_deleted_backups.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/destinations_failure_ts.sql b/synapse/storage/databases/main/schema/delta/56/destinations_failure_ts.sql
index f00889290b..f00889290b 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/destinations_failure_ts.sql
+++ b/synapse/storage/databases/main/schema/delta/56/destinations_failure_ts.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/destinations_retry_interval_type.sql.postgres b/synapse/storage/databases/main/schema/delta/56/destinations_retry_interval_type.sql.postgres
index b9bbb18a91..b9bbb18a91 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/destinations_retry_interval_type.sql.postgres
+++ b/synapse/storage/databases/main/schema/delta/56/destinations_retry_interval_type.sql.postgres
diff --git a/synapse/storage/data_stores/main/schema/delta/56/device_stream_id_insert.sql b/synapse/storage/databases/main/schema/delta/56/device_stream_id_insert.sql
index c2f557fde9..c2f557fde9 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/device_stream_id_insert.sql
+++ b/synapse/storage/databases/main/schema/delta/56/device_stream_id_insert.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/devices_last_seen.sql b/synapse/storage/databases/main/schema/delta/56/devices_last_seen.sql
index dfa902d0ba..dfa902d0ba 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/devices_last_seen.sql
+++ b/synapse/storage/databases/main/schema/delta/56/devices_last_seen.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/drop_unused_event_tables.sql b/synapse/storage/databases/main/schema/delta/56/drop_unused_event_tables.sql
index 9f09922c67..9f09922c67 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/drop_unused_event_tables.sql
+++ b/synapse/storage/databases/main/schema/delta/56/drop_unused_event_tables.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql b/synapse/storage/databases/main/schema/delta/56/event_expiry.sql
index 81a36a8b1d..81a36a8b1d 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/event_expiry.sql
+++ b/synapse/storage/databases/main/schema/delta/56/event_expiry.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/event_labels.sql b/synapse/storage/databases/main/schema/delta/56/event_labels.sql
index 5e29c1da19..5e29c1da19 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/event_labels.sql
+++ b/synapse/storage/databases/main/schema/delta/56/event_labels.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/event_labels_background_update.sql b/synapse/storage/databases/main/schema/delta/56/event_labels_background_update.sql
index 5f5e0499ae..5f5e0499ae 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/event_labels_background_update.sql
+++ b/synapse/storage/databases/main/schema/delta/56/event_labels_background_update.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/fix_room_keys_index.sql b/synapse/storage/databases/main/schema/delta/56/fix_room_keys_index.sql
index 014cb3b538..014cb3b538 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/fix_room_keys_index.sql
+++ b/synapse/storage/databases/main/schema/delta/56/fix_room_keys_index.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/hidden_devices.sql b/synapse/storage/databases/main/schema/delta/56/hidden_devices.sql
index 67f8b20297..67f8b20297 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/hidden_devices.sql
+++ b/synapse/storage/databases/main/schema/delta/56/hidden_devices.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/hidden_devices_fix.sql.sqlite b/synapse/storage/databases/main/schema/delta/56/hidden_devices_fix.sql.sqlite
index e8b1fd35d8..e8b1fd35d8 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/hidden_devices_fix.sql.sqlite
+++ b/synapse/storage/databases/main/schema/delta/56/hidden_devices_fix.sql.sqlite
diff --git a/synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql b/synapse/storage/databases/main/schema/delta/56/nuke_empty_communities_from_db.sql
index 4f24c1405d..4f24c1405d 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql
+++ b/synapse/storage/databases/main/schema/delta/56/nuke_empty_communities_from_db.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/public_room_list_idx.sql b/synapse/storage/databases/main/schema/delta/56/public_room_list_idx.sql
index 7be31ffebb..7be31ffebb 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/public_room_list_idx.sql
+++ b/synapse/storage/databases/main/schema/delta/56/public_room_list_idx.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql b/synapse/storage/databases/main/schema/delta/56/redaction_censor.sql
index ea95db0ed7..ea95db0ed7 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor.sql
+++ b/synapse/storage/databases/main/schema/delta/56/redaction_censor.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql b/synapse/storage/databases/main/schema/delta/56/redaction_censor2.sql
index 49ce35d794..49ce35d794 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor2.sql
+++ b/synapse/storage/databases/main/schema/delta/56/redaction_censor2.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres b/synapse/storage/databases/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres
index 67471f3ef5..67471f3ef5 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres
+++ b/synapse/storage/databases/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres
diff --git a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor4.sql b/synapse/storage/databases/main/schema/delta/56/redaction_censor4.sql
index b7550f6f4e..b7550f6f4e 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/redaction_censor4.sql
+++ b/synapse/storage/databases/main/schema/delta/56/redaction_censor4.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql b/synapse/storage/databases/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql
index aeb17813d3..aeb17813d3 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql
+++ b/synapse/storage/databases/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql b/synapse/storage/databases/main/schema/delta/56/room_key_etag.sql
index 7d70dd071e..7d70dd071e 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql
+++ b/synapse/storage/databases/main/schema/delta/56/room_key_etag.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/room_membership_idx.sql b/synapse/storage/databases/main/schema/delta/56/room_membership_idx.sql
index 92ab1f5e65..92ab1f5e65 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/room_membership_idx.sql
+++ b/synapse/storage/databases/main/schema/delta/56/room_membership_idx.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql b/synapse/storage/databases/main/schema/delta/56/room_retention.sql
index ee6cdf7a14..ee6cdf7a14 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql
+++ b/synapse/storage/databases/main/schema/delta/56/room_retention.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql b/synapse/storage/databases/main/schema/delta/56/signing_keys.sql
index 5c5fffcafb..5c5fffcafb 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/signing_keys.sql
+++ b/synapse/storage/databases/main/schema/delta/56/signing_keys.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/signing_keys_nonunique_signatures.sql b/synapse/storage/databases/main/schema/delta/56/signing_keys_nonunique_signatures.sql
index 0aa90ebf0c..0aa90ebf0c 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/signing_keys_nonunique_signatures.sql
+++ b/synapse/storage/databases/main/schema/delta/56/signing_keys_nonunique_signatures.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/stats_separated.sql b/synapse/storage/databases/main/schema/delta/56/stats_separated.sql
index bbdde121e8..bbdde121e8 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/stats_separated.sql
+++ b/synapse/storage/databases/main/schema/delta/56/stats_separated.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/unique_user_filter_index.py b/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py
index 1de8b54961..1de8b54961 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/unique_user_filter_index.py
+++ b/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py
diff --git a/synapse/storage/data_stores/main/schema/delta/56/user_external_ids.sql b/synapse/storage/databases/main/schema/delta/56/user_external_ids.sql
index 91390c4527..91390c4527 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/user_external_ids.sql
+++ b/synapse/storage/databases/main/schema/delta/56/user_external_ids.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/56/users_in_public_rooms_idx.sql b/synapse/storage/databases/main/schema/delta/56/users_in_public_rooms_idx.sql
index 149f8be8b6..149f8be8b6 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/users_in_public_rooms_idx.sql
+++ b/synapse/storage/databases/main/schema/delta/56/users_in_public_rooms_idx.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql b/synapse/storage/databases/main/schema/delta/57/delete_old_current_state_events.sql
index aec06c8261..aec06c8261 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql
+++ b/synapse/storage/databases/main/schema/delta/57/delete_old_current_state_events.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql b/synapse/storage/databases/main/schema/delta/57/device_list_remote_cache_stale.sql
index c3b6de2099..c3b6de2099 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql
+++ b/synapse/storage/databases/main/schema/delta/57/device_list_remote_cache_stale.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py
index 63b5acdcf7..63b5acdcf7 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py
+++ b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py
diff --git a/synapse/storage/data_stores/main/schema/delta/57/remove_sent_outbound_pokes.sql b/synapse/storage/databases/main/schema/delta/57/remove_sent_outbound_pokes.sql
index 133d80af35..133d80af35 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/remove_sent_outbound_pokes.sql
+++ b/synapse/storage/databases/main/schema/delta/57/remove_sent_outbound_pokes.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql b/synapse/storage/databases/main/schema/delta/57/rooms_version_column.sql
index 352a66f5b0..352a66f5b0 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql
+++ b/synapse/storage/databases/main/schema/delta/57/rooms_version_column.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres b/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.postgres
index c601cff6de..c601cff6de 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres
+++ b/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.postgres
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite b/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.sqlite
index 335c6f2074..335c6f2074 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite
+++ b/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.sqlite
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres b/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.postgres
index 92aaadde0d..92aaadde0d 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres
+++ b/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.postgres
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite b/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.sqlite
index e19dab97cb..e19dab97cb 100644
--- a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite
+++ b/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.sqlite
diff --git a/synapse/storage/data_stores/main/schema/delta/58/02remove_dup_outbound_pokes.sql b/synapse/storage/databases/main/schema/delta/58/02remove_dup_outbound_pokes.sql
index fdc39e9ba5..fdc39e9ba5 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/02remove_dup_outbound_pokes.sql
+++ b/synapse/storage/databases/main/schema/delta/58/02remove_dup_outbound_pokes.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/58/03persist_ui_auth.sql b/synapse/storage/databases/main/schema/delta/58/03persist_ui_auth.sql
index dcb593fc2d..dcb593fc2d 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/03persist_ui_auth.sql
+++ b/synapse/storage/databases/main/schema/delta/58/03persist_ui_auth.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/58/05cache_instance.sql.postgres b/synapse/storage/databases/main/schema/delta/58/05cache_instance.sql.postgres
index aa46eb0e10..aa46eb0e10 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/05cache_instance.sql.postgres
+++ b/synapse/storage/databases/main/schema/delta/58/05cache_instance.sql.postgres
diff --git a/synapse/storage/data_stores/main/schema/delta/58/06dlols_unique_idx.py b/synapse/storage/databases/main/schema/delta/58/06dlols_unique_idx.py
index d353f2bcb3..d353f2bcb3 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/06dlols_unique_idx.py
+++ b/synapse/storage/databases/main/schema/delta/58/06dlols_unique_idx.py
diff --git a/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres b/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres
index 597f2ffd3d..597f2ffd3d 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres
+++ b/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres
diff --git a/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite b/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite
index 69db89ac0e..69db89ac0e 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite
+++ b/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite
diff --git a/synapse/storage/data_stores/main/schema/delta/58/10drop_local_rejections_stream.sql b/synapse/storage/databases/main/schema/delta/58/10drop_local_rejections_stream.sql
index eb57203e46..eb57203e46 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/10drop_local_rejections_stream.sql
+++ b/synapse/storage/databases/main/schema/delta/58/10drop_local_rejections_stream.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/58/10federation_pos_instance_name.sql b/synapse/storage/databases/main/schema/delta/58/10federation_pos_instance_name.sql
index 1cc2633aad..1cc2633aad 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/10federation_pos_instance_name.sql
+++ b/synapse/storage/databases/main/schema/delta/58/10federation_pos_instance_name.sql
diff --git a/synapse/storage/data_stores/main/schema/delta/58/11user_id_seq.py b/synapse/storage/databases/main/schema/delta/58/11user_id_seq.py
index 2011f6bceb..4310ec12ce 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/11user_id_seq.py
+++ b/synapse/storage/databases/main/schema/delta/58/11user_id_seq.py
@@ -16,7 +16,7 @@
 Adds a postgres SEQUENCE for generating guest user IDs.
 """
 
-from synapse.storage.data_stores.main.registration import (
+from synapse.storage.databases.main.registration import (
     find_max_generated_user_id_localpart,
 )
 from synapse.storage.engines import PostgresEngine
diff --git a/synapse/storage/databases/main/schema/delta/58/12room_stats.sql b/synapse/storage/databases/main/schema/delta/58/12room_stats.sql
new file mode 100644
index 0000000000..cade5dcca8
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/58/12room_stats.sql
@@ -0,0 +1,32 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Recalculate the stats for all rooms after the fix to joined_members erroneously
+-- incrementing on per-room profile changes.
+
+-- Note that the populate_stats_process_rooms background update is already set to
+-- run if you're upgrading from Synapse <1.0.0.
+
+-- Additionally, if you upgraded to v1.18.0 (which doesn't include this fix),
+-- let this bg job run, and then updated to v1.19.0, you'd end up with only
+-- half of your rooms having their stats recalculated after the fix landed.
+
+-- So we've switched the old `populate_stats_process_rooms` background job to a
+-- no-op and kicked off a bg job with a new name, but with the same
+-- functionality as the old one. This effectively restarts the background job
+-- from the beginning, without running it twice in a row, supporting both
+-- upgrade use cases.
+INSERT INTO background_updates (update_name, progress_json) VALUES
+    ('populate_stats_process_rooms_2', '{}');
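The comment block above describes the no-op-plus-rename pattern but not its
Python side. Below is a minimal sketch of how the registration might look in
the stats store, assuming a hypothetical _populate_stats_process_rooms handler
method; the register_noop_background_update and
register_background_update_handler helpers, and both import paths, appear
verbatim in the search.py hunks further down.

    from synapse.storage._base import SQLBaseStore
    from synapse.storage.database import DatabasePool


    class StatsStore(SQLBaseStore):
        def __init__(self, database: DatabasePool, db_conn, hs):
            super().__init__(database, db_conn, hs)

            # Servers that still have the old update queued simply mark it
            # as done without doing any work...
            self.db_pool.updates.register_noop_background_update(
                "populate_stats_process_rooms"
            )

            # ...while the INSERT above queues the renamed update, whose
            # handler re-runs the room-stats recalculation from scratch.
            self.db_pool.updates.register_background_update_handler(
                "populate_stats_process_rooms_2",
                self._populate_stats_process_rooms,
            )

        def _populate_stats_process_rooms(self, progress, batch_size):
            # Hypothetical stand-in for the real handler, which walks rooms
            # in batches, recomputes their stats, and returns the number of
            # rooms processed in this batch.
            return batch_size

Registering the no-op first means a server that already completed the old job
pays nothing extra, while the new name guarantees exactly one full re-run.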
diff --git a/synapse/storage/data_stores/main/schema/delta/58/12unread_messages.sql b/synapse/storage/databases/main/schema/delta/58/12unread_messages.sql
index 531b532c73..531b532c73 100644
--- a/synapse/storage/data_stores/main/schema/delta/58/12unread_messages.sql
+++ b/synapse/storage/databases/main/schema/delta/58/12unread_messages.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/application_services.sql b/synapse/storage/databases/main/schema/full_schemas/16/application_services.sql
index 883fcd10b2..883fcd10b2 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/application_services.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/application_services.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/event_edges.sql b/synapse/storage/databases/main/schema/full_schemas/16/event_edges.sql
index 10ce2aa7a0..10ce2aa7a0 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/event_edges.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/event_edges.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/event_signatures.sql b/synapse/storage/databases/main/schema/full_schemas/16/event_signatures.sql
index 95826da431..95826da431 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/event_signatures.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/event_signatures.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/im.sql b/synapse/storage/databases/main/schema/full_schemas/16/im.sql
index a1a2aa8e5b..a1a2aa8e5b 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/im.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/im.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/keys.sql b/synapse/storage/databases/main/schema/full_schemas/16/keys.sql
index 11cdffdbb3..11cdffdbb3 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/keys.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/keys.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/media_repository.sql b/synapse/storage/databases/main/schema/full_schemas/16/media_repository.sql
index 8f3759bb2a..8f3759bb2a 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/media_repository.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/media_repository.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/presence.sql b/synapse/storage/databases/main/schema/full_schemas/16/presence.sql
index 01d2d8f833..01d2d8f833 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/presence.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/presence.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/profiles.sql b/synapse/storage/databases/main/schema/full_schemas/16/profiles.sql
index c04f4747d9..c04f4747d9 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/profiles.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/profiles.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/push.sql b/synapse/storage/databases/main/schema/full_schemas/16/push.sql
index e44465cf45..e44465cf45 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/push.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/push.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/redactions.sql b/synapse/storage/databases/main/schema/full_schemas/16/redactions.sql
index 318f0d9aa5..318f0d9aa5 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/redactions.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/redactions.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/room_aliases.sql b/synapse/storage/databases/main/schema/full_schemas/16/room_aliases.sql
index d47da3b12f..d47da3b12f 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/room_aliases.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/room_aliases.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/state.sql b/synapse/storage/databases/main/schema/full_schemas/16/state.sql
index 96391a8f0e..96391a8f0e 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/state.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/state.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/transactions.sql b/synapse/storage/databases/main/schema/full_schemas/16/transactions.sql
index 17e67bedac..17e67bedac 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/transactions.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/transactions.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/16/users.sql b/synapse/storage/databases/main/schema/full_schemas/16/users.sql
index f013aa8b18..f013aa8b18 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/16/users.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/16/users.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres b/synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres
index 889a9a0ce4..889a9a0ce4 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres
+++ b/synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite
index a0411ede7e..a0411ede7e 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite
+++ b/synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/stream_positions.sql b/synapse/storage/databases/main/schema/full_schemas/54/stream_positions.sql
index 91d21b2921..91d21b2921 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/54/stream_positions.sql
+++ b/synapse/storage/databases/main/schema/full_schemas/54/stream_positions.sql
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/README.md b/synapse/storage/databases/main/schema/full_schemas/README.md
index c00f287190..c00f287190 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/README.md
+++ b/synapse/storage/databases/main/schema/full_schemas/README.md
diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/databases/main/search.py
index d52228297c..2162d0712d 100644
--- a/synapse/storage/data_stores/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -21,8 +21,8 @@ from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
-from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 
 logger = logging.getLogger(__name__)
@@ -88,16 +88,16 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
     EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist"
     EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin"
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SearchBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
         if not hs.config.enable_search:
             return
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
         )
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.EVENT_SEARCH_ORDER_UPDATE_NAME, self._background_reindex_search_order
         )
 
@@ -106,11 +106,11 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
         # a GIN index. However, it's possible that some people might still have
         # the background update queued, so we register a handler to clear the
         # background update.
-        self.db.updates.register_noop_background_update(
+        self.db_pool.updates.register_noop_background_update(
             self.EVENT_SEARCH_USE_GIST_POSTGRES_NAME
         )
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME, self._background_reindex_gin_search
         )
 
@@ -140,7 +140,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
             # store_search_entries_txn with a generator function, but that
             # would mean having two cursors open on the database at once.
             # Instead we just build a list of results.
-            rows = self.db.cursor_to_dict(txn)
+            rows = self.db_pool.cursor_to_dict(txn)
             if not rows:
                 return 0
 
@@ -200,18 +200,20 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
                 "rows_inserted": rows_inserted + len(event_search_rows),
             }
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, self.EVENT_SEARCH_UPDATE_NAME, progress
             )
 
             return len(event_search_rows)
 
-        result = yield self.db.runInteraction(
+        result = yield self.db_pool.runInteraction(
             self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn
         )
 
         if not result:
-            yield self.db.updates._end_background_update(self.EVENT_SEARCH_UPDATE_NAME)
+            yield self.db_pool.updates._end_background_update(
+                self.EVENT_SEARCH_UPDATE_NAME
+            )
 
         return result
 
@@ -253,9 +255,9 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
                 conn.set_session(autocommit=False)
 
         if isinstance(self.database_engine, PostgresEngine):
-            yield self.db.runWithConnection(create_index)
+            yield self.db_pool.runWithConnection(create_index)
 
-        yield self.db.updates._end_background_update(
+        yield self.db_pool.updates._end_background_update(
             self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME
         )
         return 1
@@ -286,14 +288,14 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
                 )
                 conn.set_session(autocommit=False)
 
-            yield self.db.runWithConnection(create_index)
+            yield self.db_pool.runWithConnection(create_index)
 
             pg = dict(progress)
             pg["have_added_indexes"] = True
 
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 self.EVENT_SEARCH_ORDER_UPDATE_NAME,
-                self.db.updates._background_update_progress_txn,
+                self.db_pool.updates._background_update_progress_txn,
                 self.EVENT_SEARCH_ORDER_UPDATE_NAME,
                 pg,
             )
@@ -323,18 +325,18 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
                 "have_added_indexes": True,
             }
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, self.EVENT_SEARCH_ORDER_UPDATE_NAME, progress
             )
 
             return len(rows), True
 
-        num_rows, finished = yield self.db.runInteraction(
+        num_rows, finished = yield self.db_pool.runInteraction(
             self.EVENT_SEARCH_ORDER_UPDATE_NAME, reindex_search_txn
         )
 
         if not finished:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 self.EVENT_SEARCH_ORDER_UPDATE_NAME
             )
 
@@ -342,7 +344,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
 
 
 class SearchStore(SearchBackgroundUpdateStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(SearchStore, self).__init__(database, db_conn, hs)
 
     @defer.inlineCallbacks
@@ -423,8 +425,8 @@ class SearchStore(SearchBackgroundUpdateStore):
         # entire table from the database.
         sql += " ORDER BY rank DESC LIMIT 500"
 
-        results = yield self.db.execute(
-            "search_msgs", self.db.cursor_to_dict, sql, *args
+        results = yield self.db_pool.execute(
+            "search_msgs", self.db_pool.cursor_to_dict, sql, *args
         )
 
         results = list(filter(lambda row: row["room_id"] in room_ids, results))
@@ -444,8 +446,8 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         count_sql += " GROUP BY room_id"
 
-        count_results = yield self.db.execute(
-            "search_rooms_count", self.db.cursor_to_dict, count_sql, *count_args
+        count_results = yield self.db_pool.execute(
+            "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args
         )
 
         count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)
@@ -575,8 +577,8 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         args.append(limit)
 
-        results = yield self.db.execute(
-            "search_rooms", self.db.cursor_to_dict, sql, *args
+        results = yield self.db_pool.execute(
+            "search_rooms", self.db_pool.cursor_to_dict, sql, *args
         )
 
         results = list(filter(lambda row: row["room_id"] in room_ids, results))
@@ -596,8 +598,8 @@ class SearchStore(SearchBackgroundUpdateStore):
 
         count_sql += " GROUP BY room_id"
 
-        count_results = yield self.db.execute(
-            "search_rooms_count", self.db.cursor_to_dict, count_sql, *count_args
+        count_results = yield self.db_pool.execute(
+            "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args
         )
 
         count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)
@@ -682,7 +684,7 @@ class SearchStore(SearchBackgroundUpdateStore):
 
             return highlight_words
 
-        return self.db.runInteraction("_find_highlights", f)
+        return self.db_pool.runInteraction("_find_highlights", f)
 
 
 def _to_postgres_options(options_dict):
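
The search.py hunks above show the pattern repeated throughout this commit: stores now reach the database via self.db_pool (typed DatabasePool) rather than self.db (Database), while the runInteraction calling convention is unchanged. Below is a minimal sketch of that convention; DatabasePoolSketch and the sqlite3 backing are illustrative stand-ins, not Synapse's real asynchronous pool.

    import sqlite3


    class DatabasePoolSketch:
        """Stand-in pool: runs a transaction function against one sqlite3
        connection, committing on success and rolling back on error."""

        def __init__(self, conn):
            self._conn = conn

        def runInteraction(self, desc, func, *args):
            cur = self._conn.cursor()
            try:
                result = func(cur, *args)
                self._conn.commit()
                return result
            except Exception:
                self._conn.rollback()
                raise


    db_pool = DatabasePoolSketch(sqlite3.connect(":memory:"))


    def _count_rows_txn(txn):
        txn.execute("CREATE TABLE IF NOT EXISTS event_search (event_id TEXT)")
        txn.execute("SELECT count(*) FROM event_search")
        return txn.fetchone()[0]


    print(db_pool.runInteraction("count_event_search", _count_rows_txn))  # -> 0
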
diff --git a/synapse/storage/data_stores/main/signatures.py b/synapse/storage/databases/main/signatures.py
index 36244d9f5d..dae8e8bd29 100644
--- a/synapse/storage/data_stores/main/signatures.py
+++ b/synapse/storage/databases/main/signatures.py
@@ -38,7 +38,7 @@ class SignatureWorkerStore(SQLBaseStore):
                 for event_id in event_ids
             }
 
-        return self.db.runInteraction("get_event_reference_hashes", f)
+        return self.db_pool.runInteraction("get_event_reference_hashes", f)
 
     @defer.inlineCallbacks
     def add_event_hashes(self, event_ids):
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/databases/main/state.py
index a360699408..96e0378e50 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -23,9 +23,9 @@ from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.events import EventBase
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
-from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
 from synapse.storage.state import StateFilter
 from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import cached, cachedList
@@ -54,7 +54,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
     """The parts of StateGroupStore that can be called from workers.
     """
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(StateGroupWorkerStore, self).__init__(database, db_conn, hs)
 
     async def get_room_version(self, room_id: str) -> RoomVersion:
@@ -93,7 +93,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
         # We really should have an entry in the rooms table for every room we
         # care about, but let's be a bit paranoid (at least while the background
         # update is happening) to avoid breaking existing rooms.
-        version = await self.db.simple_select_one_onecol(
+        version = await self.db_pool.simple_select_one_onecol(
             table="rooms",
             keyvalues={"room_id": room_id},
             retcol="room_version",
@@ -184,7 +184,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
 
             return {(intern_string(r[0]), intern_string(r[1])): r[2] for r in txn}
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_current_state_ids", _get_current_state_ids_txn
         )
 
@@ -231,7 +231,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
 
             return results
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_filtered_current_state_ids", _get_filtered_current_state_ids_txn
         )
 
@@ -261,7 +261,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
 
     @cached(max_entries=50000)
     def _get_state_group_for_event(self, event_id):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="event_to_state_groups",
             keyvalues={"event_id": event_id},
             retcol="state_group",
@@ -278,7 +278,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
     def _get_state_group_for_events(self, event_ids):
         """Returns mapping event_id -> state_group
         """
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="event_to_state_groups",
             column="event_id",
             iterable=event_ids,
@@ -301,7 +301,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             The subset of state groups that are referenced.
         """
 
-        rows = await self.db.simple_select_many_batch(
+        rows = await self.db_pool.simple_select_many_batch(
             table="event_to_state_groups",
             column="state_group",
             iterable=state_groups,
@@ -319,25 +319,25 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
     EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
     DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events"
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(MainStateBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
         self.server_name = hs.hostname
 
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             self.CURRENT_STATE_INDEX_UPDATE_NAME,
             index_name="current_state_events_member_index",
             table="current_state_events",
             columns=["state_key"],
             where_clause="type='m.room.member'",
         )
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME,
             index_name="event_to_state_groups_sg_index",
             table="event_to_state_groups",
             columns=["state_group"],
         )
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.DELETE_CURRENT_STATE_UPDATE_NAME, self._background_remove_left_rooms,
         )
 
@@ -429,7 +429,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
             # potentially stale, since there may have been a period where the
             # server didn't share a room with the remote user and therefore may
             # have missed any device updates.
-            rows = self.db.simple_select_many_txn(
+            rows = self.db_pool.simple_select_many_txn(
                 txn,
                 table="current_state_events",
                 column="room_id",
@@ -441,7 +441,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
             potentially_left_users = {row["state_key"] for row in rows}
 
             # Now lets actually delete the rooms from the DB.
-            self.db.simple_delete_many_txn(
+            self.db_pool.simple_delete_many_txn(
                 txn,
                 table="current_state_events",
                 column="room_id",
@@ -449,7 +449,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
                 keyvalues={},
             )
 
-            self.db.simple_delete_many_txn(
+            self.db_pool.simple_delete_many_txn(
                 txn,
                 table="event_forward_extremities",
                 column="room_id",
@@ -457,7 +457,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
                 keyvalues={},
             )
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn,
                 self.DELETE_CURRENT_STATE_UPDATE_NAME,
                 {"last_room_id": room_ids[-1]},
@@ -465,12 +465,12 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
 
             return False, potentially_left_users
 
-        finished, potentially_left_users = await self.db.runInteraction(
+        finished, potentially_left_users = await self.db_pool.runInteraction(
             "_background_remove_left_rooms", _background_remove_left_rooms_txn
         )
 
         if finished:
-            await self.db.updates._end_background_update(
+            await self.db_pool.updates._end_background_update(
                 self.DELETE_CURRENT_STATE_UPDATE_NAME
             )
 
@@ -505,5 +505,5 @@ class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore):
       * `state_groups_state`: Maps state group to state events.
     """
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(StateStore, self).__init__(database, db_conn, hs)
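
The state.py constructor above only re-points the register_background_index_update calls at db_pool; the declarative arguments (index name, table, columns, optional where_clause) are untouched. As a rough illustration of what such a registration ultimately expresses, the helper below renders those arguments to SQL. This is an assumption-level sketch, not the real updater, which also handles scheduling and Postgres/SQLite differences.

    def background_index_sql(index_name, table, columns, where_clause=None):
        # Render the declarative index description to a CREATE INDEX statement.
        sql = "CREATE INDEX %s ON %s (%s)" % (index_name, table, ", ".join(columns))
        if where_clause:
            sql += " WHERE %s" % (where_clause,)
        return sql


    print(
        background_index_sql(
            "current_state_events_member_index",
            "current_state_events",
            ["state_key"],
            where_clause="type='m.room.member'",
        )
    )
    # CREATE INDEX current_state_events_member_index ON current_state_events (state_key) WHERE type='m.room.member'
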
diff --git a/synapse/storage/data_stores/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py
index 725e12507f..0d963c98ff 100644
--- a/synapse/storage/data_stores/main/state_deltas.py
+++ b/synapse/storage/databases/main/state_deltas.py
@@ -100,14 +100,14 @@ class StateDeltasStore(SQLBaseStore):
                 ORDER BY stream_id ASC
             """
             txn.execute(sql, (prev_stream_id, clipped_stream_id))
-            return clipped_stream_id, self.db.cursor_to_dict(txn)
+            return clipped_stream_id, self.db_pool.cursor_to_dict(txn)
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_current_state_deltas", get_current_state_deltas_txn
         )
 
     def _get_max_stream_id_in_current_state_deltas_txn(self, txn):
-        return self.db.simple_select_one_onecol_txn(
+        return self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="current_state_delta_stream",
             keyvalues={},
@@ -115,7 +115,7 @@ class StateDeltasStore(SQLBaseStore):
         )
 
     def get_max_stream_id_in_current_state_deltas(self):
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_max_stream_id_in_current_state_deltas",
             self._get_max_stream_id_in_current_state_deltas_txn,
         )
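
state_deltas.py pairs runInteraction with cursor_to_dict, which turns raw cursor rows into dicts keyed by column name. A plausible self-contained equivalent (an illustrative stand-in, not Synapse's exact helper) reads the column names from cursor.description:

    import sqlite3


    def cursor_to_dict(cursor):
        # Zip each fetched row with the column names the query produced.
        col_names = [d[0] for d in cursor.description]
        return [dict(zip(col_names, row)) for row in cursor.fetchall()]


    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute("SELECT 5 AS stream_id, '!room:example.org' AS room_id")
    print(cursor_to_dict(cur))  # [{'stream_id': 5, 'room_id': '!room:example.org'}]
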
diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/databases/main/stats.py
index 922400a7c3..802c9019b9 100644
--- a/synapse/storage/data_stores/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -21,8 +21,8 @@ from typing import Tuple
 from twisted.internet.defer import DeferredLock
 
 from synapse.api.constants import EventTypes, Membership
-from synapse.storage.data_stores.main.state_deltas import StateDeltasStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.state_deltas import StateDeltasStore
 from synapse.storage.engines import PostgresEngine
 from synapse.util.caches.descriptors import cached
 
@@ -59,7 +59,7 @@ TYPE_TO_ORIGIN_TABLE = {"room": ("rooms", "room_id"), "user": ("users", "name")}
 
 
 class StatsStore(StateDeltasStore):
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(StatsStore, self).__init__(database, db_conn, hs)
 
         self.server_name = hs.hostname
@@ -69,17 +69,20 @@ class StatsStore(StateDeltasStore):
 
         self.stats_delta_processing_lock = DeferredLock()
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "populate_stats_process_rooms", self._populate_stats_process_rooms
         )
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
+            "populate_stats_process_rooms_2", self._populate_stats_process_rooms_2
+        )
+        self.db_pool.updates.register_background_update_handler(
             "populate_stats_process_users", self._populate_stats_process_users
         )
         # we no longer need to perform clean-up, but we will give ourselves
         # the potential to reintroduce it in the future – so documentation
         # will still encourage the use of this no-op handler.
-        self.db.updates.register_noop_background_update("populate_stats_cleanup")
-        self.db.updates.register_noop_background_update("populate_stats_prepare")
+        self.db_pool.updates.register_noop_background_update("populate_stats_cleanup")
+        self.db_pool.updates.register_noop_background_update("populate_stats_prepare")
 
     def quantise_stats_time(self, ts):
         """
@@ -102,7 +105,9 @@ class StatsStore(StateDeltasStore):
         This is a background update which regenerates statistics for users.
         """
         if not self.stats_enabled:
-            await self.db.updates._end_background_update("populate_stats_process_users")
+            await self.db_pool.updates._end_background_update(
+                "populate_stats_process_users"
+            )
             return 1
 
         last_user_id = progress.get("last_user_id", "")
@@ -117,22 +122,24 @@ class StatsStore(StateDeltasStore):
             txn.execute(sql, (last_user_id, batch_size))
             return [r for r, in txn]
 
-        users_to_work_on = await self.db.runInteraction(
+        users_to_work_on = await self.db_pool.runInteraction(
             "_populate_stats_process_users", _get_next_batch
         )
 
         # No more rooms -- complete the transaction.
         if not users_to_work_on:
-            await self.db.updates._end_background_update("populate_stats_process_users")
+            await self.db_pool.updates._end_background_update(
+                "populate_stats_process_users"
+            )
             return 1
 
         for user_id in users_to_work_on:
             await self._calculate_and_set_initial_state_for_user(user_id)
             progress["last_user_id"] = user_id
 
-        await self.db.runInteraction(
+        await self.db_pool.runInteraction(
             "populate_stats_process_users",
-            self.db.updates._background_update_progress_txn,
+            self.db_pool.updates._background_update_progress_txn,
             "populate_stats_process_users",
             progress,
         )
@@ -141,10 +148,31 @@ class StatsStore(StateDeltasStore):
 
     async def _populate_stats_process_rooms(self, progress, batch_size):
         """
+        This was a background update which regenerated statistics for rooms.
+
+        It has been replaced by StatsStore._populate_stats_process_rooms_2. That
+        background job was scheduled as part of Synapse v1.0.0 and is now scheduled
+        to run again. To avoid running the potentially expensive task twice for
+        anyone upgrading from <v1.0.0, the original update has been turned into a no-op.
+
+        Further context: https://github.com/matrix-org/synapse/pull/7977
+        """
+        await self.db_pool.updates._end_background_update(
+            "populate_stats_process_rooms"
+        )
+        return 1
+
+    async def _populate_stats_process_rooms_2(self, progress, batch_size):
+        """
         This is a background update which regenerates statistics for rooms.
+
+        It replaces StatsStore._populate_stats_process_rooms. See its docstring for the
+        reasoning.
         """
         if not self.stats_enabled:
-            await self.db.updates._end_background_update("populate_stats_process_rooms")
+            await self.db_pool.updates._end_background_update(
+                "populate_stats_process_rooms_2"
+            )
             return 1
 
         last_room_id = progress.get("last_room_id", "")
@@ -159,23 +187,25 @@ class StatsStore(StateDeltasStore):
             txn.execute(sql, (last_room_id, batch_size))
             return [r for r, in txn]
 
-        rooms_to_work_on = await self.db.runInteraction(
-            "populate_stats_rooms_get_batch", _get_next_batch
+        rooms_to_work_on = await self.db_pool.runInteraction(
+            "populate_stats_rooms_2_get_batch", _get_next_batch
         )
 
         # No more rooms -- complete the transaction.
         if not rooms_to_work_on:
-            await self.db.updates._end_background_update("populate_stats_process_rooms")
+            await self.db_pool.updates._end_background_update(
+                "populate_stats_process_rooms_2"
+            )
             return 1
 
         for room_id in rooms_to_work_on:
             await self._calculate_and_set_initial_state_for_room(room_id)
             progress["last_room_id"] = room_id
 
-        await self.db.runInteraction(
-            "_populate_stats_process_rooms",
-            self.db.updates._background_update_progress_txn,
-            "populate_stats_process_rooms",
+        await self.db_pool.runInteraction(
+            "_populate_stats_process_rooms_2",
+            self.db_pool.updates._background_update_progress_txn,
+            "populate_stats_process_rooms_2",
             progress,
         )
 
@@ -185,7 +215,7 @@ class StatsStore(StateDeltasStore):
         """
         Returns the stats processor positions.
         """
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="stats_incremental_position",
             keyvalues={},
             retcol="stream_id",
@@ -214,7 +244,7 @@ class StatsStore(StateDeltasStore):
             if field and "\0" in field:
                 fields[col] = None
 
-        return self.db.simple_upsert(
+        return self.db_pool.simple_upsert(
             table="room_stats_state",
             keyvalues={"room_id": room_id},
             values=fields,
@@ -235,7 +265,7 @@ class StatsStore(StateDeltasStore):
             Deferred[list[dict]], where the dict has the keys of
             ABSOLUTE_STATS_FIELDS[stats_type],  and "bucket_size" and "end_ts".
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_statistics_for_subject",
             self._get_statistics_for_subject_txn,
             stats_type,
@@ -256,7 +286,7 @@ class StatsStore(StateDeltasStore):
             ABSOLUTE_STATS_FIELDS[stats_type] + PER_SLICE_FIELDS[stats_type]
         )
 
-        slice_list = self.db.simple_select_list_paginate_txn(
+        slice_list = self.db_pool.simple_select_list_paginate_txn(
             txn,
             table + "_historical",
             "end_ts",
@@ -282,7 +312,7 @@ class StatsStore(StateDeltasStore):
         """
         table, id_col = TYPE_TO_TABLE[stats_type]
 
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             "%s_current" % (table,),
             keyvalues={id_col: id},
             retcol="completed_delta_stream_id",
@@ -318,14 +348,14 @@ class StatsStore(StateDeltasStore):
                         complete_with_stream_id=stream_id,
                     )
 
-            self.db.simple_update_one_txn(
+            self.db_pool.simple_update_one_txn(
                 txn,
                 table="stats_incremental_position",
                 keyvalues={},
                 updatevalues={"stream_id": stream_id},
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "bulk_update_stats_delta", _bulk_update_stats_delta_txn
         )
 
@@ -356,7 +386,7 @@ class StatsStore(StateDeltasStore):
                 Does not work with per-slice fields.
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "update_stats_delta",
             self._update_stats_delta_txn,
             ts,
@@ -491,17 +521,17 @@ class StatsStore(StateDeltasStore):
         else:
             self.database_engine.lock_table(txn, table)
             retcols = list(chain(absolutes.keys(), additive_relatives.keys()))
-            current_row = self.db.simple_select_one_txn(
+            current_row = self.db_pool.simple_select_one_txn(
                 txn, table, keyvalues, retcols, allow_none=True
             )
             if current_row is None:
                 merged_dict = {**keyvalues, **absolutes, **additive_relatives}
-                self.db.simple_insert_txn(txn, table, merged_dict)
+                self.db_pool.simple_insert_txn(txn, table, merged_dict)
             else:
                 for (key, val) in additive_relatives.items():
                     current_row[key] += val
                 current_row.update(absolutes)
-                self.db.simple_update_one_txn(txn, table, keyvalues, current_row)
+                self.db_pool.simple_update_one_txn(txn, table, keyvalues, current_row)
 
     def _upsert_copy_from_table_with_additive_relatives_txn(
         self,
@@ -588,11 +618,11 @@ class StatsStore(StateDeltasStore):
             txn.execute(sql, qargs)
         else:
             self.database_engine.lock_table(txn, into_table)
-            src_row = self.db.simple_select_one_txn(
+            src_row = self.db_pool.simple_select_one_txn(
                 txn, src_table, keyvalues, copy_columns
             )
             all_dest_keyvalues = {**keyvalues, **extra_dst_keyvalues}
-            dest_current_row = self.db.simple_select_one_txn(
+            dest_current_row = self.db_pool.simple_select_one_txn(
                 txn,
                 into_table,
                 keyvalues=all_dest_keyvalues,
@@ -608,11 +638,13 @@ class StatsStore(StateDeltasStore):
                     **src_row,
                     **additive_relatives,
                 }
-                self.db.simple_insert_txn(txn, into_table, merged_dict)
+                self.db_pool.simple_insert_txn(txn, into_table, merged_dict)
             else:
                 for (key, val) in additive_relatives.items():
                     src_row[key] = dest_current_row[key] + val
-                self.db.simple_update_txn(txn, into_table, all_dest_keyvalues, src_row)
+                self.db_pool.simple_update_txn(
+                    txn, into_table, all_dest_keyvalues, src_row
+                )
 
     def get_changes_room_total_events_and_bytes(self, min_pos, max_pos):
         """Fetches the counts of events in the given range of stream IDs.
@@ -626,7 +658,7 @@ class StatsStore(StateDeltasStore):
             changes.
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "stats_incremental_total_events_and_bytes",
             self.get_changes_room_total_events_and_bytes_txn,
             min_pos,
@@ -709,7 +741,7 @@ class StatsStore(StateDeltasStore):
         def _fetch_current_state_stats(txn):
             pos = self.get_room_max_stream_ordering()
 
-            rows = self.db.simple_select_many_txn(
+            rows = self.db_pool.simple_select_many_txn(
                 txn,
                 table="current_state_events",
                 column="type",
@@ -765,7 +797,7 @@ class StatsStore(StateDeltasStore):
             current_state_events_count,
             users_in_room,
             pos,
-        ) = await self.db.runInteraction(
+        ) = await self.db_pool.runInteraction(
             "get_initial_state_for_room", _fetch_current_state_stats
         )
 
@@ -839,7 +871,7 @@ class StatsStore(StateDeltasStore):
             (count,) = txn.fetchone()
             return count, pos
 
-        joined_rooms, pos = await self.db.runInteraction(
+        joined_rooms, pos = await self.db_pool.runInteraction(
             "calculate_and_set_initial_state_for_user",
             _calculate_and_set_initial_state_for_user_txn,
         )
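
Beyond the db_pool rename, stats.py re-runs the room statistics regeneration by registering it under the new name populate_stats_process_rooms_2 and turning the original update into a no-op that simply ends itself, so no deployment runs the expensive job twice. A compact sketch of that replace-and-no-op pattern, with UpdaterSketch standing in for the real db_pool.updates registry:

    class UpdaterSketch:
        """Stand-in background-update registry; not Synapse's real API."""

        def __init__(self):
            self.handlers = {}
            self.finished = set()

        def register(self, name, handler):
            self.handlers[name] = handler

        def end(self, name):
            self.finished.add(name)


    updates = UpdaterSketch()


    def populate_stats_process_rooms(progress, batch_size):
        # Old job: now a no-op that immediately marks itself complete.
        updates.end("populate_stats_process_rooms")
        return 1


    def populate_stats_process_rooms_2(progress, batch_size):
        # New job under a fresh name, so it runs once for every deployment.
        updates.end("populate_stats_process_rooms_2")
        return 1


    updates.register("populate_stats_process_rooms", populate_stats_process_rooms)
    updates.register("populate_stats_process_rooms_2", populate_stats_process_rooms_2)

    for handler in list(updates.handlers.values()):
        handler({}, 100)
    print(sorted(updates.finished))
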
diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/databases/main/stream.py
index 10d39b3699..aaf225894e 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -39,13 +39,14 @@ what sort order was used:
 import abc
 import logging
 from collections import namedtuple
+from typing import Optional
 
 from twisted.internet import defer
 
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
-from synapse.storage.database import Database, make_in_list_sql_clause
+from synapse.storage.database import DatabasePool, make_in_list_sql_clause
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.engines import PostgresEngine
 from synapse.types import RoomStreamToken
 from synapse.util.caches.stream_change_cache import StreamChangeCache
@@ -250,7 +251,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
     __metaclass__ = abc.ABCMeta
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(StreamWorkerStore, self).__init__(database, db_conn, hs)
 
         self._instance_name = hs.get_instance_name()
@@ -264,7 +265,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         self._need_to_reset_federation_stream_positions = self._send_federation
 
         events_max = self.get_room_max_stream_ordering()
-        event_cache_prefill, min_event_val = self.db.get_cache_dict(
+        event_cache_prefill, min_event_val = self.db_pool.get_cache_dict(
             db_conn,
             "events",
             entity_column="room_id",
@@ -409,7 +410,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             rows = [_EventDictReturn(row[0], None, row[1]) for row in txn]
             return rows
 
-        rows = yield self.db.runInteraction("get_room_events_stream_for_room", f)
+        rows = yield self.db_pool.runInteraction("get_room_events_stream_for_room", f)
 
         ret = yield self.get_events_as_list(
             [r.event_id for r in rows], get_prev_content=True
@@ -459,7 +460,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
             return rows
 
-        rows = yield self.db.runInteraction("get_membership_changes_for_user", f)
+        rows = yield self.db_pool.runInteraction("get_membership_changes_for_user", f)
 
         ret = yield self.get_events_as_list(
             [r.event_id for r in rows], get_prev_content=True
@@ -518,7 +519,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         end_token = RoomStreamToken.parse(end_token)
 
-        rows, token = yield self.db.runInteraction(
+        rows, token = yield self.db_pool.runInteraction(
             "get_recent_event_ids_for_room",
             self._paginate_room_events_txn,
             room_id,
@@ -555,21 +556,20 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             txn.execute(sql, (room_id, stream_ordering))
             return txn.fetchone()
 
-        return self.db.runInteraction("get_room_event_before_stream_ordering", _f)
+        return self.db_pool.runInteraction("get_room_event_before_stream_ordering", _f)
 
-    @defer.inlineCallbacks
-    def get_room_events_max_id(self, room_id=None):
+    async def get_room_events_max_id(self, room_id: Optional[str] = None) -> str:
         """Returns the current token for rooms stream.
 
         By default, it returns the current global stream token. Specifying a
         `room_id` causes it to return the current room specific topological
         token.
         """
-        token = yield self.get_room_max_stream_ordering()
+        token = self.get_room_max_stream_ordering()
         if room_id is None:
             return "s%d" % (token,)
         else:
-            topo = yield self.db.runInteraction(
+            topo = await self.db_pool.runInteraction(
                 "_get_max_topological_txn", self._get_max_topological_txn, room_id
             )
             return "t%d-%d" % (topo, token)
@@ -583,7 +583,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         Returns:
             A deferred "s%d" stream token.
         """
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="events", keyvalues={"event_id": event_id}, retcol="stream_ordering"
         ).addCallback(lambda row: "s%d" % (row,))
 
@@ -596,7 +596,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         Returns:
             A deferred "t%d-%d" topological token.
         """
-        return self.db.simple_select_one(
+        return self.db_pool.simple_select_one(
             table="events",
             keyvalues={"event_id": event_id},
             retcols=("stream_ordering", "topological_ordering"),
@@ -620,7 +620,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             "SELECT coalesce(max(topological_ordering), 0) FROM events"
             " WHERE room_id = ? AND stream_ordering < ?"
         )
-        return self.db.execute(
+        return self.db_pool.execute(
             "get_max_topological_token", None, sql, room_id, stream_key
         ).addCallback(lambda r: r[0][0] if r else 0)
 
@@ -674,7 +674,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             dict
         """
 
-        results = yield self.db.runInteraction(
+        results = yield self.db_pool.runInteraction(
             "get_events_around",
             self._get_events_around_txn,
             room_id,
@@ -716,7 +716,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             dict
         """
 
-        results = self.db.simple_select_one_txn(
+        results = self.db_pool.simple_select_one_txn(
             txn,
             "events",
             keyvalues={"event_id": event_id, "room_id": room_id},
@@ -795,7 +795,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
             return upper_bound, [row[1] for row in rows]
 
-        upper_bound, event_ids = yield self.db.runInteraction(
+        upper_bound, event_ids = yield self.db_pool.runInteraction(
             "get_all_new_events_stream", get_all_new_events_stream_txn
         )
 
@@ -805,12 +805,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
     async def get_federation_out_pos(self, typ: str) -> int:
         if self._need_to_reset_federation_stream_positions:
-            await self.db.runInteraction(
+            await self.db_pool.runInteraction(
                 "_reset_federation_positions_txn", self._reset_federation_positions_txn
             )
             self._need_to_reset_federation_stream_positions = False
 
-        return await self.db.simple_select_one_onecol(
+        return await self.db_pool.simple_select_one_onecol(
             table="federation_stream_position",
             retcol="stream_id",
             keyvalues={"type": typ, "instance_name": self._instance_name},
@@ -819,12 +819,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
     async def update_federation_out_pos(self, typ, stream_id):
         if self._need_to_reset_federation_stream_positions:
-            await self.db.runInteraction(
+            await self.db_pool.runInteraction(
                 "_reset_federation_positions_txn", self._reset_federation_positions_txn
             )
             self._need_to_reset_federation_stream_positions = False
 
-        return await self.db.simple_update_one(
+        return await self.db_pool.simple_update_one(
             table="federation_stream_position",
             keyvalues={"type": typ, "instance_name": self._instance_name},
             updatevalues={"stream_id": stream_id},
@@ -854,7 +854,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         elif self._instance_name not in configured_instances:
             return
 
-        instances_in_table = self.db.simple_select_onecol_txn(
+        instances_in_table = self.db_pool.simple_select_onecol_txn(
             txn,
             table="federation_stream_position",
             keyvalues={},
@@ -885,7 +885,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         txn.execute(sql % (clause,), args)
 
         for typ, stream_id in min_positions.items():
-            self.db.simple_upsert_txn(
+            self.db_pool.simple_upsert_txn(
                 txn,
                 table="federation_stream_position",
                 keyvalues={"type": typ, "instance_name": self._instance_name},
@@ -1036,7 +1036,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         if to_key:
             to_key = RoomStreamToken.parse(to_key)
 
-        rows, token = yield self.db.runInteraction(
+        rows, token = yield self.db_pool.runInteraction(
             "paginate_room_events",
             self._paginate_room_events_txn,
             room_id,
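
One behavioural change hides among the stream.py renames: get_room_events_max_id is converted from @defer.inlineCallbacks to a native coroutine, and the yield in front of get_room_max_stream_ordering() is dropped rather than replaced with await, since that call returns a plain int. A runnable sketch of the migrated shape, with stand-in values in place of the real store internals:

    import asyncio
    from typing import Optional


    class StreamStoreSketch:
        def get_room_max_stream_ordering(self) -> int:
            return 42  # synchronous, so the coroutine calls it without await

        async def _max_topological(self, room_id: str) -> int:
            return 7  # stands in for db_pool.runInteraction(...)

        async def get_room_events_max_id(self, room_id: Optional[str] = None) -> str:
            token = self.get_room_max_stream_ordering()
            if room_id is None:
                return "s%d" % (token,)
            topo = await self._max_topological(room_id)
            return "t%d-%d" % (topo, token)


    print(asyncio.run(StreamStoreSketch().get_room_events_max_id("!room:example.org")))
    # t7-42
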
diff --git a/synapse/storage/data_stores/main/tags.py b/synapse/storage/databases/main/tags.py
index bd7227773a..eedd2d96c3 100644
--- a/synapse/storage/data_stores/main/tags.py
+++ b/synapse/storage/databases/main/tags.py
@@ -22,7 +22,7 @@ from canonicaljson import json
 from twisted.internet import defer
 
 from synapse.storage._base import db_to_json
-from synapse.storage.data_stores.main.account_data import AccountDataWorkerStore
+from synapse.storage.databases.main.account_data import AccountDataWorkerStore
 from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
@@ -41,7 +41,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
             tag strings to tag content.
         """
 
-        deferred = self.db.simple_select_list(
+        deferred = self.db_pool.simple_select_list(
             "room_tags", {"user_id": user_id}, ["room_id", "tag", "content"]
         )
 
@@ -92,7 +92,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
             txn.execute(sql, (last_id, current_id, limit))
             return txn.fetchall()
 
-        tag_ids = await self.db.runInteraction(
+        tag_ids = await self.db_pool.runInteraction(
             "get_all_updated_tags", get_all_updated_tags_txn
         )
 
@@ -112,7 +112,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
         batch_size = 50
         results = []
         for i in range(0, len(tag_ids), batch_size):
-            tags = await self.db.runInteraction(
+            tags = await self.db_pool.runInteraction(
                 "get_all_updated_tag_content",
                 get_tag_content,
                 tag_ids[i : i + batch_size],
@@ -155,7 +155,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
         if not changed:
             return {}
 
-        room_ids = yield self.db.runInteraction(
+        room_ids = yield self.db_pool.runInteraction(
             "get_updated_tags", get_updated_tags_txn
         )
 
@@ -175,7 +175,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
         Returns:
             A deferred list of string tags.
         """
-        return self.db.simple_select_list(
+        return self.db_pool.simple_select_list(
             table="room_tags",
             keyvalues={"user_id": user_id, "room_id": room_id},
             retcols=("tag", "content"),
@@ -200,7 +200,7 @@ class TagsStore(TagsWorkerStore):
         content_json = json.dumps(content)
 
         def add_tag_txn(txn, next_id):
-            self.db.simple_upsert_txn(
+            self.db_pool.simple_upsert_txn(
                 txn,
                 table="room_tags",
                 keyvalues={"user_id": user_id, "room_id": room_id, "tag": tag},
@@ -209,7 +209,7 @@ class TagsStore(TagsWorkerStore):
             self._update_revision_txn(txn, user_id, room_id, next_id)
 
         with self._account_data_id_gen.get_next() as next_id:
-            yield self.db.runInteraction("add_tag", add_tag_txn, next_id)
+            yield self.db_pool.runInteraction("add_tag", add_tag_txn, next_id)
 
         self.get_tags_for_user.invalidate((user_id,))
 
@@ -232,7 +232,7 @@ class TagsStore(TagsWorkerStore):
             self._update_revision_txn(txn, user_id, room_id, next_id)
 
         with self._account_data_id_gen.get_next() as next_id:
-            yield self.db.runInteraction("remove_tag", remove_tag_txn, next_id)
+            yield self.db_pool.runInteraction("remove_tag", remove_tag_txn, next_id)
 
         self.get_tags_for_user.invalidate((user_id,))
 
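
The tags.py writes keep their existing shape: allocate the next account-data stream ID from a context manager, run the upsert and revision bump inside one interaction, then invalidate the per-user tag cache. A toy version of that sequence, where IdGeneratorSketch and the dict cache are illustrative stand-ins:

    from contextlib import contextmanager


    class IdGeneratorSketch:
        """Stand-in for the account-data stream ID generator."""

        def __init__(self):
            self._current = 0

        @contextmanager
        def get_next(self):
            self._current += 1
            yield self._current


    id_gen = IdGeneratorSketch()
    tag_cache = {"@alice:example.org": {"!room:example.org": {"old": {}}}}


    def add_tag(user_id, room_id, tag):
        with id_gen.get_next() as next_id:
            # The real store runs this body via db_pool.runInteraction():
            # upsert the room_tags row, then bump the user's revision.
            print("upsert", (user_id, room_id, tag), "at revision", next_id)
        tag_cache.pop(user_id, None)  # mirrors get_tags_for_user.invalidate()


    add_tag("@alice:example.org", "!room:example.org", "favourite")
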
diff --git a/synapse/storage/data_stores/main/transactions.py b/synapse/storage/databases/main/transactions.py
index a9bf457939..8804c0e4ac 100644
--- a/synapse/storage/data_stores/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -22,7 +22,7 @@ from twisted.internet import defer
 
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.util.caches.expiringcache import ExpiringCache
 
 db_binary_type = memoryview
@@ -46,7 +46,7 @@ class TransactionStore(SQLBaseStore):
     """A collection of queries for handling PDUs.
     """
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(TransactionStore, self).__init__(database, db_conn, hs)
 
         self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000)
@@ -71,7 +71,7 @@ class TransactionStore(SQLBaseStore):
             this transaction or a 2-tuple of (int, dict)
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_received_txn_response",
             self._get_received_txn_response,
             transaction_id,
@@ -79,7 +79,7 @@ class TransactionStore(SQLBaseStore):
         )
 
     def _get_received_txn_response(self, txn, transaction_id, origin):
-        result = self.db.simple_select_one_txn(
+        result = self.db_pool.simple_select_one_txn(
             txn,
             table="received_transactions",
             keyvalues={"transaction_id": transaction_id, "origin": origin},
@@ -113,7 +113,7 @@ class TransactionStore(SQLBaseStore):
             response_json (str)
         """
 
-        return self.db.simple_insert(
+        return self.db_pool.simple_insert(
             table="received_transactions",
             values={
                 "transaction_id": transaction_id,
@@ -142,7 +142,7 @@ class TransactionStore(SQLBaseStore):
         if result is not SENTINEL:
             return result
 
-        result = yield self.db.runInteraction(
+        result = yield self.db_pool.runInteraction(
             "get_destination_retry_timings",
             self._get_destination_retry_timings,
             destination,
@@ -154,7 +154,7 @@ class TransactionStore(SQLBaseStore):
         return result
 
     def _get_destination_retry_timings(self, txn, destination):
-        result = self.db.simple_select_one_txn(
+        result = self.db_pool.simple_select_one_txn(
             txn,
             table="destinations",
             keyvalues={"destination": destination},
@@ -181,7 +181,7 @@ class TransactionStore(SQLBaseStore):
         """
 
         self._destination_retry_cache.pop(destination, None)
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "set_destination_retry_timings",
             self._set_destination_retry_timings,
             destination,
@@ -221,7 +221,7 @@ class TransactionStore(SQLBaseStore):
         # We need to be careful here as the data may have changed from under us
         # due to a worker setting the timings.
 
-        prev_row = self.db.simple_select_one_txn(
+        prev_row = self.db_pool.simple_select_one_txn(
             txn,
             table="destinations",
             keyvalues={"destination": destination},
@@ -230,7 +230,7 @@ class TransactionStore(SQLBaseStore):
         )
 
         if not prev_row:
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="destinations",
                 values={
@@ -241,7 +241,7 @@ class TransactionStore(SQLBaseStore):
                 },
             )
         elif retry_interval == 0 or prev_row["retry_interval"] < retry_interval:
-            self.db.simple_update_one_txn(
+            self.db_pool.simple_update_one_txn(
                 txn,
                 "destinations",
                 keyvalues={"destination": destination},
@@ -264,6 +264,6 @@ class TransactionStore(SQLBaseStore):
         def _cleanup_transactions_txn(txn):
             txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,))
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "_cleanup_transactions", _cleanup_transactions_txn
         )
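
The guarded update near the end of the transactions.py diff is worth noting: retry timings are only overwritten when being reset (retry_interval == 0) or when the new interval is larger than the stored one, so a worker racing with another cannot move a destination's backoff backwards. Distilled into a few lines, with an in-memory dict standing in for the destinations table:

    destinations = {}  # destination -> retry_interval


    def set_retry_timings(destination, retry_interval):
        prev = destinations.get(destination)
        if prev is None:
            destinations[destination] = retry_interval
        elif retry_interval == 0 or prev < retry_interval:
            destinations[destination] = retry_interval
        # otherwise: a concurrent writer already recorded a longer backoff


    set_retry_timings("remote.example.org", 60)
    set_retry_timings("remote.example.org", 30)  # ignored: shorter than stored
    set_retry_timings("remote.example.org", 0)   # explicit reset is allowed
    print(destinations)  # {'remote.example.org': 0}
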
diff --git a/synapse/storage/data_stores/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py
index 5f1b919748..37276f73f8 100644
--- a/synapse/storage/data_stores/main/ui_auth.py
+++ b/synapse/storage/databases/main/ui_auth.py
@@ -81,7 +81,7 @@ class UIAuthWorkerStore(SQLBaseStore):
             session_id = stringutils.random_string(24)
 
             try:
-                await self.db.simple_insert(
+                await self.db_pool.simple_insert(
                     table="ui_auth_sessions",
                     values={
                         "session_id": session_id,
@@ -97,7 +97,7 @@ class UIAuthWorkerStore(SQLBaseStore):
                 return UIAuthSessionData(
                     session_id, clientdict, uri, method, description
                 )
-            except self.db.engine.module.IntegrityError:
+            except self.db_pool.engine.module.IntegrityError:
                 attempts += 1
         raise StoreError(500, "Couldn't generate a session ID.")
 
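
The hunk above leaves the session-creation retry loop intact: generate a random session ID, attempt the insert, and on the engine's IntegrityError (a unique-constraint collision) try again with a fresh ID, giving up after a fixed number of attempts. The same control flow in stand-in form, with a set playing the unique session_id column and secrets replacing synapse.util.stringutils:

    import secrets

    sessions = set()  # plays the role of the unique session_id column


    def random_string(length):
        return secrets.token_urlsafe(length)[:length]


    def create_ui_auth_session(max_attempts=5):
        attempts = 0
        while attempts < max_attempts:
            session_id = random_string(24)
            if session_id in sessions:
                # The real code lands here via db_pool.engine.module.IntegrityError.
                attempts += 1
                continue
            sessions.add(session_id)
            return session_id
        raise RuntimeError("Couldn't generate a session ID.")


    print(create_ui_auth_session())
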
@@ -111,7 +111,7 @@ class UIAuthWorkerStore(SQLBaseStore):
         Raises:
             StoreError if the session is not found.
         """
-        result = await self.db.simple_select_one(
+        result = await self.db_pool.simple_select_one(
             table="ui_auth_sessions",
             keyvalues={"session_id": session_id},
             retcols=("clientdict", "uri", "method", "description"),
@@ -140,13 +140,13 @@ class UIAuthWorkerStore(SQLBaseStore):
         # Note that we need to allow for the same stage to complete multiple
         # times here so that registration is idempotent.
         try:
-            await self.db.simple_upsert(
+            await self.db_pool.simple_upsert(
                 table="ui_auth_sessions_credentials",
                 keyvalues={"session_id": session_id, "stage_type": stage_type},
                 values={"result": json.dumps(result)},
                 desc="mark_ui_auth_stage_complete",
             )
-        except self.db.engine.module.IntegrityError:
+        except self.db_pool.engine.module.IntegrityError:
             raise StoreError(400, "Unknown session ID: %s" % (session_id,))
 
     async def get_completed_ui_auth_stages(
@@ -162,7 +162,7 @@ class UIAuthWorkerStore(SQLBaseStore):
             that auth-type.
         """
         results = {}
-        for row in await self.db.simple_select_list(
+        for row in await self.db_pool.simple_select_list(
             table="ui_auth_sessions_credentials",
             keyvalues={"session_id": session_id},
             retcols=("stage_type", "result"),
@@ -186,7 +186,7 @@ class UIAuthWorkerStore(SQLBaseStore):
         # The clientdict gets stored as JSON.
         clientdict_json = json.dumps(clientdict)
 
-        await self.db.simple_update_one(
+        await self.db_pool.simple_update_one(
             table="ui_auth_sessions",
             keyvalues={"session_id": session_id},
             updatevalues={"clientdict": clientdict_json},
@@ -206,7 +206,7 @@ class UIAuthWorkerStore(SQLBaseStore):
         Raises:
             StoreError if the session cannot be found.
         """
-        await self.db.runInteraction(
+        await self.db_pool.runInteraction(
             "set_ui_auth_session_data",
             self._set_ui_auth_session_data_txn,
             session_id,
@@ -216,7 +216,7 @@ class UIAuthWorkerStore(SQLBaseStore):
 
     def _set_ui_auth_session_data_txn(self, txn, session_id: str, key: str, value: Any):
         # Get the current value.
-        result = self.db.simple_select_one_txn(
+        result = self.db_pool.simple_select_one_txn(
             txn,
             table="ui_auth_sessions",
             keyvalues={"session_id": session_id},
@@ -227,7 +227,7 @@ class UIAuthWorkerStore(SQLBaseStore):
         serverdict = db_to_json(result["serverdict"])
         serverdict[key] = value
 
-        self.db.simple_update_one_txn(
+        self.db_pool.simple_update_one_txn(
             txn,
             table="ui_auth_sessions",
             keyvalues={"session_id": session_id},
@@ -247,7 +247,7 @@ class UIAuthWorkerStore(SQLBaseStore):
         Raises:
             StoreError if the session cannot be found.
         """
-        result = await self.db.simple_select_one(
+        result = await self.db_pool.simple_select_one(
             table="ui_auth_sessions",
             keyvalues={"session_id": session_id},
             retcols=("serverdict",),
@@ -269,7 +269,7 @@ class UIAuthStore(UIAuthWorkerStore):
                 This is an epoch time in milliseconds.
 
         """
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "delete_old_ui_auth_sessions",
             self._delete_old_ui_auth_sessions_txn,
             expiration_time,
@@ -282,7 +282,7 @@ class UIAuthStore(UIAuthWorkerStore):
         session_ids = [r[0] for r in txn.fetchall()]
 
         # Delete the corresponding completed credentials.
-        self.db.simple_delete_many_txn(
+        self.db_pool.simple_delete_many_txn(
             txn,
             table="ui_auth_sessions_credentials",
             column="session_id",
@@ -291,7 +291,7 @@ class UIAuthStore(UIAuthWorkerStore):
         )
 
         # Finally, delete the sessions.
-        self.db.simple_delete_many_txn(
+        self.db_pool.simple_delete_many_txn(
             txn,
             table="ui_auth_sessions",
             column="session_id",
diff --git a/synapse/storage/data_stores/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 942e51fd3a..d73a8e8ab9 100644
--- a/synapse/storage/data_stores/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -19,9 +19,9 @@ import re
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, JoinRules
-from synapse.storage.data_stores.main.state import StateFilter
-from synapse.storage.data_stores.main.state_deltas import StateDeltasStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.main.state import StateFilter
+from synapse.storage.databases.main.state_deltas import StateDeltasStore
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 from synapse.types import get_domain_from_id, get_localpart_from_id
 from synapse.util.caches.descriptors import cached
@@ -38,24 +38,24 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
     # add_users_who_share_private_rooms?
     SHARE_PRIVATE_WORKING_SET = 500
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(UserDirectoryBackgroundUpdateStore, self).__init__(database, db_conn, hs)
 
         self.server_name = hs.hostname
 
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "populate_user_directory_createtables",
             self._populate_user_directory_createtables,
         )
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "populate_user_directory_process_rooms",
             self._populate_user_directory_process_rooms,
         )
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "populate_user_directory_process_users",
             self._populate_user_directory_process_users,
         )
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             "populate_user_directory_cleanup", self._populate_user_directory_cleanup
         )
 
@@ -85,7 +85,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             """
             txn.execute(sql)
             rooms = [{"room_id": x[0], "events": x[1]} for x in txn.fetchall()]
-            self.db.simple_insert_many_txn(txn, TEMP_TABLE + "_rooms", rooms)
+            self.db_pool.simple_insert_many_txn(txn, TEMP_TABLE + "_rooms", rooms)
             del rooms
 
             # If search all users is on, get all the users we want to add.
@@ -100,15 +100,17 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                 txn.execute("SELECT name FROM users")
                 users = [{"user_id": x[0]} for x in txn.fetchall()]
 
-                self.db.simple_insert_many_txn(txn, TEMP_TABLE + "_users", users)
+                self.db_pool.simple_insert_many_txn(txn, TEMP_TABLE + "_users", users)
 
         new_pos = yield self.get_max_stream_id_in_current_state_deltas()
-        yield self.db.runInteraction(
+        yield self.db_pool.runInteraction(
             "populate_user_directory_temp_build", _make_staging_area
         )
-        yield self.db.simple_insert(TEMP_TABLE + "_position", {"position": new_pos})
+        yield self.db_pool.simple_insert(
+            TEMP_TABLE + "_position", {"position": new_pos}
+        )
 
-        yield self.db.updates._end_background_update(
+        yield self.db_pool.updates._end_background_update(
             "populate_user_directory_createtables"
         )
         return 1
@@ -118,7 +120,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         """
         Update the user directory stream position, then clean up the old tables.
         """
-        position = yield self.db.simple_select_one_onecol(
+        position = yield self.db_pool.simple_select_one_onecol(
             TEMP_TABLE + "_position", None, "position"
         )
         yield self.update_user_directory_stream_pos(position)
@@ -128,11 +130,13 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_users")
             txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position")
 
-        yield self.db.runInteraction(
+        yield self.db_pool.runInteraction(
             "populate_user_directory_cleanup", _delete_staging_area
         )
 
-        yield self.db.updates._end_background_update("populate_user_directory_cleanup")
+        yield self.db_pool.updates._end_background_update(
+            "populate_user_directory_cleanup"
+        )
         return 1
 
     @defer.inlineCallbacks
@@ -172,13 +176,13 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
             return rooms_to_work_on
 
-        rooms_to_work_on = yield self.db.runInteraction(
+        rooms_to_work_on = yield self.db_pool.runInteraction(
             "populate_user_directory_temp_read", _get_next_batch
         )
 
         # No more rooms -- complete the transaction.
         if not rooms_to_work_on:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 "populate_user_directory_process_rooms"
             )
             return 1
@@ -249,12 +253,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                         to_insert.clear()
 
             # We've finished a room. Delete it from the table.
-            yield self.db.simple_delete_one(TEMP_TABLE + "_rooms", {"room_id": room_id})
+            yield self.db_pool.simple_delete_one(
+                TEMP_TABLE + "_rooms", {"room_id": room_id}
+            )
             # Update the remaining counter.
             progress["remaining"] -= 1
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "populate_user_directory",
-                self.db.updates._background_update_progress_txn,
+                self.db_pool.updates._background_update_progress_txn,
                 "populate_user_directory_process_rooms",
                 progress,
             )
@@ -273,7 +279,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         If search_all_users is enabled, add all of the users to the user directory.
         """
         if not self.hs.config.user_directory_search_all_users:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 "populate_user_directory_process_users"
             )
             return 1
@@ -299,13 +305,13 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
             return users_to_work_on
 
-        users_to_work_on = yield self.db.runInteraction(
+        users_to_work_on = yield self.db_pool.runInteraction(
             "populate_user_directory_temp_read", _get_next_batch
         )
 
         # No more users -- complete the transaction.
         if not users_to_work_on:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 "populate_user_directory_process_users"
             )
             return 1
@@ -322,12 +328,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             )
 
             # We've finished processing a user. Delete it from the table.
-            yield self.db.simple_delete_one(TEMP_TABLE + "_users", {"user_id": user_id})
+            yield self.db_pool.simple_delete_one(
+                TEMP_TABLE + "_users", {"user_id": user_id}
+            )
             # Update the remaining counter.
             progress["remaining"] -= 1
-            yield self.db.runInteraction(
+            yield self.db_pool.runInteraction(
                 "populate_user_directory",
-                self.db.updates._background_update_progress_txn,
+                self.db_pool.updates._background_update_progress_txn,
                 "populate_user_directory_process_users",
                 progress,
             )
@@ -371,7 +379,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         """
 
         def _update_profile_in_user_dir_txn(txn):
-            new_entry = self.db.simple_upsert_txn(
+            new_entry = self.db_pool.simple_upsert_txn(
                 txn,
                 table="user_directory",
                 keyvalues={"user_id": user_id},
@@ -445,7 +453,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                         )
             elif isinstance(self.database_engine, Sqlite3Engine):
                 value = "%s %s" % (user_id, display_name) if display_name else user_id
-                self.db.simple_upsert_txn(
+                self.db_pool.simple_upsert_txn(
                     txn,
                     table="user_directory_search",
                     keyvalues={"user_id": user_id},
@@ -458,7 +466,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
             txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "update_profile_in_user_dir", _update_profile_in_user_dir_txn
         )
 
@@ -472,7 +480,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         """
 
         def _add_users_who_share_room_txn(txn):
-            self.db.simple_upsert_many_txn(
+            self.db_pool.simple_upsert_many_txn(
                 txn,
                 table="users_who_share_private_rooms",
                 key_names=["user_id", "other_user_id", "room_id"],
@@ -484,7 +492,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                 value_values=None,
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "add_users_who_share_room", _add_users_who_share_room_txn
         )
 
@@ -499,7 +507,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
 
         def _add_users_in_public_rooms_txn(txn):
 
-            self.db.simple_upsert_many_txn(
+            self.db_pool.simple_upsert_many_txn(
                 txn,
                 table="users_in_public_rooms",
                 key_names=["user_id", "room_id"],
@@ -508,7 +516,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                 value_values=None,
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "add_users_in_public_rooms", _add_users_in_public_rooms_txn
         )
 
@@ -523,13 +531,13 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             txn.execute("DELETE FROM users_who_share_private_rooms")
             txn.call_after(self.get_user_in_directory.invalidate_all)
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "delete_all_from_user_dir", _delete_all_from_user_dir_txn
         )
 
     @cached()
     def get_user_in_directory(self, user_id):
-        return self.db.simple_select_one(
+        return self.db_pool.simple_select_one(
             table="user_directory",
             keyvalues={"user_id": user_id},
             retcols=("display_name", "avatar_url"),
@@ -538,7 +546,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         )
 
     def update_user_directory_stream_pos(self, stream_id):
-        return self.db.simple_update_one(
+        return self.db_pool.simple_update_one(
             table="user_directory_stream_pos",
             keyvalues={},
             updatevalues={"stream_id": stream_id},
@@ -552,47 +560,49 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
     # add_users_who_share_private_rooms?
     SHARE_PRIVATE_WORKING_SET = 500
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(UserDirectoryStore, self).__init__(database, db_conn, hs)
 
     def remove_from_user_dir(self, user_id):
         def _remove_from_user_dir_txn(txn):
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn, table="user_directory", keyvalues={"user_id": user_id}
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn, table="user_directory_search", keyvalues={"user_id": user_id}
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn, table="users_in_public_rooms", keyvalues={"user_id": user_id}
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="users_who_share_private_rooms",
                 keyvalues={"user_id": user_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="users_who_share_private_rooms",
                 keyvalues={"other_user_id": user_id},
             )
             txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
 
-        return self.db.runInteraction("remove_from_user_dir", _remove_from_user_dir_txn)
+        return self.db_pool.runInteraction(
+            "remove_from_user_dir", _remove_from_user_dir_txn
+        )
 
     @defer.inlineCallbacks
     def get_users_in_dir_due_to_room(self, room_id):
         """Get all user_ids that are in the room directory because they're
         in the given room_id
         """
-        user_ids_share_pub = yield self.db.simple_select_onecol(
+        user_ids_share_pub = yield self.db_pool.simple_select_onecol(
             table="users_in_public_rooms",
             keyvalues={"room_id": room_id},
             retcol="user_id",
             desc="get_users_in_dir_due_to_room",
         )
 
-        user_ids_share_priv = yield self.db.simple_select_onecol(
+        user_ids_share_priv = yield self.db_pool.simple_select_onecol(
             table="users_who_share_private_rooms",
             keyvalues={"room_id": room_id},
             retcol="other_user_id",
@@ -615,23 +625,23 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
         """
 
         def _remove_user_who_share_room_txn(txn):
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="users_who_share_private_rooms",
                 keyvalues={"user_id": user_id, "room_id": room_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="users_who_share_private_rooms",
                 keyvalues={"other_user_id": user_id, "room_id": room_id},
             )
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn,
                 table="users_in_public_rooms",
                 keyvalues={"user_id": user_id, "room_id": room_id},
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "remove_user_who_share_room", _remove_user_who_share_room_txn
         )
 
@@ -646,14 +656,14 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
         Returns:
             list: user_id
         """
-        rows = yield self.db.simple_select_onecol(
+        rows = yield self.db_pool.simple_select_onecol(
             table="users_who_share_private_rooms",
             keyvalues={"user_id": user_id},
             retcol="room_id",
             desc="get_rooms_user_is_in",
         )
 
-        pub_rows = yield self.db.simple_select_onecol(
+        pub_rows = yield self.db_pool.simple_select_onecol(
             table="users_in_public_rooms",
             keyvalues={"user_id": user_id},
             retcol="room_id",
@@ -684,14 +694,14 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             ) f2 USING (room_id)
         """
 
-        rows = yield self.db.execute(
+        rows = yield self.db_pool.execute(
             "get_rooms_in_common_for_users", None, sql, user_id, other_user_id
         )
 
         return [room_id for room_id, in rows]
 
     def get_user_directory_stream_pos(self):
-        return self.db.simple_select_one_onecol(
+        return self.db_pool.simple_select_one_onecol(
             table="user_directory_stream_pos",
             keyvalues={},
             retcol="stream_id",
@@ -796,8 +806,8 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             # This should be unreachable.
             raise Exception("Unrecognized database engine")
 
-        results = yield self.db.execute(
-            "search_user_dir", self.db.cursor_to_dict, sql, *args
+        results = yield self.db_pool.execute(
+            "search_user_dir", self.db_pool.cursor_to_dict, sql, *args
         )
 
         limited = len(results) > limit
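
As a reader's aid: every hunk above is the same mechanical rename -- the
Database class becomes DatabasePool, the self.db attribute on store classes
becomes self.db_pool, and over-long calls are re-wrapped. A minimal sketch of
a store after the rename (ExampleStore and example_table are illustrative and
not part of this diff; the helper signature follows the calls shown above,
with allow_none assumed):

    from synapse.storage._base import SQLBaseStore
    from synapse.storage.database import DatabasePool

    class ExampleStore(SQLBaseStore):
        def __init__(self, database: DatabasePool, db_conn, hs):
            super().__init__(database, db_conn, hs)

        def get_example_row(self, user_id):
            # Helpers formerly reached via self.db now live on self.db_pool;
            # their signatures are unchanged by this commit.
            return self.db_pool.simple_select_one(
                table="example_table",
                keyvalues={"user_id": user_id},
                retcols=("display_name",),
                allow_none=True,
                desc="get_example_row",
            )
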
diff --git a/synapse/storage/data_stores/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py
index d3038ff06d..ab6cb2c1f6 100644
--- a/synapse/storage/data_stores/main/user_erasure_store.py
+++ b/synapse/storage/databases/main/user_erasure_store.py
@@ -31,7 +31,7 @@ class UserErasureWorkerStore(SQLBaseStore):
         Returns:
             Deferred[bool]: True if the user has requested erasure
         """
-        return self.db.simple_select_onecol(
+        return self.db_pool.simple_select_onecol(
             table="erased_users",
             keyvalues={"user_id": user_id},
             retcol="1",
@@ -56,7 +56,7 @@ class UserErasureWorkerStore(SQLBaseStore):
         # iterate it multiple times, and (b) avoiding duplicates.
         user_ids = tuple(set(user_ids))
 
-        rows = yield self.db.simple_select_many_batch(
+        rows = yield self.db_pool.simple_select_many_batch(
             table="erased_users",
             column="user_id",
             iterable=user_ids,
@@ -88,7 +88,7 @@ class UserErasureStore(UserErasureWorkerStore):
 
             self._invalidate_cache_and_stream(txn, self.is_user_erased, (user_id,))
 
-        return self.db.runInteraction("mark_user_erased", f)
+        return self.db_pool.runInteraction("mark_user_erased", f)
 
     def mark_user_not_erased(self, user_id: str) -> None:
         """Indicate that user_id is no longer erased.
@@ -110,4 +110,4 @@ class UserErasureStore(UserErasureWorkerStore):
 
             self._invalidate_cache_and_stream(txn, self.is_user_erased, (user_id,))
 
-        return self.db.runInteraction("mark_user_not_erased", f)
+        return self.db_pool.runInteraction("mark_user_not_erased", f)
diff --git a/synapse/storage/data_stores/state/__init__.py b/synapse/storage/databases/state/__init__.py
index 86e09f6229..c90d022899 100644
--- a/synapse/storage/data_stores/state/__init__.py
+++ b/synapse/storage/databases/state/__init__.py
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage.data_stores.state.store import StateGroupDataStore  # noqa: F401
+from synapse.storage.databases.state.store import StateGroupDataStore  # noqa: F401
diff --git a/synapse/storage/data_stores/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index be1fe97d79..1e2d584098 100644
--- a/synapse/storage/data_stores/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -18,7 +18,7 @@ import logging
 from twisted.internet import defer
 
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.state import StateFilter
 
@@ -62,7 +62,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
             count = 0
 
             while next_group:
-                next_group = self.db.simple_select_one_onecol_txn(
+                next_group = self.db_pool.simple_select_one_onecol_txn(
                     txn,
                     table="state_group_edges",
                     keyvalues={"state_group": next_group},
@@ -165,7 +165,7 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
                     ):
                         break
 
-                    next_group = self.db.simple_select_one_onecol_txn(
+                    next_group = self.db_pool.simple_select_one_onecol_txn(
                         txn,
                         table="state_group_edges",
                         keyvalues={"state_group": next_group},
@@ -182,16 +182,16 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
     STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
     STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx"
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(StateBackgroundUpdateStore, self).__init__(database, db_conn, hs)
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
             self._background_deduplicate_state,
         )
-        self.db.updates.register_background_update_handler(
+        self.db_pool.updates.register_background_update_handler(
             self.STATE_GROUP_INDEX_UPDATE_NAME, self._background_index_state
         )
-        self.db.updates.register_background_index_update(
+        self.db_pool.updates.register_background_index_update(
             self.STATE_GROUPS_ROOM_INDEX_UPDATE_NAME,
             index_name="state_groups_room_id_idx",
             table="state_groups",
@@ -212,7 +212,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
         batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))
 
         if max_group is None:
-            rows = yield self.db.execute(
+            rows = yield self.db_pool.execute(
                 "_background_deduplicate_state",
                 None,
                 "SELECT coalesce(max(id), 0) FROM state_groups",
@@ -282,13 +282,13 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
                             if prev_state.get(key, None) != value
                         }
 
-                        self.db.simple_delete_txn(
+                        self.db_pool.simple_delete_txn(
                             txn,
                             table="state_group_edges",
                             keyvalues={"state_group": state_group},
                         )
 
-                        self.db.simple_insert_txn(
+                        self.db_pool.simple_insert_txn(
                             txn,
                             table="state_group_edges",
                             values={
@@ -297,13 +297,13 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
                             },
                         )
 
-                        self.db.simple_delete_txn(
+                        self.db_pool.simple_delete_txn(
                             txn,
                             table="state_groups_state",
                             keyvalues={"state_group": state_group},
                         )
 
-                        self.db.simple_insert_many_txn(
+                        self.db_pool.simple_insert_many_txn(
                             txn,
                             table="state_groups_state",
                             values=[
@@ -324,18 +324,18 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
                 "max_group": max_group,
             }
 
-            self.db.updates._background_update_progress_txn(
+            self.db_pool.updates._background_update_progress_txn(
                 txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
             )
 
             return False, batch_size
 
-        finished, result = yield self.db.runInteraction(
+        finished, result = yield self.db_pool.runInteraction(
             self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
         )
 
         if finished:
-            yield self.db.updates._end_background_update(
+            yield self.db_pool.updates._end_background_update(
                 self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
             )
 
@@ -365,8 +365,10 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
                 )
                 txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
 
-        yield self.db.runWithConnection(reindex_txn)
+        yield self.db_pool.runWithConnection(reindex_txn)
 
-        yield self.db.updates._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)
+        yield self.db_pool.updates._end_background_update(
+            self.STATE_GROUP_INDEX_UPDATE_NAME
+        )
 
         return 1
diff --git a/synapse/storage/data_stores/state/schema/delta/23/drop_state_index.sql b/synapse/storage/databases/state/schema/delta/23/drop_state_index.sql
index ae09fa0065..ae09fa0065 100644
--- a/synapse/storage/data_stores/state/schema/delta/23/drop_state_index.sql
+++ b/synapse/storage/databases/state/schema/delta/23/drop_state_index.sql
diff --git a/synapse/storage/data_stores/state/schema/delta/30/state_stream.sql b/synapse/storage/databases/state/schema/delta/30/state_stream.sql
index e85699e82e..e85699e82e 100644
--- a/synapse/storage/data_stores/state/schema/delta/30/state_stream.sql
+++ b/synapse/storage/databases/state/schema/delta/30/state_stream.sql
diff --git a/synapse/storage/data_stores/state/schema/delta/32/remove_state_indices.sql b/synapse/storage/databases/state/schema/delta/32/remove_state_indices.sql
index 1450313bfa..1450313bfa 100644
--- a/synapse/storage/data_stores/state/schema/delta/32/remove_state_indices.sql
+++ b/synapse/storage/databases/state/schema/delta/32/remove_state_indices.sql
diff --git a/synapse/storage/data_stores/state/schema/delta/35/add_state_index.sql b/synapse/storage/databases/state/schema/delta/35/add_state_index.sql
index 33980d02f0..33980d02f0 100644
--- a/synapse/storage/data_stores/state/schema/delta/35/add_state_index.sql
+++ b/synapse/storage/databases/state/schema/delta/35/add_state_index.sql
diff --git a/synapse/storage/data_stores/state/schema/delta/35/state.sql b/synapse/storage/databases/state/schema/delta/35/state.sql
index 0f1fa68a89..0f1fa68a89 100644
--- a/synapse/storage/data_stores/state/schema/delta/35/state.sql
+++ b/synapse/storage/databases/state/schema/delta/35/state.sql
diff --git a/synapse/storage/data_stores/state/schema/delta/35/state_dedupe.sql b/synapse/storage/databases/state/schema/delta/35/state_dedupe.sql
index 97e5067ef4..97e5067ef4 100644
--- a/synapse/storage/data_stores/state/schema/delta/35/state_dedupe.sql
+++ b/synapse/storage/databases/state/schema/delta/35/state_dedupe.sql
diff --git a/synapse/storage/data_stores/state/schema/delta/47/state_group_seq.py b/synapse/storage/databases/state/schema/delta/47/state_group_seq.py
index 9fd1ccf6f7..9fd1ccf6f7 100644
--- a/synapse/storage/data_stores/state/schema/delta/47/state_group_seq.py
+++ b/synapse/storage/databases/state/schema/delta/47/state_group_seq.py
diff --git a/synapse/storage/data_stores/state/schema/delta/56/state_group_room_idx.sql b/synapse/storage/databases/state/schema/delta/56/state_group_room_idx.sql
index 7916ef18b2..7916ef18b2 100644
--- a/synapse/storage/data_stores/state/schema/delta/56/state_group_room_idx.sql
+++ b/synapse/storage/databases/state/schema/delta/56/state_group_room_idx.sql
diff --git a/synapse/storage/data_stores/state/schema/full_schemas/54/full.sql b/synapse/storage/databases/state/schema/full_schemas/54/full.sql
index 35f97d6b3d..35f97d6b3d 100644
--- a/synapse/storage/data_stores/state/schema/full_schemas/54/full.sql
+++ b/synapse/storage/databases/state/schema/full_schemas/54/full.sql
diff --git a/synapse/storage/data_stores/state/schema/full_schemas/54/sequence.sql.postgres b/synapse/storage/databases/state/schema/full_schemas/54/sequence.sql.postgres
index fcd926c9fb..fcd926c9fb 100644
--- a/synapse/storage/data_stores/state/schema/full_schemas/54/sequence.sql.postgres
+++ b/synapse/storage/databases/state/schema/full_schemas/54/sequence.sql.postgres
diff --git a/synapse/storage/data_stores/state/store.py b/synapse/storage/databases/state/store.py
index 7dada7f75f..7f104ad936 100644
--- a/synapse/storage/data_stores/state/store.py
+++ b/synapse/storage/databases/state/store.py
@@ -21,8 +21,8 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
-from synapse.storage.database import Database
+from synapse.storage.database import DatabasePool
+from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
 from synapse.storage.state import StateFilter
 from synapse.storage.types import Cursor
 from synapse.storage.util.sequence import build_sequence_generator
@@ -53,7 +53,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
     """A data store for fetching/storing state groups.
     """
 
-    def __init__(self, database: Database, db_conn, hs):
+    def __init__(self, database: DatabasePool, db_conn, hs):
         super(StateGroupDataStore, self).__init__(database, db_conn, hs)
 
         # Originally the state store used a single DictionaryCache to cache the
@@ -112,7 +112,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         """
 
         def _get_state_group_delta_txn(txn):
-            prev_group = self.db.simple_select_one_onecol_txn(
+            prev_group = self.db_pool.simple_select_one_onecol_txn(
                 txn,
                 table="state_group_edges",
                 keyvalues={"state_group": state_group},
@@ -123,7 +123,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             if not prev_group:
                 return _GetStateGroupDelta(None, None)
 
-            delta_ids = self.db.simple_select_list_txn(
+            delta_ids = self.db_pool.simple_select_list_txn(
                 txn,
                 table="state_groups_state",
                 keyvalues={"state_group": state_group},
@@ -135,7 +135,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
                 {(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
             )
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "get_state_group_delta", _get_state_group_delta_txn
         )
 
@@ -156,7 +156,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
 
         chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
         for chunk in chunks:
-            res = await self.db.runInteraction(
+            res = await self.db_pool.runInteraction(
                 "_get_state_groups_from_groups",
                 self._get_state_groups_from_groups_txn,
                 chunk,
@@ -393,7 +393,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
 
             state_group = self._state_group_seq_gen.get_next_id_txn(txn)
 
-            self.db.simple_insert_txn(
+            self.db_pool.simple_insert_txn(
                 txn,
                 table="state_groups",
                 values={"id": state_group, "room_id": room_id, "event_id": event_id},
@@ -402,7 +402,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             # We persist as a delta if we can, while also ensuring the chain
             # of deltas isn't too long, as otherwise read performance degrades.
             if prev_group:
-                is_in_db = self.db.simple_select_one_onecol_txn(
+                is_in_db = self.db_pool.simple_select_one_onecol_txn(
                     txn,
                     table="state_groups",
                     keyvalues={"id": prev_group},
@@ -417,13 +417,13 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
 
                 potential_hops = self._count_state_group_hops_txn(txn, prev_group)
             if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
-                self.db.simple_insert_txn(
+                self.db_pool.simple_insert_txn(
                     txn,
                     table="state_group_edges",
                     values={"state_group": state_group, "prev_state_group": prev_group},
                 )
 
-                self.db.simple_insert_many_txn(
+                self.db_pool.simple_insert_many_txn(
                     txn,
                     table="state_groups_state",
                     values=[
@@ -438,7 +438,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
                     ],
                 )
             else:
-                self.db.simple_insert_many_txn(
+                self.db_pool.simple_insert_many_txn(
                     txn,
                     table="state_groups_state",
                     values=[
@@ -484,7 +484,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
 
             return state_group
 
-        return self.db.runInteraction("store_state_group", _store_state_group_txn)
+        return self.db_pool.runInteraction("store_state_group", _store_state_group_txn)
 
     def purge_unreferenced_state_groups(
         self, room_id: str, state_groups_to_delete
@@ -499,7 +499,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
                 to delete.
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "purge_unreferenced_state_groups",
             self._purge_unreferenced_state_groups,
             room_id,
@@ -511,7 +511,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             "[purge] found %i state groups to delete", len(state_groups_to_delete)
         )
 
-        rows = self.db.simple_select_many_txn(
+        rows = self.db_pool.simple_select_many_txn(
             txn,
             table="state_group_edges",
             column="prev_state_group",
@@ -538,15 +538,15 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
             curr_state = curr_state[sg]
 
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn, table="state_groups_state", keyvalues={"state_group": sg}
             )
 
-            self.db.simple_delete_txn(
+            self.db_pool.simple_delete_txn(
                 txn, table="state_group_edges", keyvalues={"state_group": sg}
             )
 
-            self.db.simple_insert_many_txn(
+            self.db_pool.simple_insert_many_txn(
                 txn,
                 table="state_groups_state",
                 values=[
@@ -583,7 +583,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             A mapping from state group to previous state group.
         """
 
-        rows = await self.db.simple_select_many_batch(
+        rows = await self.db_pool.simple_select_many_batch(
             table="state_group_edges",
             column="prev_state_group",
             iterable=state_groups,
@@ -602,7 +602,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             state_groups_to_delete (list[int]): State groups to delete
         """
 
-        return self.db.runInteraction(
+        return self.db_pool.runInteraction(
             "purge_room_state",
             self._purge_room_state_txn,
             room_id,
@@ -613,7 +613,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         # first we have to delete the state groups states
         logger.info("[purge] removing %s from state_groups_state", room_id)
 
-        self.db.simple_delete_many_txn(
+        self.db_pool.simple_delete_many_txn(
             txn,
             table="state_groups_state",
             column="state_group",
@@ -624,7 +624,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         # ... and the state group edges
         logger.info("[purge] removing %s from state_group_edges", room_id)
 
-        self.db.simple_delete_many_txn(
+        self.db_pool.simple_delete_many_txn(
             txn,
             table="state_group_edges",
             column="state_group",
@@ -635,7 +635,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
         # ... and the state groups
         logger.info("[purge] removing %s from state_groups", room_id)
 
-        self.db.simple_delete_many_txn(
+        self.db_pool.simple_delete_many_txn(
             txn,
             table="state_groups",
             column="id",
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index 4a164834d9..f15b95e633 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -29,8 +29,8 @@ from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.storage.data_stores import DataStores
-from synapse.storage.data_stores.main.events import DeltaState
+from synapse.storage.databases import Databases
+from synapse.storage.databases.main.events import DeltaState
 from synapse.types import StateMap
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.metrics import Measure
@@ -179,7 +179,7 @@ class EventsPersistenceStorage(object):
     current state and forward extremity changes.
     """
 
-    def __init__(self, hs, stores: DataStores):
+    def __init__(self, hs, stores: Databases):
         # We ultimately want to split out the state store from the main store,
         # so we use separate variables here even though they point to the same
         # store for now.
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 9cc3b51fe6..1c5f305132 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -47,8 +47,8 @@ class UpgradeDatabaseException(PrepareDatabaseException):
     pass
 
 
-def prepare_database(db_conn, database_engine, config, data_stores=["main", "state"]):
-    """Prepares a database for usage. Will either create all necessary tables
+def prepare_database(db_conn, database_engine, config, databases=["main", "state"]):
+    """Prepares a physical database for usage. Will either create all necessary tables
     or upgrade from an older schema version.
 
     If `config` is None then prepare_database will assert that no upgrade is
@@ -60,8 +60,8 @@ def prepare_database(db_conn, database_engine, config, data_stores=["main", "sta
         config (synapse.config.homeserver.HomeServerConfig|None):
             application config, or None if we are connecting to an existing
             database which we expect to be configured already
-        data_stores (list[str]): The name of the data stores that will be used
-            with this database. Defaults to all data stores.
+        databases (list[str]): The names of the databases that will be used
+            with this physical database. Defaults to all databases.
     """
 
     try:
@@ -87,10 +87,10 @@ def prepare_database(db_conn, database_engine, config, data_stores=["main", "sta
                     upgraded,
                     database_engine,
                     config,
-                    data_stores=data_stores,
+                    databases=databases,
                 )
         else:
-            _setup_new_database(cur, database_engine, data_stores=data_stores)
+            _setup_new_database(cur, database_engine, databases=databases)
 
         # check if any of our configured dynamic modules want a database
         if config is not None:
@@ -103,9 +103,9 @@ def prepare_database(db_conn, database_engine, config, data_stores=["main", "sta
         raise
 
 
-def _setup_new_database(cur, database_engine, data_stores):
-    """Sets up the database by finding a base set of "full schemas" and then
-    applying any necessary deltas, including schemas from the given data
+def _setup_new_database(cur, database_engine, databases):
+    """Sets up the physical database by finding a base set of "full schemas" and
+    then applying any necessary deltas, including schemas from the given data
     stores.
 
     The "full_schemas" directory has subdirectories named after versions. This
@@ -138,8 +138,8 @@ def _setup_new_database(cur, database_engine, data_stores):
     Args:
         cur (Cursor): a database cursor
         database_engine (DatabaseEngine)
-        data_stores (list[str]): The names of the data stores to instantiate
-            on the given database.
+        databases (list[str]): The names of the databases to instantiate
+            on the given physical database.
     """
 
     # We're about to set up a brand new database so we check that its
@@ -176,13 +176,13 @@ def _setup_new_database(cur, database_engine, data_stores):
     directories.extend(
         os.path.join(
             dir_path,
-            "data_stores",
-            data_store,
+            "databases",
+            database,
             "schema",
             "full_schemas",
             str(max_current_ver),
         )
-        for data_store in data_stores
+        for database in databases
     )
 
     directory_entries = []
@@ -219,7 +219,7 @@ def _setup_new_database(cur, database_engine, data_stores):
         upgraded=False,
         database_engine=database_engine,
         config=None,
-        data_stores=data_stores,
+        databases=databases,
         is_empty=True,
     )
 
@@ -231,10 +231,10 @@ def _upgrade_existing_database(
     upgraded,
     database_engine,
     config,
-    data_stores,
+    databases,
     is_empty=False,
 ):
-    """Upgrades an existing database.
+    """Upgrades an existing physical database.
 
     Delta files can either be SQL stored in *.sql files, or python modules
     in *.py.
@@ -285,8 +285,8 @@ def _upgrade_existing_database(
         config (synapse.config.homeserver.HomeServerConfig|None):
             None if we are initialising a blank database, otherwise the application
             config
-        data_stores (list[str]): The names of the data stores to instantiate
-            on the given database.
+        databases (list[str]): The names of the databases to instantiate
+            on the given physical database.
         is_empty (bool): Is this a blank database? I.e. do we need to run the
             upgrade portions of the delta scripts.
     """
@@ -303,8 +303,8 @@ def _upgrade_existing_database(
 
     # some of the deltas assume that config.server_name is set correctly, so now
     # is a good time to run the sanity check.
-    if not is_empty and "main" in data_stores:
-        from synapse.storage.data_stores.main import check_database_before_upgrade
+    if not is_empty and "main" in databases:
+        from synapse.storage.databases.main import check_database_before_upgrade
 
         check_database_before_upgrade(cur, database_engine, config)
 
@@ -330,11 +330,9 @@ def _upgrade_existing_database(
         # First we find the directories to search in
         delta_dir = os.path.join(dir_path, "schema", "delta", str(v))
         directories = [delta_dir]
-        for data_store in data_stores:
+        for database in databases:
             directories.append(
-                os.path.join(
-                    dir_path, "data_stores", data_store, "schema", "delta", str(v)
-                )
+                os.path.join(dir_path, "databases", database, "schema", "delta", str(v))
             )
 
         # Used to check if we have any duplicate file names
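
The prepare_database changes are the same rename seen elsewhere
(data_stores -> databases), applied to the schema machinery. A toy
illustration of how the per-database delta directories are assembled for one
schema version (dir_path and v stand in for values the real loop computes;
the path components match the hunk above):

    import os

    dir_path = "synapse/storage"  # illustrative; the real code derives this
    databases = ["main", "state"]
    v = 58  # an example schema version

    directories = [os.path.join(dir_path, "schema", "delta", str(v))]
    for database in databases:
        directories.append(
            os.path.join(dir_path, "databases", database, "schema", "delta", str(v))
        )

    # directories == ["synapse/storage/schema/delta/58",
    #                 "synapse/storage/databases/main/schema/delta/58",
    #                 "synapse/storage/databases/state/schema/delta/58"]
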
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 787cebfbec..e2ddd01290 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -20,7 +20,7 @@ from typing import Dict, Set, Tuple
 
 from typing_extensions import Deque
 
-from synapse.storage.database import Database, LoggingTransaction
+from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.util.sequence import PostgresSequenceGenerator
 
 
@@ -239,7 +239,7 @@ class MultiWriterIdGenerator:
     def __init__(
         self,
         db_conn,
-        db: Database,
+        db: DatabasePool,
         instance_name: str,
         table: str,
         instance_column: str,
diff --git a/synapse/streams/events.py b/synapse/streams/events.py
index 5d3eddcfdc..393e34b9fb 100644
--- a/synapse/streams/events.py
+++ b/synapse/streams/events.py
@@ -15,8 +15,6 @@
 
 from typing import Any, Dict
 
-from twisted.internet import defer
-
 from synapse.handlers.account_data import AccountDataEventSource
 from synapse.handlers.presence import PresenceEventSource
 from synapse.handlers.receipts import ReceiptEventSource
@@ -40,19 +38,18 @@ class EventSources(object):
         }  # type: Dict[str, Any]
         self.store = hs.get_datastore()
 
-    @defer.inlineCallbacks
-    def get_current_token(self):
+    def get_current_token(self) -> StreamToken:
         push_rules_key, _ = self.store.get_push_rules_stream_token()
         to_device_key = self.store.get_to_device_stream_token()
         device_list_key = self.store.get_device_stream_token()
         groups_key = self.store.get_group_stream_token()
 
         token = StreamToken(
-            room_key=(yield self.sources["room"].get_current_key()),
-            presence_key=(yield self.sources["presence"].get_current_key()),
-            typing_key=(yield self.sources["typing"].get_current_key()),
-            receipt_key=(yield self.sources["receipt"].get_current_key()),
-            account_data_key=(yield self.sources["account_data"].get_current_key()),
+            room_key=self.sources["room"].get_current_key(),
+            presence_key=self.sources["presence"].get_current_key(),
+            typing_key=self.sources["typing"].get_current_key(),
+            receipt_key=self.sources["receipt"].get_current_key(),
+            account_data_key=self.sources["account_data"].get_current_key(),
             push_rules_key=push_rules_key,
             to_device_key=to_device_key,
             device_list_key=device_list_key,
@@ -60,8 +57,7 @@ class EventSources(object):
         )
         return token
 
-    @defer.inlineCallbacks
-    def get_current_token_for_pagination(self):
+    def get_current_token_for_pagination(self) -> StreamToken:
         """Get the current token for a given room to be used to paginate
         events.
 
@@ -69,10 +65,10 @@ class EventSources(object):
         than `room`, since they are not used during pagination.
 
         Returns:
-            Deferred[StreamToken]
+            The current token for pagination.
         """
         token = StreamToken(
-            room_key=(yield self.sources["room"].get_current_key()),
+            room_key=self.sources["room"].get_current_key(),
             presence_key=0,
             typing_key=0,
             receipt_key=0,
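
With the events.py hunks above, EventSources.get_current_token and
get_current_token_for_pagination stop being inlineCallbacks generators and
become plain synchronous methods, because each per-source get_current_key
they consult is now synchronous. A short sketch of what that means for a
caller (hs is a HomeServer instance; the variable names are illustrative):

    event_sources = hs.get_event_sources()

    # Before this commit callers had to yield/await:
    #     token = yield event_sources.get_current_token()
    # Afterwards the StreamToken is computed directly:
    token = event_sources.get_current_token()
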
diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py
new file mode 100644
index 0000000000..23393cf49b
--- /dev/null
+++ b/synapse/util/daemonize.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, 2013, 2014 Ilya Otyutskiy <ilya.otyutskiy@icloud.com>
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import atexit
+import fcntl
+import logging
+import os
+import signal
+import sys
+
+
+def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -> None:
+    """daemonize the current process
+
+    This calls fork(), and has the main process exit. When it returns we will be
+    running in the child process.
+    """
+
+    # If the pidfile already exists, read the old pid first so we can write it back
+    # if locking fails below: opening the file in "w" mode truncates its contents.
+    if os.path.isfile(pid_file):
+        with open(pid_file, "r") as pid_fh:
+            old_pid = pid_fh.read()
+
+    # Create a lockfile so that only one instance of this daemon is running at any time.
+    try:
+        lock_fh = open(pid_file, "w")
+    except IOError:
+        print("Unable to create the pidfile.")
+        sys.exit(1)
+
+    try:
+        # Try to get an exclusive lock on the file. This will fail if another process
+        # has the file locked.
+        fcntl.flock(lock_fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
+    except IOError:
+        print("Unable to lock on the pidfile.")
+        # We need to overwrite the pidfile if we got here.
+        #
+        # XXX better to avoid overwriting it, surely. This looks racy, as the pid
+        # file could be created between us reading it and us trying to lock it.
+        with open(pid_file, "w") as pid_fh:
+            pid_fh.write(old_pid)
+        sys.exit(1)
+
+    # Fork, creating a new process for the child.
+    process_id = os.fork()
+
+    if process_id != 0:
+        # parent process: exit.
+
+        # we use os._exit to avoid running the atexit handlers. In particular, that
+        # means we don't flush the logs. This is important because if we are using
+        # a MemoryHandler, any buffered log records now exist in both the main
+        # and the child process, so if we let the main process flush the logs,
+        # we'll get two copies.
+        os._exit(0)
+
+    # This is the child process. Continue.
+
+    # Stop receiving signals that are sent to the parent's process group.
+    # This is done by detaching from it: setsid() makes this process the
+    # leader of a new session (and a new process group) and detaches it from
+    # the controlling terminal. setpgrp() would be a weaker alternative that
+    # only changes the process group.
+
+    os.setsid()
+
+    # point stdin, stdout, stderr at /dev/null
+    devnull = "/dev/null"
+    if hasattr(os, "devnull"):
+        # Python has set os.devnull on this system; use it, as it might be
+        # different from /dev/null.
+        devnull = os.devnull
+
+    devnull_fd = os.open(devnull, os.O_RDWR)
+    os.dup2(devnull_fd, 0)
+    os.dup2(devnull_fd, 1)
+    os.dup2(devnull_fd, 2)
+    os.close(devnull_fd)
+
+    # now that we have redirected stderr to /dev/null, any uncaught exceptions will
+    # get sent to /dev/null, so make sure we log them.
+    #
+    # (we don't normally expect reactor.run to raise any exceptions, but this will
+    # also catch any other uncaught exceptions before we get that far.)
+
+    def excepthook(type_, value, traceback):
+        logger.critical("Unhanded exception", exc_info=(type_, value, traceback))
+
+    sys.excepthook = excepthook
+
+    # Set the umask so that files we create default to safe permissions when
+    # running as a root daemon. 027 is octal, written 0o27 in Python 3 syntax.
+    os.umask(0o27)
+
+    # Change to a known directory. If this isn't done, starting a daemon in a
+    # subdirectory that needs to be deleted results in "directory busy" errors.
+    os.chdir(chdir)
+
+    try:
+        lock_fh.write("%s" % (os.getpid()))
+        lock_fh.flush()
+    except IOError:
+        logger.error("Unable to write pid to the pidfile.")
+        print("Unable to write pid to the pidfile.")
+        sys.exit(1)
+
+    # write a log line on SIGTERM.
+    def sigterm(signum, frame):
+        logger.warning("Caught signal %s. Stopping daemon." % signum)
+        sys.exit(0)
+
+    signal.signal(signal.SIGTERM, sigterm)
+
+    # Clean up the pid file at exit.
+    def exit():
+        logger.warning("Stopping daemon.")
+        os.remove(pid_file)
+        sys.exit(0)
+
+    atexit.register(exit)
+
+    logger.warning("Starting daemon.")