author     Patrick Cloke <clokep@users.noreply.github.com>  2020-10-20 11:29:38 -0400
committer  GitHub <noreply@github.com>  2020-10-20 11:29:38 -0400
commit     9e0f22874f7ec338895d85645e91db8674c383c0 (patch)
tree       4ad1b3e695380b1232808018d22f1f42a0e2b9a4
parent     Update mypy to 0.790, and move dependencies to extras (#8583) (diff)
download   synapse-9e0f22874f7ec338895d85645e91db8674c383c0.tar.xz
Consistently use wrap_as_background_process in more places (#8599)
-rw-r--r--  changelog.d/8599.feature                          1
-rw-r--r--  synapse/handlers/account_validity.py             12
-rw-r--r--  synapse/handlers/profile.py                      10
-rw-r--r--  synapse/storage/databases/main/events_worker.py  11
-rw-r--r--  synapse/storage/databases/main/roommember.py     16
5 files changed, 20 insertions, 30 deletions
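
Before the diff itself, a minimal before/after sketch of the pattern being consolidated, assembled from the hunks below (the surrounding class is elided; only the call-site shape matters, and the names are the ones used in the account_validity.py hunk):

    # Before: each call site hand-wrapped the coroutine so the looping call ran
    # inside run_as_background_process (giving it a logcontext and a metrics name).
    def send_emails():
        return run_as_background_process("send_renewals", self._send_renewal_emails)

    self.clock.looping_call(send_emails, 30 * 60 * 1000)

    # After: the coroutine is decorated once and handed to looping_call directly.
    @wrap_as_background_process("send_renewals")
    async def _send_renewal_emails(self):
        ...

    self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)
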
diff --git a/changelog.d/8599.feature b/changelog.d/8599.feature
new file mode 100644
index 0000000000..542993110b
--- /dev/null
+++ b/changelog.d/8599.feature
@@ -0,0 +1 @@
+Allow running background tasks in a separate worker process.
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index f33044e97a..fd4f762f33 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -22,7 +22,7 @@ from typing import List
 
 from synapse.api.errors import StoreError
 from synapse.logging.context import make_deferred_yieldable
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.types import UserID
 from synapse.util import stringutils
 
@@ -63,16 +63,10 @@ class AccountValidityHandler:
             self._raw_from = email.utils.parseaddr(self._from_string)[1]
 
             # Check the renewal emails to send and send them every 30min.
-            def send_emails():
-                # run as a background process to make sure that the database transactions
-                # have a logcontext to report to
-                return run_as_background_process(
-                    "send_renewals", self._send_renewal_emails
-                )
-
             if hs.config.run_background_tasks:
-                self.clock.looping_call(send_emails, 30 * 60 * 1000)
+                self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)
 
+    @wrap_as_background_process("send_renewals")
     async def _send_renewal_emails(self):
         """Gets the list of users whose account is expiring in the amount of time
         configured in the ``renew_at`` parameter from the ``account_validity``
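
The decorator itself is not part of this diff; roughly, wrap_as_background_process(desc) reads as a decorator factory that routes every call of the wrapped coroutine through run_as_background_process under the given description. A hypothetical sketch, assuming only the two names imported from synapse.metrics.background_process_metrics (the real implementation in that module may differ):

    from functools import wraps

    from synapse.metrics.background_process_metrics import run_as_background_process


    def wrap_as_background_process_sketch(desc):
        # Hypothetical stand-in for wrap_as_background_process: returns a decorator
        # whose wrapper runs the decorated coroutine as a named background process,
        # so call sites (e.g. clock.looping_call) no longer need a per-site wrapper.
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                return run_as_background_process(desc, func, *args, **kwargs)

            return wrapper

        return decorator
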
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index b784938755..b78a12ad01 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -24,7 +24,7 @@ from synapse.api.errors import (
     StoreError,
     SynapseError,
 )
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.types import UserID, create_requester, get_domain_from_id
 
 from ._base import BaseHandler
@@ -57,7 +57,7 @@ class ProfileHandler(BaseHandler):
 
         if hs.config.run_background_tasks:
             self.clock.looping_call(
-                self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS
+                self._update_remote_profile_cache, self.PROFILE_UPDATE_MS
             )
 
     async def get_profile(self, user_id):
@@ -370,11 +370,7 @@ class ProfileHandler(BaseHandler):
                 raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN)
             raise
 
-    def _start_update_remote_profile_cache(self):
-        return run_as_background_process(
-            "Update remote profile", self._update_remote_profile_cache
-        )
-
+    @wrap_as_background_process("Update remote profile")
     async def _update_remote_profile_cache(self):
         """Called periodically to check profiles of remote users we haven't
         checked in a while.
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index c342df2a8b..6e7f16f39c 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -33,7 +33,10 @@ from synapse.api.room_versions import (
 from synapse.events import EventBase, make_event_from_dict
 from synapse.events.utils import prune_event
 from synapse.logging.context import PreserveLoggingContext, current_context
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.background_process_metrics import (
+    run_as_background_process,
+    wrap_as_background_process,
+)
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams import BackfillStream
 from synapse.replication.tcp.streams.events import EventsStream
@@ -140,10 +143,7 @@ class EventsWorkerStore(SQLBaseStore):
         if hs.config.run_background_tasks:
             # We periodically clean out old transaction ID mappings
             self._clock.looping_call(
-                run_as_background_process,
-                5 * 60 * 1000,
-                "_cleanup_old_transaction_ids",
-                self._cleanup_old_transaction_ids,
+                self._cleanup_old_transaction_ids, 5 * 60 * 1000,
             )
 
         self._get_event_cache = LruCache(
@@ -1374,6 +1374,7 @@ class EventsWorkerStore(SQLBaseStore):
 
         return mapping
 
+    @wrap_as_background_process("_cleanup_old_transaction_ids")
     async def _cleanup_old_transaction_ids(self):
         """Cleans out transaction id mappings older than 24hrs.
         """
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 9b08b49862..01d9dbb36f 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -20,7 +20,10 @@ from synapse.api.constants import EventTypes, Membership
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
 from synapse.metrics import LaterGauge
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.background_process_metrics import (
+    run_as_background_process,
+    wrap_as_background_process,
+)
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
 from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
@@ -67,16 +70,10 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         ):
             self._known_servers_count = 1
             self.hs.get_clock().looping_call(
-                run_as_background_process,
-                60 * 1000,
-                "_count_known_servers",
-                self._count_known_servers,
+                self._count_known_servers, 60 * 1000,
             )
             self.hs.get_clock().call_later(
-                1000,
-                run_as_background_process,
-                "_count_known_servers",
-                self._count_known_servers,
+                1000, self._count_known_servers,
             )
             LaterGauge(
                 "synapse_federation_known_servers",
@@ -85,6 +82,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
                 lambda: self._known_servers_count,
             )
 
+    @wrap_as_background_process("_count_known_servers")
     async def _count_known_servers(self):
         """
         Count the servers that this server knows about.