author     Richard van der Hoff <richard@matrix.org>  2018-04-12 12:08:59 +0100
committer  Richard van der Hoff <richard@matrix.org>  2018-04-12 13:02:15 +0100
commit     b78395b7fe449d59a5c46c81a869f9f191cd934f
tree       3242266e7cafff4c6fc4084438d00f9a9025ee47 /synapse/replication/http
parent     Merge pull request #3092 from matrix-org/rav/response_cache_metrics
Refactor ResponseCache usage
Adds a `.wrap` method to ResponseCache which wraps up the boilerplate of a
(get, set) pair, and then uses it throughout the codebase.

This is largely a non-functional refactor, but it does include the following
functional changes:

* federation_server.on_context_state_request: drops use of _server_linearizer
  which looked redundant and could cause incorrect cache misses by yielding
  between the get and the set.
* RoomListHandler.get_remote_public_room_list(): fixes logcontext leaks
* the wrap function includes some logging. I'm hoping this won't be too noisy
  in production.
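
For readers unfamiliar with ResponseCache, here is a minimal, self-contained
sketch of the (get, set) boilerplate that `.wrap` folds away. It is
illustrative only and is not Synapse's implementation: the real ResponseCache
also handles entry timeouts, metrics, ObservableDeferreds and Twisted
logcontexts, and the class and attribute names below are invented for the
example.

    # Illustrative sketch only -- NOT Synapse's ResponseCache. Names are invented;
    # the real cache stores ObservableDeferreds so that several callers can wait
    # on a single in-flight result, and expires entries after a timeout.
    import logging

    from twisted.internet import defer

    logger = logging.getLogger(__name__)


    class SimpleResponseCache(object):
        def __init__(self, name):
            self._name = name
            self._results = {}  # key -> Deferred for the in-flight/cached result

        def get(self, key):
            return self._results.get(key)

        def set(self, key, deferred):
            self._results[key] = deferred
            return deferred

        def wrap(self, key, callback, *args, **kwargs):
            """Return the cached Deferred for `key`, or call `callback` and cache it."""
            result = self.get(key)
            if not result:
                logger.info("[%s]: no cached result for %r, calculating new one",
                            self._name, key)
                result = self.set(key, defer.maybeDeferred(callback, *args, **kwargs))
            else:
                logger.info("[%s]: using cached result for %r", self._name, key)
            return result

With a helper along these lines, a call site shrinks from the get/if/set/else
dance to a single wrap() call, which is exactly the shape of the change to
on_PUT in the diff below.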
Diffstat (limited to 'synapse/replication/http')
-rw-r--r--  synapse/replication/http/send_event.py  |  18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index c6a6551d24..a9baa2c1c3 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -23,7 +23,6 @@ from synapse.events.snapshot import EventContext
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.util.async import sleep
 from synapse.util.caches.response_cache import ResponseCache
-from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
 from synapse.util.metrics import Measure
 from synapse.types import Requester, UserID
 
@@ -118,17 +117,12 @@ class ReplicationSendEventRestServlet(RestServlet):
         self.response_cache = ResponseCache(hs, "send_event", timeout_ms=30 * 60 * 1000)
 
     def on_PUT(self, request, event_id):
-        result = self.response_cache.get(event_id)
-        if not result:
-            result = self.response_cache.set(
-                event_id,
-                self._handle_request(request)
-            )
-        else:
-            logger.warn("Returning cached response")
-        return make_deferred_yieldable(result)
-
-    @preserve_fn
+        return self.response_cache.wrap(
+            event_id,
+            self._handle_request,
+            request
+        )
+
     @defer.inlineCallbacks
     def _handle_request(self, request):
         with Measure(self.clock, "repl_send_event_parse"):
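
As a usage sketch, any other servlet that follows the same get/set pattern can
be converted in the same one-call style. The HTTP method, handler and key names
below are invented for illustration; only the wrap(key, callback, args...)
call shape is taken from the new on_PUT above.

    def on_GET(self, request, room_id):
        # a single wrap() call replaces the old get/set/make_deferred_yieldable dance;
        # _handle_room_request is a hypothetical handler returning a Deferred
        return self.response_cache.wrap(
            room_id,
            self._handle_room_request,
            request,
        )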