author     Paul "LeoNerd" Evans <paul@matrix.org>  2016-08-18 14:21:01 +0100
committer  Paul "LeoNerd" Evans <paul@matrix.org>  2016-08-18 14:21:01 +0100
commit     d5bf7a4a991b0bfe2134bb5e5c7e194f33f037aa (patch)
tree       2187b123096728d0ff329d354368ef48daed1fdb /synapse/app/pusher.py
parent     Since empty lookups now return 200/empty list not 404, we can safely log fail... (diff)
parent     Merge pull request #1025 from matrix-org/erikj/appservice_stream (diff)
download   synapse-d5bf7a4a991b0bfe2134bb5e5c7e194f33f037aa.tar.xz
Merge remote-tracking branch 'origin/develop' into paul/thirdpartylookup
Diffstat (limited to 'synapse/app/pusher.py')
-rw-r--r--  synapse/app/pusher.py  16
1 file changed, 0 insertions, 16 deletions
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index c8dde0fcb8..8d755a4b33 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -80,11 +80,6 @@ class PusherSlaveStore(
         DataStore.get_profile_displayname.__func__
     )
 
-    # XXX: This is a bit broken because we don't persist forgotten rooms
-    # in a way that they can be streamed. This means that we don't have a
-    # way to invalidate the forgotten rooms cache correctly.
-    # For now we expire the cache every 10 minutes.
-    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
     who_forgot_in_room = (
         RoomMemberStore.__dict__["who_forgot_in_room"]
     )
@@ -168,7 +163,6 @@ class PusherServer(HomeServer):
         store = self.get_datastore()
         replication_url = self.config.worker_replication_url
         pusher_pool = self.get_pusherpool()
-        clock = self.get_clock()
 
         def stop_pusher(user_id, app_id, pushkey):
             key = "%s:%s" % (app_id, pushkey)
@@ -220,21 +214,11 @@ class PusherServer(HomeServer):
                     min_stream_id, max_stream_id, affected_room_ids
                 )
 
-        def expire_broken_caches():
-            store.who_forgot_in_room.invalidate_all()
-
-        next_expire_broken_caches_ms = 0
         while True:
             try:
                 args = store.stream_positions()
                 args["timeout"] = 30000
                 result = yield http_client.get_json(replication_url, args=args)
-                now_ms = clock.time_msec()
-                if now_ms > next_expire_broken_caches_ms:
-                    expire_broken_caches()
-                    next_expire_broken_caches_ms = (
-                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
-                    )
                 yield store.process_replication(result)
                 poke_pushers(result)
             except:
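
Taken together, the hunks above remove the pusher worker's stopgap for the who_forgot_in_room cache: rather than calling invalidate_all() on a timer (the BROKEN_CACHE_EXPIRY_MS workaround), the worker is left to keep the cache consistent through its normal replication processing, presumably because invalidations for this cache can now be propagated that way. The sketch below is a hypothetical, simplified illustration of that shift; SimpleCache, maybe_expire and apply_stream_invalidations are stand-ins, not Synapse's real classes or APIs.

# Minimal sketch, not Synapse's actual code: contrasts the removed pattern
# (periodic invalidate_all) with the pattern it gives way to (per-key
# invalidation driven by a replication result). All names are illustrative.

import time


class SimpleCache:
    """Toy stand-in for the worker-side who_forgot_in_room cache."""

    def __init__(self):
        self._entries = {}

    def set(self, key, value):
        self._entries[key] = value

    def invalidate(self, key):
        self._entries.pop(key, None)

    def invalidate_all(self):
        self._entries.clear()


# Removed pattern: throw the whole cache away whenever a timer elapses.
BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000  # value as it appeared in the removed hunk


def maybe_expire(cache, now_ms, next_expiry_ms):
    if now_ms > next_expiry_ms:
        cache.invalidate_all()
        next_expiry_ms = now_ms + BROKEN_CACHE_EXPIRY_MS
    return next_expiry_ms


# Replacement pattern: invalidate only the keys named by the replication
# result (the shape of `result` here is an assumption for illustration).
def apply_stream_invalidations(cache, result):
    for key in result.get("invalidated_keys", ()):
        cache.invalidate(key)


if __name__ == "__main__":
    cache = SimpleCache()
    cache.set(("!room:example.org", "@alice:example.org"), True)

    # Old behaviour: everything goes at once when the deadline passes.
    maybe_expire(cache, now_ms=int(time.time() * 1000), next_expiry_ms=0)

    # New behaviour: only the streamed keys are dropped.
    cache.set(("!room:example.org", "@alice:example.org"), True)
    apply_stream_invalidations(
        cache, {"invalidated_keys": [("!room:example.org", "@alice:example.org")]}
    )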