author     Erik Johnston <erik@matrix.org>  2014-08-14 10:01:04 +0100
committer  Erik Johnston <erik@matrix.org>  2014-08-14 10:01:04 +0100
commit     10294b60824347d73b01f7ce4add18467d1e6f0c (patch)
tree       06d04a0a2e0ccf183bfc602ce9694d5958ed7a2e /synapse/federation/replication.py
parent     Make feedback table also store sender. (diff)
parent     grammar fix (diff)
download   synapse-10294b60824347d73b01f7ce4add18467d1e6f0c.tar.xz
Merge branch 'master' of github.com:matrix-org/synapse into sql_refactor
Conflicts:
	synapse/storage/_base.py
Diffstat (limited to 'synapse/federation/replication.py')
-rw-r--r--  synapse/federation/replication.py  18
1 file changed, 9 insertions, 9 deletions
diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py
index 01020566cf..bc9df2f214 100644
--- a/synapse/federation/replication.py
+++ b/synapse/federation/replication.py
@@ -118,7 +118,7 @@ class ReplicationLayer(object):
         *Note:* The home server should always call `send_pdu` even if it knows
         that it does not need to be replicated to other home servers. This is
         in case e.g. someone else joins via a remote home server and then
-        paginates.
+        backfills.
 
         TODO: Figure out when we should actually resolve the deferred.
 
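A minimal sketch of the calling pattern this note implies; the handler below and the exact `send_pdu(pdu)` signature are assumptions rather than part of this diff:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def handle_new_event(replication_layer, pdu):
        # Always hand the PDU to the replication layer, even when no remote
        # home server currently needs it: a server whose user joins the room
        # later via another home server may backfill and must be able to
        # find it.
        yield replication_layer.send_pdu(pdu)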
@@ -179,13 +179,13 @@ class ReplicationLayer(object):
 
     @defer.inlineCallbacks
     @log_function
-    def paginate(self, dest, context, limit):
+    def backfill(self, dest, context, limit):
         """Requests some more historic PDUs for the given context from the
         given destination server.
 
         Args:
             dest (str): The remote home server to ask.
-            context (str): The context to paginate back on.
+            context (str): The context to backfill.
             limit (int): The maximum number of PDUs to return.
 
         Returns:
@@ -193,16 +193,16 @@ class ReplicationLayer(object):
         """
         extremities = yield self.store.get_oldest_pdus_in_context(context)
 
-        logger.debug("paginate extrem=%s", extremities)
+        logger.debug("backfill extrem=%s", extremities)
 
         # If there are no extremities then we've (probably) reached the start.
         if not extremities:
             return
 
-        transaction_data = yield self.transport_layer.paginate(
+        transaction_data = yield self.transport_layer.backfill(
             dest, context, extremities, limit)
 
-        logger.debug("paginate transaction_data=%s", repr(transaction_data))
+        logger.debug("backfill transaction_data=%s", repr(transaction_data))
 
         transaction = Transaction(**transaction_data)
 
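A rough caller-side sketch of the renamed method; the wrapper function is hypothetical, and only `backfill(dest, context, limit)` itself comes from the diff:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_more_history(replication_layer, dest, context, limit=20):
        # Ask the remote home server `dest` for up to `limit` older PDUs in
        # `context`, starting from our oldest known PDUs (the extremities
        # computed inside backfill()).  The hunk above is cut off before the
        # method returns, so its result is not inspected here.
        yield replication_layer.backfill(dest, context, limit)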
@@ -281,9 +281,9 @@ class ReplicationLayer(object):
 
     @defer.inlineCallbacks
     @log_function
-    def on_paginate_request(self, context, versions, limit):
+    def on_backfill_request(self, context, versions, limit):
 
-        pdus = yield self.pdu_actions.paginate(context, versions, limit)
+        pdus = yield self.pdu_actions.backfill(context, versions, limit)
 
         defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))
 
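A purely illustrative sketch of routing an incoming request to this handler; the dispatcher and its wiring are assumptions, while the `(200, transaction_dict)` shape is what the hunk above returns:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def dispatch_backfill(replication_layer, context, versions, limit):
        # Hypothetical transport-side dispatcher: forward the parsed request
        # to the replication layer and pass the (status code, body) pair
        # straight back as the HTTP response.
        code, body = yield replication_layer.on_backfill_request(
            context, versions, limit
        )
        defer.returnValue((code, body))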
@@ -427,7 +427,7 @@ class ReplicationLayer(object):
         # Get missing pdus if necessary.
         is_new = yield self.pdu_actions.is_new(pdu)
         if is_new and not pdu.outlier:
-            # We only paginate backwards to the min depth.
+            # We only backfill backwards to the min depth.
             min_depth = yield self.store.get_min_depth_for_context(pdu.context)
 
             if min_depth and pdu.depth > min_depth:
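Restated as a standalone predicate (a hypothetical helper; the real code continues past the end of the hunk), the guard above reads:

    def should_fetch_missing_pdus(is_new, pdu, min_depth):
        # Mirror of the guard in the hunk: only a new, non-outlier PDU whose
        # depth exceeds the context's minimum known depth triggers fetching
        # of missing PDUs, and we only backfill back down to min_depth.
        return bool(
            is_new and not pdu.outlier and min_depth and pdu.depth > min_depth
        )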