diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 3285450742..64a7808140 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -37,7 +37,6 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.stream import generate_pagination_where_clause
from synapse.storage.engines import PostgresEngine
-from synapse.storage.relations import PaginationChunk
from synapse.types import JsonDict, RoomStreamToken, StreamToken
from synapse.util.caches.descriptors import cached, cachedList
@@ -71,7 +70,7 @@ class RelationsWorkerStore(SQLBaseStore):
direction: str = "b",
from_token: Optional[StreamToken] = None,
to_token: Optional[StreamToken] = None,
- ) -> PaginationChunk:
+ ) -> Tuple[List[str], Optional[StreamToken]]:
"""Get a list of relations for an event, ordered by topological ordering.
Args:
@@ -88,8 +87,10 @@ class RelationsWorkerStore(SQLBaseStore):
to_token: Fetch rows up to the given token, or up to the end if None.
Returns:
- List of event IDs that match relations requested. The rows are of
- the form `{"event_id": "..."}`.
+ A tuple of:
+            A list of related event IDs.
+
+ The next stream token, if one exists.
"""
# We don't use `event_id`, it's there so that we can cache based on
# it. The `event_id` must match the `event.event_id`.
@@ -144,7 +145,7 @@ class RelationsWorkerStore(SQLBaseStore):
def _get_recent_references_for_event_txn(
txn: LoggingTransaction,
- ) -> PaginationChunk:
+ ) -> Tuple[List[str], Optional[StreamToken]]:
txn.execute(sql, where_args + [limit + 1])
last_topo_id = None
@@ -154,7 +155,7 @@ class RelationsWorkerStore(SQLBaseStore):
# Do not include edits for redacted events as they leak event
# content.
if not is_redacted or row[1] != RelationTypes.REPLACE:
- events.append({"event_id": row[0]})
+ events.append(row[0])
last_topo_id = row[2]
last_stream_id = row[3]
@@ -177,9 +178,7 @@ class RelationsWorkerStore(SQLBaseStore):
groups_key=0,
)
- return PaginationChunk(
- chunk=list(events[:limit]), next_batch=next_token, prev_batch=from_token
- )
+ return events[:limit], next_token
return await self.db_pool.runInteraction(
"get_recent_references_for_event", _get_recent_references_for_event_txn
diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py
deleted file mode 100644
index b9d2b46799..0000000000
--- a/synapse/storage/relations.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2019 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
-
-import attr
-
-from synapse.types import JsonDict
-
-if TYPE_CHECKING:
- from synapse.storage.databases.main import DataStore
-
-logger = logging.getLogger(__name__)
-
-
-@attr.s(slots=True, auto_attribs=True)
-class PaginationChunk:
- """Returned by relation pagination APIs.
-
- Attributes:
- chunk: The rows returned by pagination
- next_batch: Token to fetch next set of results with, if
- None then there are no more results.
- prev_batch: Token to fetch previous set of results with, if
- None then there are no previous results.
- """
-
- chunk: List[JsonDict]
- next_batch: Optional[Any] = None
- prev_batch: Optional[Any] = None
-
- async def to_dict(self, store: "DataStore") -> Dict[str, Any]:
- d = {"chunk": self.chunk}
-
- if self.next_batch:
- d["next_batch"] = await self.next_batch.to_string(store)
-
- if self.prev_batch:
- d["prev_batch"] = await self.prev_batch.to_string(store)
-
- return d