Diffstat (limited to 'synapse/storage')
-rw-r--r--  synapse/storage/__init__.py                   |  2 +-
-rw-r--r--  synapse/storage/background_updates.py         |  4 ++--
-rw-r--r--  synapse/storage/database.py                   |  4 ++--
-rw-r--r--  synapse/storage/databases/__init__.py         |  2 +-
-rw-r--r--  synapse/storage/databases/main/roommember.py  |  2 +-
-rw-r--r--  synapse/storage/keys.py                       |  2 +-
-rw-r--r--  synapse/storage/persist_events.py             |  4 ++--
-rw-r--r--  synapse/storage/prepare_database.py           |  2 +-
-rw-r--r--  synapse/storage/purge_events.py               |  2 +-
-rw-r--r--  synapse/storage/relations.py                  |  6 +++---
-rw-r--r--  synapse/storage/state.py                      |  4 ++--
-rw-r--r--  synapse/storage/util/id_generators.py         |  4 ++--
12 files changed, 19 insertions(+), 19 deletions(-)
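All 19 removed and 19 added lines below are the same mechanical cleanup: the explicit object base class is dropped from class definitions, since it is redundant under Python 3. Both spellings define identical new-style classes, as this small stand-alone check (not part of the patch) illustrates:

    # Python 3 only: listing "object" as a base adds nothing.
    class WithBase(object):
        pass

    class WithoutBase:
        pass

    # Both classes inherit directly from object and have the same MRO shape.
    assert WithBase.__mro__ == (WithBase, object)
    assert WithoutBase.__mro__ == (WithoutBase, object)

    # New-style semantics (descriptors, super(), __slots__, properties)
    # are therefore identical for either spelling.
    assert isinstance(WithoutBase(), object)
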
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 5ef3853559..8e5d78f6f7 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -37,7 +37,7 @@ from synapse.storage.state import StateGroupStorage
 __all__ = ["DataStores", "DataStore"]
 
 
-class Storage(object):
+class Storage:
     """The high level interfaces for talking to various storage layers.
     """
 
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 67a89cd51a..810721ebe9 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -24,7 +24,7 @@ from . import engines
 logger = logging.getLogger(__name__)
 
 
-class BackgroundUpdatePerformance(object):
+class BackgroundUpdatePerformance:
     """Tracks the how long a background update is taking to update its items"""
 
     def __init__(self, name):
@@ -71,7 +71,7 @@ class BackgroundUpdatePerformance(object):
             return float(self.total_item_count) / float(self.total_duration_ms)
 
 
-class BackgroundUpdater(object):
+class BackgroundUpdater:
     """ Background updates are updates to the database that run in the
     background. Each update processes a batch of data at once. We attempt to
     limit the impact of each update by monitoring how long each batch takes to
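The BackgroundUpdater docstring above describes the throttling approach: each batch is timed so the next batch can be sized to keep its impact bounded. The following is only a rough illustration of that idea, not Synapse's actual code; run_batch, target_duration_ms and the scaling rule are invented for the sketch:

    import time

    def run_with_throttling(run_batch, initial_batch_size=100, target_duration_ms=100):
        """Run batches repeatedly, resizing them so each takes ~target_duration_ms.

        run_batch(batch_size) should process up to batch_size items and return
        how many it actually processed; 0 means the update is finished.
        """
        batch_size = initial_batch_size
        while True:
            start = time.monotonic()
            processed = run_batch(batch_size)
            if processed == 0:
                return
            duration_ms = (time.monotonic() - start) * 1000
            # Scale the next batch towards the target duration, within sane bounds.
            ratio = target_duration_ms / max(duration_ms, 1)
            batch_size = max(1, min(int(batch_size * ratio), 10000))
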
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 78ca6d8346..8be943f589 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -248,7 +248,7 @@ class LoggingTransaction:
         self.txn.close()
 
 
-class PerformanceCounters(object):
+class PerformanceCounters:
     def __init__(self):
         self.current_counters = {}
         self.previous_counters = {}
@@ -286,7 +286,7 @@ class PerformanceCounters(object):
 R = TypeVar("R")
 
 
-class DatabasePool(object):
+class DatabasePool:
     """Wraps a single physical database and connection pool.
 
     A single database may be used by multiple data stores.
diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py
index 0ac854aee2..7f08bd8285 100644
--- a/synapse/storage/databases/__init__.py
+++ b/synapse/storage/databases/__init__.py
@@ -24,7 +24,7 @@ from synapse.storage.prepare_database import prepare_database
 logger = logging.getLogger(__name__)
 
 
-class Databases(object):
+class Databases:
     """The various databases.
 
     These are low level interfaces to physical databases.
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index c46f5cd524..91a8b43da3 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -999,7 +999,7 @@ class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore):
         await self.db_pool.runInteraction("forget_membership", f)
 
 
-class _JoinedHostsCache(object):
+class _JoinedHostsCache:
     """Cache for joined hosts in a room that is optimised to handle updates
     via state deltas.
     """
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
index 4769b21529..afd10f7bae 100644
--- a/synapse/storage/keys.py
+++ b/synapse/storage/keys.py
@@ -22,6 +22,6 @@ logger = logging.getLogger(__name__)
 
 
 @attr.s(slots=True, frozen=True)
-class FetchKeyResult(object):
+class FetchKeyResult:
     verify_key = attr.ib()  # VerifyKey: the key itself
     valid_until_ts = attr.ib()  # int: how long we can use this key for
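FetchKeyResult above, like the pagination tokens and StateFilter later in this diff, is an attrs class; what attr.s generates is unaffected by whether the object base is spelled out. A stand-alone sketch of the same frozen, slotted pattern, using a hypothetical Key class rather than anything from the patch:

    import attr

    @attr.s(slots=True, frozen=True)
    class Key:
        verify_key = attr.ib()      # the key material
        valid_until_ts = attr.ib()  # int: expiry timestamp in ms

    k = Key(verify_key="ed25519:abc", valid_until_ts=1000)
    # frozen=True: assigning to k.verify_key raises attr.exceptions.FrozenInstanceError.
    # slots=True: instances have no __dict__, so misspelled attribute names are
    # rejected instead of silently creating new attributes.
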
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index f15b95e633..dbaeef91dd 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -69,7 +69,7 @@ stale_forward_extremities_counter = Histogram(
 )
 
 
-class _EventPeristenceQueue(object):
+class _EventPeristenceQueue:
     """Queues up events so that they can be persisted in bulk with only one
     concurrent transaction per room.
     """
@@ -172,7 +172,7 @@ class _EventPeristenceQueue(object):
             pass
 
 
-class EventsPersistenceStorage(object):
+class EventsPersistenceStorage:
     """High level interface for handling persisting newly received events.
 
     Takes care of batching up events by room, and calculating the necessary
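_EventPeristenceQueue and EventsPersistenceStorage above batch incoming events per room so that each room has at most one persistence transaction in flight. A much reduced, synchronous sketch of that queueing pattern (illustrative only; the real implementation is asynchronous and does considerably more):

    from collections import defaultdict, deque

    class PerRoomBatchQueue:
        """Collect items per room and hand them out as one batch per room at a time."""

        def __init__(self):
            self._queues = defaultdict(deque)  # room_id -> pending items
            self._in_flight = set()            # rooms with an active batch

        def add(self, room_id, item):
            self._queues[room_id].append(item)

        def next_batch(self, room_id):
            """Return queued items for the room, or None if a batch is already running."""
            if room_id in self._in_flight or not self._queues[room_id]:
                return None
            self._in_flight.add(room_id)
            batch = list(self._queues[room_id])
            self._queues[room_id].clear()
            return batch

        def batch_done(self, room_id):
            self._in_flight.discard(room_id)
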
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 1c5f305132..964d8d9eb8 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -569,7 +569,7 @@ def _get_or_create_schema_state(txn, database_engine):
 
 
 @attr.s()
-class _DirectoryListing(object):
+class _DirectoryListing:
     """Helper class to store schema file name and the
     absolute path to it.
 
diff --git a/synapse/storage/purge_events.py b/synapse/storage/purge_events.py
index 79d9f06e2e..bfa0a9fd06 100644
--- a/synapse/storage/purge_events.py
+++ b/synapse/storage/purge_events.py
@@ -20,7 +20,7 @@ from typing import Set
 logger = logging.getLogger(__name__)
 
 
-class PurgeEventsStorage(object):
+class PurgeEventsStorage:
     """High level interface for purging rooms and event history.
     """
 
diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py
index d471ec9860..d30e3f11e7 100644
--- a/synapse/storage/relations.py
+++ b/synapse/storage/relations.py
@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)
 
 
 @attr.s
-class PaginationChunk(object):
+class PaginationChunk:
     """Returned by relation pagination APIs.
 
     Attributes:
@@ -51,7 +51,7 @@ class PaginationChunk(object):
 
 
 @attr.s(frozen=True, slots=True)
-class RelationPaginationToken(object):
+class RelationPaginationToken:
     """Pagination token for relation pagination API.
 
     As the results are in topological order, we can use the
@@ -82,7 +82,7 @@ class RelationPaginationToken(object):
 
 
 @attr.s(frozen=True, slots=True)
-class AggregationPaginationToken(object):
+class AggregationPaginationToken:
     """Pagination token for relation aggregation pagination API.
 
     As the results are ordered by count and then MAX(stream_ordering) of the
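The aggregation ordering described above, by count and then by MAX(stream_ordering), is a compound sort key, and a pagination token is essentially the compound key of the last row already returned. A purely hypothetical illustration (the rows, the descending sort, and the token shape are all made up for the example):

    # Hypothetical rows of (count, max_stream_ordering, group_key),
    # assumed here to be sorted descending by count, then by stream ordering.
    rows = [(7, 120, "a"), (7, 95, "b"), (3, 400, "c")]

    # The token is the compound key of the last row the client has seen.
    last_seen = (7, 95)

    # The next page is every row whose key sorts strictly after the token.
    next_page = [r for r in rows if (r[0], r[1]) < last_seen]
    assert next_page == [(3, 400, "c")]
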
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 96a1b59d64..8f68d968f0 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -29,7 +29,7 @@ T = TypeVar("T")
 
 
 @attr.s(slots=True)
-class StateFilter(object):
+class StateFilter:
     """A filter used when querying for state.
 
     Attributes:
@@ -326,7 +326,7 @@ class StateFilter(object):
         return member_filter, non_member_filter
 
 
-class StateGroupStorage(object):
+class StateGroupStorage:
     """High level interface to fetching state for event.
     """
 
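The hunk above splits a StateFilter into a member filter and a non-member filter; room state is keyed by (event type, state key) pairs, and membership events have type m.room.member. A rough sketch of such a split over a plain state map (illustrative only, not the real StateFilter API):

    MEMBERSHIP_TYPE = "m.room.member"

    def split_state_map(state_map):
        """Split a {(event_type, state_key): event_id} map into membership
        and non-membership entries, mirroring member/non-member filters."""
        member, non_member = {}, {}
        for (event_type, state_key), event_id in state_map.items():
            target = member if event_type == MEMBERSHIP_TYPE else non_member
            target[(event_type, state_key)] = event_id
        return member, non_member
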
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 9f3d23f0a5..76bc3afdfa 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -25,7 +25,7 @@ from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.util.sequence import PostgresSequenceGenerator
 
 
-class IdGenerator(object):
+class IdGenerator:
     def __init__(self, db_conn, table, column):
         self._lock = threading.Lock()
         self._next_id = _load_current_id(db_conn, table, column)
@@ -59,7 +59,7 @@ def _load_current_id(db_conn, table, column, step=1):
     return (max if step > 0 else min)(current_id, step)
 
 
-class StreamIdGenerator(object):
+class StreamIdGenerator:
     """Used to generate new stream ids when persisting events while keeping
     track of which transactions have been completed.
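
The StreamIdGenerator docstring describes handing out increasing stream ids while tracking which of them have finished persisting, so that readers only ever see a position with no incomplete ids behind it. A thread-safe sketch of that bookkeeping (illustrative; the real class ties this into the database and the asynchronous persistence path):

    import threading
    from collections import OrderedDict

    class SimpleStreamIdGenerator:
        """Hand out increasing stream ids and track which are still in flight."""

        def __init__(self, current_id):
            self._lock = threading.Lock()
            self._next_id = current_id        # last id allocated
            self._current = current_id        # highest id safe for readers
            self._unfinished = OrderedDict()  # allocated-but-incomplete ids, in order

        def get_next(self):
            with self._lock:
                self._next_id += 1
                self._unfinished[self._next_id] = True
                return self._next_id

        def mark_done(self, stream_id):
            with self._lock:
                self._unfinished.pop(stream_id, None)
                # Advance only up to just before the oldest unfinished id, so a
                # reader never observes an id with an incomplete one behind it.
                oldest = next(iter(self._unfinished), None)
                self._current = self._next_id if oldest is None else oldest - 1

        def get_current_token(self):
            with self._lock:
                return self._current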