From be47cfa9c97b4acfd884440f1953ed000225eb37 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 25 Jan 2019 17:19:31 +0000 Subject: Refactor event building into EventBuilder This is so that everything is done in one place, making it easier to change the event format based on room version --- synapse/storage/event_federation.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index d3b9dea1d6..38809ed0fc 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -125,6 +125,29 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, return dict(txn) + @defer.inlineCallbacks + def get_max_depth_of(self, event_ids): + """Returns the max depth of a set of event IDs + + Args: + event_ids (list[str]) + + Returns + Deferred[int] + """ + rows = yield self._simple_select_many_batch( + table="events", + column="event_id", + iterable=event_ids, + retcols=("depth",), + desc="get_max_depth_of", + ) + + if not rows: + defer.returnValue(0) + else: + defer.returnValue(max(row["depth"] for row in rows)) + def _get_oldest_events_in_room_txn(self, txn, room_id): return self._simple_select_onecol_txn( txn, -- cgit 1.5.1 From 7709d2bd167e27493b134e938410c307f8c10396 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 28 Jan 2019 21:09:45 +0000 Subject: Implement rechecking of redactions --- synapse/api/auth.py | 4 ++-- synapse/event_auth.py | 24 ++++++++++++++++++------ synapse/events/__init__.py | 3 +++ synapse/handlers/message.py | 6 +++++- synapse/storage/events_worker.py | 26 +++++++++++++++++++++++++- 5 files changed, 53 insertions(+), 10 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 7b213e54c8..963e0e7d60 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -627,7 +627,7 @@ class Auth(object): defer.returnValue(auth_ids) - def check_redaction(self, event, auth_events): + def check_redaction(self, room_version, event, auth_events): """Check whether the event sender is allowed to redact the target event. Returns: @@ -640,7 +640,7 @@ class Auth(object): AuthError if the event sender is definitely not allowed to redact the target event. """ - return event_auth.check_redaction(event, auth_events) + return event_auth.check_redaction(room_version, event, auth_events) @defer.inlineCallbacks def check_can_change_room_list(self, room_id, user): diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 9adedbbb02..a95d142f0c 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -20,7 +20,13 @@ from signedjson.key import decode_verify_key_bytes from signedjson.sign import SignatureVerifyException, verify_signed_json from unpaddedbase64 import decode_base64 -from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, JoinRules, Membership +from synapse.api.constants import ( + KNOWN_ROOM_VERSIONS, + EventTypes, + JoinRules, + Membership, + RoomVersions, +) from synapse.api.errors import AuthError, EventSizeError, SynapseError from synapse.types import UserID, get_domain_from_id @@ -168,7 +174,7 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru _check_power_levels(event, auth_events) if event.type == EventTypes.Redaction: - check_redaction(event, auth_events) + check_redaction(room_version, event, auth_events) logger.debug("Allowing! 
%s", event) @@ -422,7 +428,7 @@ def _can_send_event(event, auth_events): return True -def check_redaction(event, auth_events): +def check_redaction(room_version, event, auth_events): """Check whether the event sender is allowed to redact the target event. Returns: @@ -442,10 +448,16 @@ def check_redaction(event, auth_events): if user_level >= redact_level: return False - redacter_domain = get_domain_from_id(event.event_id) - redactee_domain = get_domain_from_id(event.redacts) - if redacter_domain == redactee_domain: + if room_version in (RoomVersions.V1, RoomVersions.V2, RoomVersions.VDH_TEST): + redacter_domain = get_domain_from_id(event.event_id) + redactee_domain = get_domain_from_id(event.redacts) + if redacter_domain == redactee_domain: + return True + elif room_version == RoomVersions.V3: + event.internal_metadata.recheck_redaction = True return True + else: + raise RuntimeError("Unrecognized room version %r" % (room_version,)) raise AuthError( 403, diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 3fe52aaa45..70d3c0fbd9 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -63,6 +63,9 @@ class _EventInternalMetadata(object): """ return getattr(self, "send_on_behalf_of", None) + def need_to_check_redaction(self): + return getattr(self, "recheck_redaction", False) + def _event_dict_property(key): # We want to be able to use hasattr with the event dict properties. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 05d1370c18..0cfced43d5 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -767,7 +767,8 @@ class EventCreationHandler(object): auth_events = { (e.type, e.state_key): e for e in auth_events.values() } - if self.auth.check_redaction(event, auth_events=auth_events): + room_version = yield self.store.get_room_version(event.room_id) + if self.auth.check_redaction(room_version, event, auth_events=auth_events): original_event = yield self.store.get_event( event.redacts, check_redacted=False, @@ -781,6 +782,9 @@ class EventCreationHandler(object): "You don't have permission to redact events" ) + # We've already checked. + event.internal_metadata.recheck_redaction = False + if event.type == EventTypes.Create: prev_state_ids = yield context.get_prev_state_ids(self.store) if prev_state_ids: diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 0a0ca58fc4..9ce19430e8 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -21,13 +21,14 @@ from canonicaljson import json from twisted.internet import defer -from synapse.api.constants import EventFormatVersions +from synapse.api.constants import EventFormatVersions, EventTypes from synapse.api.errors import NotFoundError from synapse.events import FrozenEvent, event_type_from_format_version # noqa: F401 # these are only included to make the type annotations work from synapse.events.snapshot import EventContext # noqa: F401 from synapse.events.utils import prune_event from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import get_domain_from_id from synapse.util.logcontext import ( LoggingContext, PreserveLoggingContext, @@ -174,6 +175,29 @@ class EventsWorkerStore(SQLBaseStore): if not entry: continue + # Some redactions in room version v3 need to be rechecked if we + # didn't have the redacted event at the time, so we recheck on read + # instead. 
+ if not allow_rejected and entry.event.type == EventTypes.Redaction: + if entry.event.internal_metadata.need_to_check_redaction(): + orig = yield self.get_event( + entry.event.redacts, + allow_none=True, + allow_rejected=True, + get_prev_content=False, + ) + expected_domain = get_domain_from_id(entry.event.sender) + if orig and get_domain_from_id(orig.sender) == expected_domain: + # This redaction event is allowed. Mark as not needing a + # recheck. + entry.event.recheck_redaction = False + else: + # We don't have the event that is being redacted, so we + # assume that the event isn't authorized for now. (If we + # later receive the event, then we will always redact + # it anyway, since we have this redaction) + continue + if allow_rejected or not entry.event.rejected_reason: if check_redacted and entry.redacted_event: event = entry.redacted_event -- cgit 1.5.1 From 82165eeb052ce69f078cdf3371127cc89c702424 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 29 Jan 2019 21:14:39 +0000 Subject: Update synapse/storage/events_worker.py Co-Authored-By: erikjohnston --- synapse/storage/events_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 9ce19430e8..9dcb27b65d 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -175,7 +175,7 @@ class EventsWorkerStore(SQLBaseStore): if not entry: continue - # Some redactions in room version v3 need to be rechecked if we + # Starting in room version v3, some redactions need to be rechecked if we # didn't have the redacted event at the time, so we recheck on read # instead. if not allow_rejected and entry.event.type == EventTypes.Redaction: -- cgit 1.5.1 From b5d510ad64ad3ed4e40a61c389af875058a73258 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Jan 2019 21:45:28 +0000 Subject: Remove unused arg --- synapse/storage/events_worker.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'synapse/storage') diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 9dcb27b65d..df8169b91e 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -163,7 +163,6 @@ class EventsWorkerStore(SQLBaseStore): missing_events = yield self._enqueue_events( missing_events_ids, - check_redacted=check_redacted, allow_rejected=allow_rejected, ) @@ -334,7 +333,7 @@ class EventsWorkerStore(SQLBaseStore): self.hs.get_reactor().callFromThread(fire, event_list, e) @defer.inlineCallbacks - def _enqueue_events(self, events, check_redacted=True, allow_rejected=False): + def _enqueue_events(self, events, allow_rejected=False): """Fetches events from the database using the _event_fetch_list. This allows batch and bulk fetching of events - it allows us to fetch events without having to create a new transaction for each request for events. 
-- cgit 1.5.1 From 6d23ec21116d5312e26315c0c75f3761398daf00 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Jan 2019 21:45:53 +0000 Subject: Fix typo --- synapse/storage/events_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index df8169b91e..0ff80fa728 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -189,7 +189,7 @@ class EventsWorkerStore(SQLBaseStore): if orig and get_domain_from_id(orig.sender) == expected_domain: # This redaction event is allowed. Mark as not needing a # recheck. - entry.event.recheck_redaction = False + entry.event.internal_metadata.recheck_redaction = False else: # We don't have the event that is being redacted, so we # assume that the event isn't authorized for now. (If we -- cgit 1.5.1 From 4db252c0738c590ed5fa0b53c61f9dd9da1cb33e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Jan 2019 21:46:17 +0000 Subject: Check redaction state when event is pulled out of the database --- synapse/storage/events_worker.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'synapse/storage') diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 0ff80fa728..1d75e309b3 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -466,6 +466,19 @@ class EventsWorkerStore(SQLBaseStore): # will serialise this field correctly redacted_event.unsigned["redacted_because"] = because + # Starting in room version v3, some redactions need to be + # rechecked if we didn't have the redacted event at the + # time, so we recheck on read instead. + if because.internal_metadata.need_to_check_redaction(): + expected_domain = get_domain_from_id(original_ev.sender) + if get_domain_from_id(because.sender) == expected_domain: + # This redaction event is allowed. Mark as not needing a + # recheck. + because.internal_metadata.recheck_redaction = False + else: + # Senders don't match, so the event is actually redacted + redacted_event = None + cache_entry = _EventCacheEntry( event=original_ev, redacted_event=redacted_event, -- cgit 1.5.1 From c21b7cbc0978080beb9837c3027b15a094670f91 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Jan 2019 21:53:48 +0000 Subject: Update synapse/storage/events_worker.py --- synapse/storage/events_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'synapse/storage') diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 1d75e309b3..ebe1429acb 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -476,7 +476,7 @@ class EventsWorkerStore(SQLBaseStore): # recheck. 
because.internal_metadata.recheck_redaction = False else: - # Senders don't match, so the event is actually redacted + # Senders don't match, so the event isn't actually redacted redacted_event = None cache_entry = _EventCacheEntry( -- cgit 1.5.1 From 3f189c902ea1146a497512049aa38fe9a0a91169 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 30 Jan 2019 10:53:17 +0000 Subject: Fix flake8 (#4519) --- changelog.d/4519.misc | 1 + synapse/_scripts/register_new_matrix_user.py | 4 +- synapse/handlers/directory.py | 4 +- synapse/handlers/federation.py | 2 +- synapse/push/clientformat.py | 2 +- synapse/storage/__init__.py | 2 +- synapse/storage/events.py | 168 +++++++++++++-------------- synapse/storage/events_worker.py | 2 +- tests/storage/test_background_update.py | 2 +- tests/storage/test_end_to_end_keys.py | 3 - tests/storage/test_keys.py | 3 - tests/storage/test_state.py | 3 - 12 files changed, 94 insertions(+), 102 deletions(-) create mode 100644 changelog.d/4519.misc (limited to 'synapse/storage') diff --git a/changelog.d/4519.misc b/changelog.d/4519.misc new file mode 100644 index 0000000000..897e783d28 --- /dev/null +++ b/changelog.d/4519.misc @@ -0,0 +1 @@ +Fix code to comply with linting in PyFlakes 3.7.1. diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 4c3abf06fe..6e93f5a0c6 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -46,7 +46,7 @@ def request_registration( # Get the nonce r = requests.get(url, verify=False) - if r.status_code is not 200: + if r.status_code != 200: _print("ERROR! Received %d %s" % (r.status_code, r.reason)) if 400 <= r.status_code < 500: try: @@ -84,7 +84,7 @@ def request_registration( _print("Sending registration request...") r = requests.post(url, json=data, verify=False) - if r.status_code is not 200: + if r.status_code != 200: _print("ERROR! 
Received %d %s" % (r.status_code, r.reason)) if 400 <= r.status_code < 500: try: diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 0699731c13..6bb254f899 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -57,8 +57,8 @@ class DirectoryHandler(BaseHandler): # general association creation for both human users and app services for wchar in string.whitespace: - if wchar in room_alias.localpart: - raise SynapseError(400, "Invalid characters in room alias") + if wchar in room_alias.localpart: + raise SynapseError(400, "Invalid characters in room alias") if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room alias must be local") diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index f89dabb9eb..083f2e0ac3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -102,7 +102,7 @@ class FederationHandler(BaseHandler): self.hs = hs - self.store = hs.get_datastore() # type: synapse.storage.DataStore + self.store = hs.get_datastore() self.federation_client = hs.get_federation_client() self.state_handler = hs.get_state_handler() self.server_name = hs.hostname diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index ecbf364a5e..8bd96b1178 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -84,7 +84,7 @@ def _rule_to_template(rule): templaterule["pattern"] = thecond["pattern"] if unscoped_rule_id: - templaterule['rule_id'] = unscoped_rule_id + templaterule['rule_id'] = unscoped_rule_id if 'default' in rule: templaterule['default'] = rule['default'] return templaterule diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 24329879e5..42cd3c83ad 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -317,7 +317,7 @@ class DataStore(RoomMemberStore, RoomStore, thirty_days_ago_in_secs)) for row in txn: - if row[0] is 'unknown': + if row[0] == 'unknown': pass results[row[0]] = row[1] diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 3e1915fb87..81b250480d 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -904,106 +904,106 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore def _update_current_state_txn(self, txn, state_delta_by_room, max_stream_order): for room_id, current_state_tuple in iteritems(state_delta_by_room): - to_delete, to_insert = current_state_tuple - - # First we add entries to the current_state_delta_stream. We - # do this before updating the current_state_events table so - # that we can use it to calculate the `prev_event_id`. (This - # allows us to not have to pull out the existing state - # unnecessarily). - sql = """ - INSERT INTO current_state_delta_stream - (stream_id, room_id, type, state_key, event_id, prev_event_id) - SELECT ?, ?, ?, ?, ?, ( - SELECT event_id FROM current_state_events - WHERE room_id = ? AND type = ? AND state_key = ? 
- ) - """ - txn.executemany(sql, ( - ( - max_stream_order, room_id, etype, state_key, None, - room_id, etype, state_key, - ) - for etype, state_key in to_delete - # We sanity check that we're deleting rather than updating - if (etype, state_key) not in to_insert - )) - txn.executemany(sql, ( - ( - max_stream_order, room_id, etype, state_key, ev_id, - room_id, etype, state_key, - ) - for (etype, state_key), ev_id in iteritems(to_insert) - )) + to_delete, to_insert = current_state_tuple - # Now we actually update the current_state_events table - - txn.executemany( - "DELETE FROM current_state_events" - " WHERE room_id = ? AND type = ? AND state_key = ?", - ( - (room_id, etype, state_key) - for etype, state_key in itertools.chain(to_delete, to_insert) - ), + # First we add entries to the current_state_delta_stream. We + # do this before updating the current_state_events table so + # that we can use it to calculate the `prev_event_id`. (This + # allows us to not have to pull out the existing state + # unnecessarily). + sql = """ + INSERT INTO current_state_delta_stream + (stream_id, room_id, type, state_key, event_id, prev_event_id) + SELECT ?, ?, ?, ?, ?, ( + SELECT event_id FROM current_state_events + WHERE room_id = ? AND type = ? AND state_key = ? ) - - self._simple_insert_many_txn( - txn, - table="current_state_events", - values=[ - { - "event_id": ev_id, - "room_id": room_id, - "type": key[0], - "state_key": key[1], - } - for key, ev_id in iteritems(to_insert) - ], + """ + txn.executemany(sql, ( + ( + max_stream_order, room_id, etype, state_key, None, + room_id, etype, state_key, ) - - txn.call_after( - self._curr_state_delta_stream_cache.entity_has_changed, - room_id, max_stream_order, + for etype, state_key in to_delete + # We sanity check that we're deleting rather than updating + if (etype, state_key) not in to_insert + )) + txn.executemany(sql, ( + ( + max_stream_order, room_id, etype, state_key, ev_id, + room_id, etype, state_key, ) + for (etype, state_key), ev_id in iteritems(to_insert) + )) - # Invalidate the various caches - - # Figure out the changes of membership to invalidate the - # `get_rooms_for_user` cache. - # We find out which membership events we may have deleted - # and which we have added, then we invlidate the caches for all - # those users. - members_changed = set( - state_key - for ev_type, state_key in itertools.chain(to_delete, to_insert) - if ev_type == EventTypes.Member - ) + # Now we actually update the current_state_events table - for member in members_changed: - self._invalidate_cache_and_stream( - txn, self.get_rooms_for_user_with_stream_ordering, (member,) - ) + txn.executemany( + "DELETE FROM current_state_events" + " WHERE room_id = ? AND type = ? 
AND state_key = ?", + ( + (room_id, etype, state_key) + for etype, state_key in itertools.chain(to_delete, to_insert) + ), + ) - for host in set(get_domain_from_id(u) for u in members_changed): - self._invalidate_cache_and_stream( - txn, self.is_host_joined, (room_id, host) - ) - self._invalidate_cache_and_stream( - txn, self.was_host_joined, (room_id, host) - ) + self._simple_insert_many_txn( + txn, + table="current_state_events", + values=[ + { + "event_id": ev_id, + "room_id": room_id, + "type": key[0], + "state_key": key[1], + } + for key, ev_id in iteritems(to_insert) + ], + ) + + txn.call_after( + self._curr_state_delta_stream_cache.entity_has_changed, + room_id, max_stream_order, + ) + + # Invalidate the various caches + + # Figure out the changes of membership to invalidate the + # `get_rooms_for_user` cache. + # We find out which membership events we may have deleted + # and which we have added, then we invlidate the caches for all + # those users. + members_changed = set( + state_key + for ev_type, state_key in itertools.chain(to_delete, to_insert) + if ev_type == EventTypes.Member + ) + for member in members_changed: self._invalidate_cache_and_stream( - txn, self.get_users_in_room, (room_id,) + txn, self.get_rooms_for_user_with_stream_ordering, (member,) ) + for host in set(get_domain_from_id(u) for u in members_changed): self._invalidate_cache_and_stream( - txn, self.get_room_summary, (room_id,) + txn, self.is_host_joined, (room_id, host) ) - self._invalidate_cache_and_stream( - txn, self.get_current_state_ids, (room_id,) + txn, self.was_host_joined, (room_id, host) ) + self._invalidate_cache_and_stream( + txn, self.get_users_in_room, (room_id,) + ) + + self._invalidate_cache_and_stream( + txn, self.get_room_summary, (room_id,) + ) + + self._invalidate_cache_and_stream( + txn, self.get_current_state_ids, (room_id,) + ) + def _update_forward_extremities_txn(self, txn, new_forward_extremities, max_stream_order): for room_id, new_extrem in iteritems(new_forward_extremities): diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index ebe1429acb..57dae324c7 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -220,7 +220,7 @@ class EventsWorkerStore(SQLBaseStore): defer.returnValue(events) def _invalidate_get_event_cache(self, event_id): - self._get_event_cache.invalidate((event_id,)) + self._get_event_cache.invalidate((event_id,)) def _get_events_from_cache(self, events, allow_rejected, update_metrics=True): """Fetch events from the caches diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 81403727c5..5568a607c7 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -11,7 +11,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): def setUp(self): hs = yield setup_test_homeserver( self.addCleanup - ) # type: synapse.server.HomeServer + ) self.store = hs.get_datastore() self.clock = hs.get_clock() diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py index b83f7336d3..11fb8c0c19 100644 --- a/tests/storage/test_end_to_end_keys.py +++ b/tests/storage/test_end_to_end_keys.py @@ -20,9 +20,6 @@ import tests.utils class EndToEndKeyStoreTestCase(tests.unittest.TestCase): - def __init__(self, *args, **kwargs): - super(EndToEndKeyStoreTestCase, self).__init__(*args, **kwargs) - self.store = None # type: synapse.storage.DataStore @defer.inlineCallbacks def setUp(self): diff --git 
a/tests/storage/test_keys.py b/tests/storage/test_keys.py index 47f4a8ceac..0d2dc9f325 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -22,9 +22,6 @@ import tests.utils class KeyStoreTestCase(tests.unittest.TestCase): - def __init__(self, *args, **kwargs): - super(KeyStoreTestCase, self).__init__(*args, **kwargs) - self.store = None # type: synapse.storage.keys.KeyStore @defer.inlineCallbacks def setUp(self): diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index a1f99134dc..99cd3e09eb 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -28,9 +28,6 @@ logger = logging.getLogger(__name__) class StateStoreTestCase(tests.unittest.TestCase): - def __init__(self, *args, **kwargs): - super(StateStoreTestCase, self).__init__(*args, **kwargs) - self.store = None # type: synapse.storage.DataStore @defer.inlineCallbacks def setUp(self): -- cgit 1.5.1
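
Taken together, the series above makes redaction authorisation room-version aware: in room versions v1/v2 a redaction is authorised up front by comparing the server domains baked into the two event IDs, whereas in v3 (where event IDs are hashes and carry no domain) check_redaction flags the event via internal_metadata.recheck_redaction and the check is re-run whenever the event is read back out of the database, this time comparing the senders' domains. The sketch below illustrates that read-time recheck in isolation. It is a simplified stand-in, not the actual Synapse API: the helper and class names (StubEvent, should_apply_redaction) are hypothetical, and the real logic lives in event_auth.check_redaction and EventsWorkerStore (synapse/storage/events_worker.py).

    # Illustrative sketch only -- these names are hypothetical and do not
    # exist in Synapse; the real recheck runs inside EventsWorkerStore when
    # a stored or cached event is read back.
    from dataclasses import dataclass
    from typing import Optional


    def get_domain_from_id(matrix_id: str) -> str:
        # "@alice:example.com" -> "example.com"
        return matrix_id.split(":", 1)[1]


    @dataclass
    class StubEvent:
        sender: str                       # e.g. "@alice:example.com"
        recheck_redaction: bool = False   # mirrors internal_metadata.recheck_redaction


    def should_apply_redaction(redaction: StubEvent,
                               original: Optional[StubEvent]) -> bool:
        """Decide whether a (possibly unverified) v3 redaction may be applied
        to the event it targets."""
        if not redaction.recheck_redaction:
            # Already authorised, e.g. it was fully checked when created.
            return True

        if original is None:
            # We don't have the target event yet, so we cannot verify the
            # redaction; leave the target unredacted for now and recheck later.
            return False

        # In v3 the redaction is allowed when the redacter and the redactee
        # were sent by users on the same homeserver.
        if get_domain_from_id(redaction.sender) == get_domain_from_id(original.sender):
            redaction.recheck_redaction = False  # no need to check again
            return True

        return False


    # Same-homeserver redaction is applied; a cross-server one is not.
    assert should_apply_redaction(
        StubEvent("@mod:example.com", recheck_redaction=True),
        StubEvent("@alice:example.com"),
    )
    assert not should_apply_redaction(
        StubEvent("@mod:other.org", recheck_redaction=True),
        StubEvent("@alice:example.com"),
    )

The success path clears the flag, matching the patches above, so a redaction verified once on read is not re-verified on every subsequent fetch.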