diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 3038df4ab8..4f9c3c9db8 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -814,17 +814,16 @@ class Auth(object):
return auth_ids
- @log_function
- def _can_send_event(self, event, auth_events):
+ def _get_send_level(self, etype, state_key, auth_events):
key = (EventTypes.PowerLevels, "", )
send_level_event = auth_events.get(key)
send_level = None
if send_level_event:
send_level = send_level_event.content.get("events", {}).get(
- event.type
+ etype
)
if send_level is None:
- if hasattr(event, "state_key"):
+ if state_key is not None:
send_level = send_level_event.content.get(
"state_default", 50
)
@@ -838,6 +837,13 @@ class Auth(object):
else:
send_level = 0
+ return send_level
+
+ @log_function
+ def _can_send_event(self, event, auth_events):
+ send_level = self._get_send_level(
+ event.type, event.get("state_key", None), auth_events
+ )
user_level = self._get_user_power_level(event.user_id, auth_events)
if user_level < send_level:
@@ -982,3 +988,43 @@ class Auth(object):
"You don't have permission to add ops level greater "
"than your own"
)
+
+ @defer.inlineCallbacks
+ def check_can_change_room_list(self, room_id, user):
+ """Check if the user is allowed to edit the room's entry in the
+ published room list.
+
+ Args:
+ room_id (str)
+ user (UserID)
+ """
+
+ is_admin = yield self.is_server_admin(user)
+ if is_admin:
+ defer.returnValue(True)
+
+ user_id = user.to_string()
+ yield self.check_joined_room(room_id, user_id)
+
+ # We currently require the user is a "moderator" in the room. We do this
+ # by checking if they would (theoretically) be able to change the
+ # m.room.aliases events
+ power_level_event = yield self.state.get_current_state(
+ room_id, EventTypes.PowerLevels, ""
+ )
+
+ auth_events = {}
+ if power_level_event:
+ auth_events[(EventTypes.PowerLevels, "")] = power_level_event
+
+ send_level = self._get_send_level(
+ EventTypes.Aliases, "", auth_events
+ )
+ user_level = self._get_user_power_level(user_id, auth_events)
+
+ if user_level < send_level:
+ raise AuthError(
+ 403,
+ "This server requires you to be a moderator in the room to"
+ " edit its room list entry"
+ )
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 76820b924b..429ab6ddec 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -531,7 +531,6 @@ class FederationServer(FederationBase):
yield self.handler.on_receive_pdu(
origin,
pdu,
- backfilled=False,
state=state,
auth_chain=auth_chain,
)
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 6bcc5a5e2b..8eeb225811 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -317,3 +317,25 @@ class DirectoryHandler(BaseHandler):
is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
defer.returnValue(is_admin)
+
+ @defer.inlineCallbacks
+ def edit_published_room_list(self, requester, room_id, visibility):
+ """Edit the entry of the room in the published room list.
+
+        requester (Requester)
+ room_id (str)
+ visibility (str): "public" or "private"
+ """
+ if requester.is_guest:
+ raise AuthError(403, "Guests cannot edit the published room list")
+
+ if visibility not in ["public", "private"]:
+ raise SynapseError(400, "Invalid visibility setting")
+
+ room = yield self.store.get_room(room_id)
+ if room is None:
+ raise SynapseError(400, "Unknown room")
+
+ yield self.auth.check_can_change_room_list(room_id, requester.user)
+
+ yield self.store.set_room_is_public(room_id, visibility == "public")
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index f599e817aa..267fedf114 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -102,7 +102,7 @@ class FederationHandler(BaseHandler):
@log_function
@defer.inlineCallbacks
- def on_receive_pdu(self, origin, pdu, backfilled, state=None,
+ def on_receive_pdu(self, origin, pdu, state=None,
auth_chain=None):
""" Called by the ReplicationLayer when we have a new pdu. We need to
do auth checks and put it through the StateHandler.
@@ -123,7 +123,6 @@ class FederationHandler(BaseHandler):
# FIXME (erikj): Awful hack to make the case where we are not currently
# in the room work
- current_state = None
is_in_room = yield self.auth.check_host_in_room(
event.room_id,
self.server_name
@@ -186,8 +185,6 @@ class FederationHandler(BaseHandler):
origin,
event,
state=state,
- backfilled=backfilled,
- current_state=current_state,
)
except AuthError as e:
raise FederationError(
@@ -216,18 +213,17 @@ class FederationHandler(BaseHandler):
except StoreError:
logger.exception("Failed to store room.")
- if not backfilled:
- extra_users = []
- if event.type == EventTypes.Member:
- target_user_id = event.state_key
- target_user = UserID.from_string(target_user_id)
- extra_users.append(target_user)
+ extra_users = []
+ if event.type == EventTypes.Member:
+ target_user_id = event.state_key
+ target_user = UserID.from_string(target_user_id)
+ extra_users.append(target_user)
- with PreserveLoggingContext():
- self.notifier.on_new_room_event(
- event, event_stream_id, max_stream_id,
- extra_users=extra_users
- )
+ with PreserveLoggingContext():
+ self.notifier.on_new_room_event(
+ event, event_stream_id, max_stream_id,
+ extra_users=extra_users
+ )
if event.type == EventTypes.Member:
if event.membership == Membership.JOIN:
@@ -647,7 +643,7 @@ class FederationHandler(BaseHandler):
continue
try:
- self.on_receive_pdu(origin, p, backfilled=False)
+ self.on_receive_pdu(origin, p)
except:
logger.exception("Couldn't handle pdu")
@@ -779,7 +775,6 @@ class FederationHandler(BaseHandler):
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
- backfilled=False,
)
target_user = UserID.from_string(event.state_key)
@@ -819,7 +814,6 @@ class FederationHandler(BaseHandler):
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
- backfilled=False,
)
target_user = UserID.from_string(event.state_key)
@@ -1074,8 +1068,7 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
@log_function
- def _handle_new_event(self, origin, event, state=None, backfilled=False,
- current_state=None, auth_events=None):
+ def _handle_new_event(self, origin, event, state=None, auth_events=None):
outlier = event.internal_metadata.is_outlier()
@@ -1085,7 +1078,7 @@ class FederationHandler(BaseHandler):
auth_events=auth_events,
)
- if not backfilled and not event.internal_metadata.is_outlier():
+ if not event.internal_metadata.is_outlier():
action_generator = ActionGenerator(self.hs)
yield action_generator.handle_push_actions_for_event(
event, context, self
@@ -1094,9 +1087,7 @@ class FederationHandler(BaseHandler):
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
- backfilled=backfilled,
- is_new_state=(not outlier and not backfilled),
- current_state=current_state,
+ is_new_state=not outlier,
)
defer.returnValue((context, event_stream_id, max_stream_id))
@@ -1194,7 +1185,6 @@ class FederationHandler(BaseHandler):
event_stream_id, max_stream_id = yield self.store.persist_event(
event, new_event_context,
- backfilled=False,
is_new_state=True,
current_state=state,
)
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index 59a23d6cb6..8ac09419dc 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -30,6 +30,7 @@ logger = logging.getLogger(__name__)
def register_servlets(hs, http_server):
ClientDirectoryServer(hs).register(http_server)
+ ClientDirectoryListServer(hs).register(http_server)
class ClientDirectoryServer(ClientV1RestServlet):
@@ -137,3 +138,44 @@ class ClientDirectoryServer(ClientV1RestServlet):
)
defer.returnValue((200, {}))
+
+
+class ClientDirectoryListServer(ClientV1RestServlet):
+ PATTERNS = client_path_patterns("/directory/list/room/(?P<room_id>[^/]*)$")
+
+ def __init__(self, hs):
+ super(ClientDirectoryListServer, self).__init__(hs)
+ self.store = hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, room_id):
+ room = yield self.store.get_room(room_id)
+ if room is None:
+ raise SynapseError(400, "Unknown room")
+
+ defer.returnValue((200, {
+ "visibility": "public" if room["is_public"] else "private"
+ }))
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, room_id):
+ requester = yield self.auth.get_user_by_req(request)
+
+ content = parse_json_object_from_request(request)
+ visibility = content.get("visibility", "public")
+
+ yield self.handlers.directory_handler.edit_published_room_list(
+ requester, room_id, visibility,
+ )
+
+ defer.returnValue((200, {}))
+
+ @defer.inlineCallbacks
+ def on_DELETE(self, request, room_id):
+ requester = yield self.auth.get_user_by_req(request)
+
+ yield self.handlers.directory_handler.edit_published_room_list(
+ requester, room_id, "private",
+ )
+
+ defer.returnValue((200, {}))
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 7dc67ecd57..583b77a835 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -26,6 +26,10 @@ from twisted.internet import defer
import sys
import time
import threading
+import os
+
+
+CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1))
logger = logging.getLogger(__name__)
@@ -163,7 +167,9 @@ class SQLBaseStore(object):
self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True,
max_entries=hs.config.event_cache_size)
- self._state_group_cache = DictionaryCache("*stateGroupCache*", 2000)
+ self._state_group_cache = DictionaryCache(
+ "*stateGroupCache*", 2000 * CACHE_SIZE_FACTOR
+ )
self._event_fetch_lock = threading.Condition()
self._event_fetch_list = []
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 285c586cfe..e444b64cee 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -101,30 +101,16 @@ class EventsStore(SQLBaseStore):
@defer.inlineCallbacks
@log_function
- def persist_event(self, event, context, backfilled=False,
+ def persist_event(self, event, context,
is_new_state=True, current_state=None):
- stream_ordering = None
- if backfilled:
- self.min_stream_token -= 1
- stream_ordering = self.min_stream_token
-
- if stream_ordering is None:
- stream_ordering_manager = self._stream_id_gen.get_next()
- else:
- @contextmanager
- def stream_ordering_manager():
- yield stream_ordering
- stream_ordering_manager = stream_ordering_manager()
-
try:
- with stream_ordering_manager as stream_ordering:
+ with self._stream_id_gen.get_next() as stream_ordering:
event.internal_metadata.stream_ordering = stream_ordering
yield self.runInteraction(
"persist_event",
self._persist_event_txn,
event=event,
context=context,
- backfilled=backfilled,
is_new_state=is_new_state,
current_state=current_state,
)
@@ -166,7 +152,7 @@ class EventsStore(SQLBaseStore):
defer.returnValue(events[0] if events else None)
@log_function
- def _persist_event_txn(self, txn, event, context, backfilled,
+ def _persist_event_txn(self, txn, event, context,
is_new_state=True, current_state=None):
# We purposefully do this first since if we include a `current_state`
# key, we *want* to update the `current_state_events` table
@@ -198,7 +184,7 @@ class EventsStore(SQLBaseStore):
return self._persist_events_txn(
txn,
[(event, context)],
- backfilled=backfilled,
+ backfilled=False,
is_new_state=is_new_state,
)
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 46ab38a313..9be977f387 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -77,6 +77,14 @@ class RoomStore(SQLBaseStore):
allow_none=True,
)
+ def set_room_is_public(self, room_id, is_public):
+ return self._simple_update_one(
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={"is_public": is_public},
+ desc="set_room_is_public",
+ )
+
def get_public_room_ids(self):
return self._simple_select_onecol(
table="rooms",
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 8ed8a21b0a..f06c734c4e 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -172,7 +172,7 @@ class StateStore(SQLBaseStore):
defer.returnValue(events)
def _get_state_groups_from_groups(self, groups, types):
- """Returns dictionary state_group -> state event ids
+ """Returns dictionary state_group -> (dict of (type, state_key) -> event id)
"""
def f(txn, groups):
if types is not None:
@@ -183,7 +183,8 @@ class StateStore(SQLBaseStore):
where_clause = ""
sql = (
- "SELECT state_group, event_id FROM state_groups_state WHERE"
+ "SELECT state_group, event_id, type, state_key"
+ " FROM state_groups_state WHERE"
" state_group IN (%s) %s" % (
",".join("?" for _ in groups),
where_clause,
@@ -199,7 +200,8 @@ class StateStore(SQLBaseStore):
results = {}
for row in rows:
- results.setdefault(row["state_group"], []).append(row["event_id"])
+ key = (row["type"], row["state_key"])
+ results.setdefault(row["state_group"], {})[key] = row["event_id"]
return results
chunks = [groups[i:i + 100] for i in xrange(0, len(groups), 100)]
@@ -296,7 +298,7 @@ class StateStore(SQLBaseStore):
where a `state_key` of `None` matches all state_keys for the
`type`.
"""
- is_all, state_dict = self._state_group_cache.get(group)
+ is_all, state_dict_ids = self._state_group_cache.get(group)
type_to_key = {}
missing_types = set()
@@ -308,7 +310,7 @@ class StateStore(SQLBaseStore):
if type_to_key.get(typ, object()) is not None:
type_to_key.setdefault(typ, set()).add(state_key)
- if (typ, state_key) not in state_dict:
+ if (typ, state_key) not in state_dict_ids:
missing_types.add((typ, state_key))
sentinel = object()
@@ -326,7 +328,7 @@ class StateStore(SQLBaseStore):
got_all = not (missing_types or types is None)
return {
- k: v for k, v in state_dict.items()
+ k: v for k, v in state_dict_ids.items()
if include(k[0], k[1])
}, missing_types, got_all
@@ -340,8 +342,9 @@ class StateStore(SQLBaseStore):
Args:
group: The state group to lookup
"""
- is_all, state_dict = self._state_group_cache.get(group)
- return state_dict, is_all
+ is_all, state_dict_ids = self._state_group_cache.get(group)
+
+ return state_dict_ids, is_all
@defer.inlineCallbacks
def _get_state_for_groups(self, groups, types=None):
@@ -354,84 +357,69 @@ class StateStore(SQLBaseStore):
missing_groups = []
if types is not None:
for group in set(groups):
- state_dict, missing_types, got_all = self._get_some_state_from_cache(
+ state_dict_ids, missing_types, got_all = self._get_some_state_from_cache(
group, types
)
- results[group] = state_dict
+ results[group] = state_dict_ids
if not got_all:
missing_groups.append(group)
else:
for group in set(groups):
- state_dict, got_all = self._get_all_state_from_cache(
+ state_dict_ids, got_all = self._get_all_state_from_cache(
group
)
- results[group] = state_dict
+
+ results[group] = state_dict_ids
if not got_all:
missing_groups.append(group)
- if not missing_groups:
- defer.returnValue({
- group: {
- type_tuple: event
- for type_tuple, event in state.items()
- if event
- }
- for group, state in results.items()
- })
+ if missing_groups:
+ # Okay, so we have some missing_types, lets fetch them.
+ cache_seq_num = self._state_group_cache.sequence
- # Okay, so we have some missing_types, lets fetch them.
- cache_seq_num = self._state_group_cache.sequence
+ group_to_state_dict = yield self._get_state_groups_from_groups(
+ missing_groups, types
+ )
- group_state_dict = yield self._get_state_groups_from_groups(
- missing_groups, types
- )
+ # Now we want to update the cache with all the things we fetched
+ # from the database.
+ for group, group_state_dict in group_to_state_dict.items():
+ if types:
+                # We deliberately put key -> None mappings into the cache to
+ # cache absence of the key, on the assumption that if we've
+ # explicitly asked for some types then we will probably ask
+ # for them again.
+ state_dict = {key: None for key in types}
+ state_dict.update(results[group])
+ results[group] = state_dict
+ else:
+ state_dict = results[group]
+
+ state_dict.update(group_state_dict)
+
+ self._state_group_cache.update(
+ cache_seq_num,
+ key=group,
+ value=state_dict,
+ full=(types is None),
+ )
state_events = yield self._get_events(
- [e_id for l in group_state_dict.values() for e_id in l],
+ [ev_id for sd in results.values() for ev_id in sd.values()],
get_prev_content=False
)
state_events = {e.event_id: e for e in state_events}
- # Now we want to update the cache with all the things we fetched
- # from the database.
- for group, state_ids in group_state_dict.items():
- if types:
- # We delibrately put key -> None mappings into the cache to
- # cache absence of the key, on the assumption that if we've
- # explicitly asked for some types then we will probably ask
- # for them again.
- state_dict = {key: None for key in types}
- state_dict.update(results[group])
- results[group] = state_dict
- else:
- state_dict = results[group]
-
- for event_id in state_ids:
- try:
- state_event = state_events[event_id]
- state_dict[(state_event.type, state_event.state_key)] = state_event
- except KeyError:
- # Hmm. So we do don't have that state event? Interesting.
- logger.warn(
- "Can't find state event %r for state group %r",
- event_id, group,
- )
-
- self._state_group_cache.update(
- cache_seq_num,
- key=group,
- value=state_dict,
- full=(types is None),
- )
-
# Remove all the entries with None values. The None values were just
# used for bookkeeping in the cache.
for group, state_dict in results.items():
results[group] = {
- key: event for key, event in state_dict.items() if event
+ key: state_events[event_id]
+ for key, event_id in state_dict.items()
+ if event_id and event_id in state_events
}
defer.returnValue(results)
|