From ec2b5d8c284451179c0f7b72c46dda0dd81f6f5f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Dec 2014 16:21:17 +0000 Subject: Store full JSON of events in db --- synapse/storage/schema/im.sql | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'synapse/storage/schema') diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/im.sql index 8ba732a23b..cb0c494ddf 100644 --- a/synapse/storage/schema/im.sql +++ b/synapse/storage/schema/im.sql @@ -32,6 +32,16 @@ CREATE INDEX IF NOT EXISTS events_stream_ordering ON events (stream_ordering); CREATE INDEX IF NOT EXISTS events_topological_ordering ON events (topological_ordering); CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id); + +CREATE TABLE IF NOT EXISTS event_json( + event_id TEXT NOT NULL, + json BLOB NOT NULL, + CONSTRAINT ev_j_uniq UNIQUE (event_id) +); + +CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id); + + CREATE TABLE IF NOT EXISTS state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, -- cgit 1.5.1 From 279c48c8b442ec726fb5088e56ce9c1d2ed4bfb5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 2 Dec 2014 15:09:51 +0000 Subject: Write the upload portion of version 1 of the media repository --- docs/media_repository.rst | 20 +++ synapse/http/content_repository.py | 212 ---------------------------- synapse/http/server.py | 21 ++- synapse/media/v0/content_repository.py | 212 ++++++++++++++++++++++++++++ synapse/media/v1/filepath.py | 53 +++++++ synapse/media/v1/media_repository.py | 72 ++++++++++ synapse/media/v1/upload_resource.py | 110 +++++++++++++++ synapse/storage/media_repository.py | 116 +++++++++++++++ synapse/storage/schema/media_repository.sql | 66 +++++++++ 9 files changed, 663 insertions(+), 219 deletions(-) create mode 100644 docs/media_repository.rst delete mode 100644 synapse/http/content_repository.py create mode 100644 synapse/media/v0/content_repository.py create mode 100644 synapse/media/v1/filepath.py create mode 100644 synapse/media/v1/media_repository.py create mode 100644 synapse/media/v1/upload_resource.py create mode 100644 synapse/storage/media_repository.py create mode 100644 synapse/storage/schema/media_repository.sql (limited to 'synapse/storage/schema') diff --git a/docs/media_repository.rst b/docs/media_repository.rst new file mode 100644 index 0000000000..e554d0f495 --- /dev/null +++ b/docs/media_repository.rst @@ -0,0 +1,20 @@ +Media Repository +================ + +The media repository is where attachments and avatar photos are stored. +It stores attachment content and thumbnails for media uploaded by local users. +It caches attachment content and thumbnails for media uploaded by remote users. + +Storage +------- + +Each item of media is assigned a ``media_id`` when it is uploaded. +The ``media_id`` is a randomly chosen, URL safe 24 character string. +Metadata such as the MIME type, upload time and length are stored in the +sqlite3 database indexed by ``media_id``. +Content is stored on the filesystem under a "content" directory. Thumbnails are +stored under a "thumbnails" directory. +The item with ``media_id`` ``"aabbccccccccdddddddddddd"`` is stored under +``"local/content/aa/bb/ccccccccdddddddddddd"``. 
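A rough sketch of the sharding scheme described above (the committed implementation is the ``MediaFilePaths`` class in ``synapse/media/v1/filepath.py``, added later in this patch series; the function and argument names here are illustrative only)::

    import os

    def local_content_path(base_path, media_id):
        # "aabbccccccccdddddddddddd" -> "<base_path>/local/content/aa/bb/ccccccccdddddddddddd"
        return os.path.join(
            base_path, "local", "content",
            media_id[0:2], media_id[2:4], media_id[4:],
        )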
Its thumbnail with width +``128`` and height ``96`` and type ``"image/jpeg"`` is stored under +``"local/thumbnails/aa/bb/ccccccccdddddddddddd/128-96-image-jpeg"`` diff --git a/synapse/http/content_repository.py b/synapse/http/content_repository.py deleted file mode 100644 index 64ecb5346e..0000000000 --- a/synapse/http/content_repository.py +++ /dev/null @@ -1,212 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .server import respond_with_json_bytes - -from synapse.util.stringutils import random_string -from synapse.api.errors import ( - cs_exception, SynapseError, CodeMessageException, Codes, cs_error -) - -from twisted.protocols.basic import FileSender -from twisted.web import server, resource -from twisted.internet import defer - -import base64 -import json -import logging -import os -import re - -logger = logging.getLogger(__name__) - - -class ContentRepoResource(resource.Resource): - """Provides file uploading and downloading. - - Uploads are POSTed to wherever this Resource is linked to. This resource - returns a "content token" which can be used to GET this content again. The - token is typically a path, but it may not be. Tokens can expire, be - one-time uses, etc. - - In this case, the token is a path to the file and contains 3 interesting - sections: - - User ID base64d (for namespacing content to each user) - - random 24 char string - - Content type base64d (so we can return it when clients GET it) - - """ - isLeaf = True - - def __init__(self, hs, directory, auth, external_addr): - resource.Resource.__init__(self) - self.hs = hs - self.directory = directory - self.auth = auth - self.external_addr = external_addr.rstrip('/') - self.max_upload_size = hs.config.max_upload_size - - if not os.path.isdir(self.directory): - os.mkdir(self.directory) - logger.info("ContentRepoResource : Created %s directory.", - self.directory) - - @defer.inlineCallbacks - def map_request_to_name(self, request): - # auth the user - auth_user = yield self.auth.get_user_by_req(request) - - # namespace all file uploads on the user - prefix = base64.urlsafe_b64encode( - auth_user.to_string() - ).replace('=', '') - - # use a random string for the main portion - main_part = random_string(24) - - # suffix with a file extension if we can make one. This is nice to - # provide a hint to clients on the file information. We will also reuse - # this info to spit back the content type to the client. - suffix = "" - if request.requestHeaders.hasHeader("Content-Type"): - content_type = request.requestHeaders.getRawHeaders( - "Content-Type")[0] - suffix = "." + base64.urlsafe_b64encode(content_type) - if (content_type.split("/")[0].lower() in - ["image", "video", "audio"]): - file_ext = content_type.split("/")[-1] - # be a little paranoid and only allow a-z - file_ext = re.sub("[^a-z]", "", file_ext) - suffix += "." 
+ file_ext - - file_name = prefix + main_part + suffix - file_path = os.path.join(self.directory, file_name) - logger.info("User %s is uploading a file to path %s", - auth_user.to_string(), - file_path) - - # keep trying to make a non-clashing file, with a sensible max attempts - attempts = 0 - while os.path.exists(file_path): - main_part = random_string(24) - file_name = prefix + main_part + suffix - file_path = os.path.join(self.directory, file_name) - attempts += 1 - if attempts > 25: # really? Really? - raise SynapseError(500, "Unable to create file.") - - defer.returnValue(file_path) - - def render_GET(self, request): - # no auth here on purpose, to allow anyone to view, even across home - # servers. - - # TODO: A little crude here, we could do this better. - filename = request.path.split('/')[-1] - # be paranoid - filename = re.sub("[^0-9A-z.-_]", "", filename) - - file_path = self.directory + "/" + filename - - logger.debug("Searching for %s", file_path) - - if os.path.isfile(file_path): - # filename has the content type - base64_contentype = filename.split(".")[1] - content_type = base64.urlsafe_b64decode(base64_contentype) - logger.info("Sending file %s", file_path) - f = open(file_path, 'rb') - request.setHeader('Content-Type', content_type) - - # cache for at least a day. - # XXX: we might want to turn this off for data we don't want to - # recommend caching as it's sensitive or private - or at least - # select private. don't bother setting Expires as all our matrix - # clients are smart enough to be happy with Cache-Control (right?) - request.setHeader( - "Cache-Control", "public,max-age=86400,s-maxage=86400" - ) - - d = FileSender().beginFileTransfer(f, request) - - # after the file has been sent, clean up and finish the request - def cbFinished(ignored): - f.close() - request.finish() - d.addCallback(cbFinished) - else: - respond_with_json_bytes( - request, - 404, - json.dumps(cs_error("Not found", code=Codes.NOT_FOUND)), - send_cors=True) - - return server.NOT_DONE_YET - - def render_POST(self, request): - self._async_render(request) - return server.NOT_DONE_YET - - def render_OPTIONS(self, request): - respond_with_json_bytes(request, 200, {}, send_cors=True) - return server.NOT_DONE_YET - - @defer.inlineCallbacks - def _async_render(self, request): - try: - # TODO: The checks here are a bit late. The content will have - # already been uploaded to a tmp file at this point - content_length = request.getHeader("Content-Length") - if content_length is None: - raise SynapseError( - msg="Request must specify a Content-Length", code=400 - ) - if int(content_length) > self.max_upload_size: - raise SynapseError( - msg="Upload request body is too large", - code=413, - ) - - fname = yield self.map_request_to_name(request) - - # TODO I have a suspicious feeling this is just going to block - with open(fname, "wb") as f: - f.write(request.content.read()) - - # FIXME (erikj): These should use constants. - file_name = os.path.basename(fname) - # FIXME: we can't assume what the repo's public mounted path is - # ...plus self-signed SSL won't work to remote clients anyway - # ...and we can't assume that it's SSL anyway, as we might want to - # serve it via the non-SSL listener... 
- url = "%s/_matrix/content/%s" % ( - self.external_addr, file_name - ) - - respond_with_json_bytes(request, 200, - json.dumps({"content_token": url}), - send_cors=True) - - except CodeMessageException as e: - logger.exception(e) - respond_with_json_bytes(request, e.code, - json.dumps(cs_exception(e))) - except Exception as e: - logger.error("Failed to store file: %s" % e) - respond_with_json_bytes( - request, - 500, - json.dumps({"error": "Internal server error"}), - send_cors=True) diff --git a/synapse/http/server.py b/synapse/http/server.py index 8024ff5bde..046e230361 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -166,14 +166,10 @@ class JsonResource(HttpServer, resource.Resource): request) return - if not self._request_user_agent_is_curl(request): - json_bytes = encode_canonical_json(response_json_object) - else: - json_bytes = encode_pretty_printed_json(response_json_object) - # TODO: Only enable CORS for the requests that need it. - respond_with_json_bytes(request, code, json_bytes, send_cors=True, - response_code_message=response_code_message) + respond_with_json(request, code, response_json_object, send_cors=True, + response_code_message=response_code_message, + pretty_print=self._request_user_agent_is_curl) @staticmethod def _request_user_agent_is_curl(request): @@ -202,6 +198,17 @@ class RootRedirect(resource.Resource): return resource.Resource.getChild(self, name, request) +def respond_with_json(request, code, json_object, send_cors=False, + response_code_message=None, pretty_print=False): + if not pretty_print: + json_bytes = encode_pretty_printed_json(response_json_object) + else: + json_bytes = encode_canonical_json(response_json_object) + + return respond_with_json_bytes(request, code, json_bytes, send_cors, + response_code_message=response_code_message) + + def respond_with_json_bytes(request, code, json_bytes, send_cors=False, response_code_message=None): """Sends encoded JSON in response to the given request. diff --git a/synapse/media/v0/content_repository.py b/synapse/media/v0/content_repository.py new file mode 100644 index 0000000000..64ecb5346e --- /dev/null +++ b/synapse/media/v0/content_repository.py @@ -0,0 +1,212 @@ +# -*- coding: utf-8 -*- +# Copyright 2014 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .server import respond_with_json_bytes + +from synapse.util.stringutils import random_string +from synapse.api.errors import ( + cs_exception, SynapseError, CodeMessageException, Codes, cs_error +) + +from twisted.protocols.basic import FileSender +from twisted.web import server, resource +from twisted.internet import defer + +import base64 +import json +import logging +import os +import re + +logger = logging.getLogger(__name__) + + +class ContentRepoResource(resource.Resource): + """Provides file uploading and downloading. + + Uploads are POSTed to wherever this Resource is linked to. This resource + returns a "content token" which can be used to GET this content again. 
The + token is typically a path, but it may not be. Tokens can expire, be + one-time uses, etc. + + In this case, the token is a path to the file and contains 3 interesting + sections: + - User ID base64d (for namespacing content to each user) + - random 24 char string + - Content type base64d (so we can return it when clients GET it) + + """ + isLeaf = True + + def __init__(self, hs, directory, auth, external_addr): + resource.Resource.__init__(self) + self.hs = hs + self.directory = directory + self.auth = auth + self.external_addr = external_addr.rstrip('/') + self.max_upload_size = hs.config.max_upload_size + + if not os.path.isdir(self.directory): + os.mkdir(self.directory) + logger.info("ContentRepoResource : Created %s directory.", + self.directory) + + @defer.inlineCallbacks + def map_request_to_name(self, request): + # auth the user + auth_user = yield self.auth.get_user_by_req(request) + + # namespace all file uploads on the user + prefix = base64.urlsafe_b64encode( + auth_user.to_string() + ).replace('=', '') + + # use a random string for the main portion + main_part = random_string(24) + + # suffix with a file extension if we can make one. This is nice to + # provide a hint to clients on the file information. We will also reuse + # this info to spit back the content type to the client. + suffix = "" + if request.requestHeaders.hasHeader("Content-Type"): + content_type = request.requestHeaders.getRawHeaders( + "Content-Type")[0] + suffix = "." + base64.urlsafe_b64encode(content_type) + if (content_type.split("/")[0].lower() in + ["image", "video", "audio"]): + file_ext = content_type.split("/")[-1] + # be a little paranoid and only allow a-z + file_ext = re.sub("[^a-z]", "", file_ext) + suffix += "." + file_ext + + file_name = prefix + main_part + suffix + file_path = os.path.join(self.directory, file_name) + logger.info("User %s is uploading a file to path %s", + auth_user.to_string(), + file_path) + + # keep trying to make a non-clashing file, with a sensible max attempts + attempts = 0 + while os.path.exists(file_path): + main_part = random_string(24) + file_name = prefix + main_part + suffix + file_path = os.path.join(self.directory, file_name) + attempts += 1 + if attempts > 25: # really? Really? + raise SynapseError(500, "Unable to create file.") + + defer.returnValue(file_path) + + def render_GET(self, request): + # no auth here on purpose, to allow anyone to view, even across home + # servers. + + # TODO: A little crude here, we could do this better. + filename = request.path.split('/')[-1] + # be paranoid + filename = re.sub("[^0-9A-z.-_]", "", filename) + + file_path = self.directory + "/" + filename + + logger.debug("Searching for %s", file_path) + + if os.path.isfile(file_path): + # filename has the content type + base64_contentype = filename.split(".")[1] + content_type = base64.urlsafe_b64decode(base64_contentype) + logger.info("Sending file %s", file_path) + f = open(file_path, 'rb') + request.setHeader('Content-Type', content_type) + + # cache for at least a day. + # XXX: we might want to turn this off for data we don't want to + # recommend caching as it's sensitive or private - or at least + # select private. don't bother setting Expires as all our matrix + # clients are smart enough to be happy with Cache-Control (right?) 
+ request.setHeader( + "Cache-Control", "public,max-age=86400,s-maxage=86400" + ) + + d = FileSender().beginFileTransfer(f, request) + + # after the file has been sent, clean up and finish the request + def cbFinished(ignored): + f.close() + request.finish() + d.addCallback(cbFinished) + else: + respond_with_json_bytes( + request, + 404, + json.dumps(cs_error("Not found", code=Codes.NOT_FOUND)), + send_cors=True) + + return server.NOT_DONE_YET + + def render_POST(self, request): + self._async_render(request) + return server.NOT_DONE_YET + + def render_OPTIONS(self, request): + respond_with_json_bytes(request, 200, {}, send_cors=True) + return server.NOT_DONE_YET + + @defer.inlineCallbacks + def _async_render(self, request): + try: + # TODO: The checks here are a bit late. The content will have + # already been uploaded to a tmp file at this point + content_length = request.getHeader("Content-Length") + if content_length is None: + raise SynapseError( + msg="Request must specify a Content-Length", code=400 + ) + if int(content_length) > self.max_upload_size: + raise SynapseError( + msg="Upload request body is too large", + code=413, + ) + + fname = yield self.map_request_to_name(request) + + # TODO I have a suspicious feeling this is just going to block + with open(fname, "wb") as f: + f.write(request.content.read()) + + # FIXME (erikj): These should use constants. + file_name = os.path.basename(fname) + # FIXME: we can't assume what the repo's public mounted path is + # ...plus self-signed SSL won't work to remote clients anyway + # ...and we can't assume that it's SSL anyway, as we might want to + # serve it via the non-SSL listener... + url = "%s/_matrix/content/%s" % ( + self.external_addr, file_name + ) + + respond_with_json_bytes(request, 200, + json.dumps({"content_token": url}), + send_cors=True) + + except CodeMessageException as e: + logger.exception(e) + respond_with_json_bytes(request, e.code, + json.dumps(cs_exception(e))) + except Exception as e: + logger.error("Failed to store file: %s" % e) + respond_with_json_bytes( + request, + 500, + json.dumps({"error": "Internal server error"}), + send_cors=True) diff --git a/synapse/media/v1/filepath.py b/synapse/media/v1/filepath.py new file mode 100644 index 0000000000..d23564e03e --- /dev/null +++ b/synapse/media/v1/filepath.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2014 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + + +class MediaFilePaths(object): + + def __init__(self, base_path): + self.base_path = base_path + + def local_media_filepath(self, media_id): + return os.path.join( + self.base_path, "local", "content", + media_id[0:2], media_id[2:4], media_id[4:] + ) + + def local_media_thumbnail(self, media_id, width, height, content_type): + top_level_type, sub_type = content_type.split("/") + file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type) + return os.path.join( + self.base_path, "local", "thumbnails", + media_id[0:2], media_id[2:4], media_id[4:], + file_name + ) + + def remote_media_filepath(self, server_name, file_id): + return os.path.join( + self.base_path, "remote", "content", server_name, + file_id[0:2], file_id[2:4], file_id[4:] + ) + + def remote_media_thumbnail(self, server_name, file_id, width, height, + content_type): + top_level_type, sub_type = content_type.split("/") + file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type) + return os.path.join( + self.base_path, "remote", "content", server_name, + file_id[0:2], file_id[2:4], file_id[4:], + file_name + ) diff --git a/synapse/media/v1/media_repository.py b/synapse/media/v1/media_repository.py new file mode 100644 index 0000000000..9c36a8e933 --- /dev/null +++ b/synapse/media/v1/media_repository.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2014 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.http.server import respond_with_json_bytes + +from synapse.util.stringutils import random_string +from synapse.api.errors import ( + cs_exception, SynapseError, CodeMessageException, Codes, cs_error +) + +from twisted.protocols.basic import FileSender +from twisted.web import server, resource +from twisted.internet import defer + +import base64 +import json +import logging +import os +import re + +logger = logging.getLogger(__name__) + + +class MediaRepository(): + """Profiles file uploading and downloading. + + Uploads are POSTed to a resource which returns a token which is used to GET + the download:: + + => POST /_matrix/media/v1/upload HTTP/1.1 + Content-Type: + + + + <= HTTP/1.1 200 OK + Content-Type: application/json + + { "token": } + + => GET /_matrix/media/v1/download/ HTTP/1.1 + + <= HTTP/1.1 200 OK + Content-Type: + Content-Disposition: attachment;filename= + + + + Clients can get thumbnails by supplying a desired width and height:: + + => GET /_matrix/media/v1/thumbnail/?width=&height= HTTP/1.1 + + <= HTTP/1.1 200 OK + Content-Type: image/jpeg or image/png + + + """ + + def __init__(self, hs): + filepaths = MediaFilePaths + diff --git a/synapse/media/v1/upload_resource.py b/synapse/media/v1/upload_resource.py new file mode 100644 index 0000000000..3721a0173d --- /dev/null +++ b/synapse/media/v1/upload_resource.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# Copyright 2014 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.http.server import respond_with_json + +from synapse.util.stringutils import random_string +from synapse.api.errors import ( + cs_exception, SynapseError, CodeMessageException +) + +from twisted.web import server, resource +from twisted.internet import defer + +import logging + +logger = logging.getLogger(__name__) + +class UploadResource(resource.Resource): + + def __init__(self, hs, filepaths): + self.auth = hs.get_auth() + self.store = hs.get_datastore() + self.max_upload_size = hs.config.max_upload_size() + self.filepaths = filepaths + + def render_POST(self, request): + self._async_render_POST(request) + return server.NOT_DONE_YET + + def render_OPTIONS(self, request): + respond_with_json(request, 200, {}, send_cors=True) + return server.NOT_DONE_YET + + @defer.inlineCallbacks + def _async_render_POST(self, request): + + auth_user = yield self.auth.get_user_by_req(request) + + try: + # TODO: The checks here are a bit late. The content will have + # already been uploaded to a tmp file at this point + content_length = request.getHeader("Content-Length") + if content_length is None: + raise SynapseError( + msg="Request must specify a Content-Length", code=400 + ) + if int(content_length) > self.max_upload_size: + raise SynapseError( + msg="Upload request body is too large", + code=413, + ) + + headers = request.requestHeaders() + + if headers.hasHeader("Content-Type"): + media_type = headers.getRawHeaders("Content-Type")[0] + else: + raise SynapseError( + msg="Upload request missing 'Content-Type'", + code=400, + ) + + #if headers.hasHeader("Content-Disposition"): + # disposition = headers.getRawHeaders("Content-Disposition")[0] + # TODO(markjh): parse content-dispostion + + media_id = random_string(24) + + fname = self.filepaths.local_media_file_path(media_id) + + # This shouldn't block for very long because the content will have + # already been uploaded at this point. + with open(fname, "wb") as f: + f.write(request.content.read()) + + yield self.store.store_local_media( + media_id=media_id, + media_type=media_type, + time_now_ms=self.clock.time_msec(), + upload_name=None, + media_length=content_length, + user_id=auth_user, + ) + + respond_with_json( + request, 200, {"content_token": media_id}, send_cors=True + ) + except CodeMessageException as e: + logger.exception(e) + respond_with_json(request, e.code, cs_exception(e), send_cors=True) + except: + logger.exception("Failed to store file") + respond_with_json( + request, + 500, + {"error": "Internal server error"}, + send_cors=True + ) diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py new file mode 100644 index 0000000000..73ceba3f2c --- /dev/null +++ b/synapse/storage/media_repository.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +# Copyright 2014 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from _base import SQLBaseStore + + +class MediaRepositoryStore(SQLBaseStore): + """Persistence for attachments and avatars""" + + def get_local_media(self, media_id): + return self._simple_select_one( + "local_media_repository", + {"media_id": media_id}, + ("media_type", "media_length", "upload_name", "created_ts"), + ) + + def store_local_media(self, media_id, media_type, time_now_ms, upload_name, + media_length, user_id): + return self._simple_insert( + "local_media_repository", + { + "media_id": media_id, + "media_type": media_type, + "created_ts": time_now_ms, + "upload_name": upload_name, + "media_length": media_length, + "user_id": user_id, + } + ) + + def get_local_media_thumbnails(self, media_id): + return self._simple_select_list( + "local_media_thumbnails", + {"media_id": media_id}, + ( + "thumbnail_width", "thumbnail_height", + "thumbnail_type", "thumbnail_length", + ) + ) + + def store_local_thumbnail(self, media_id, thumbnail_width, + thumbnail_height, thumbnail_type, + thumbnail_length): + return self._simple_insert( + "local_media_thumbnails", + { + "media_id": media_id, + "thumbnail_width": thumbnail_width, + "thumbnail_height": thumbnail_height, + "thumbnail_type": thumbnail_type, + "thumbnail_length": thumbnail_length, + } + ) + + def get_cached_remote_media(self, origin, media_id): + return self._simple_select_one( + "remote_media_cache", + {"media_origin": origin, "media_id": media_id}, + ("media_type", "media_length", "upload_name", "created_ts"), + ) + + def store_cached_remote_media(self, origin, media_id, media_type, + media_length, time_now_ms, upload_name, + filesytem_id): + return self._simple_insert( + "remote_media_cache", + { + "media_origin": origin, + "media_id": media_id, + "media_type": media_type, + "media_length": media_length, + "created_ts": time_now_ms, + "upload_name": upload_name, + "filesystem_id": filesystem_id, + } + ) + + def get_remote_media_thumbnails(self, origin, media_id): + return self._simple_select_list( + "remote_media_cache_thumbnails", + {"origin": origin, "media_id": media_id}, + ( + "thumbnail_width", "thumbnail_height", + "thumbnail_type", "thumbnail_length", + "filesystem_id" + ) + ) + + + def store_remote_media_thumbnail(self, origin, media_id, thumbnail_width, + thumbnail_height, thumbnail_type, + thumbnail_length, filesystem_id): + return self._simple_insert( + "remote_media_cache_thumbnails", + { + "media_origin": origin, + "media_id": media_id, + "thumbnail_width": thumbnail_width, + "thumbnail_height": thumbnail_height, + "thumbnail_type": thumbnail_type, + "thumbnail_length": thumbnail_length, + "filesystem_id": filesystem_id, + } + ) diff --git a/synapse/storage/schema/media_repository.sql b/synapse/storage/schema/media_repository.sql new file mode 100644 index 0000000000..768752296f --- /dev/null +++ b/synapse/storage/schema/media_repository.sql @@ -0,0 +1,66 @@ +/* Copyright 2014 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS local_media_repository ( + media_id TEXT, -- The id used to refer to the media. + media_type TEXT, -- The MIME-type of the media. + media_length INTEGER, -- Length of the media in bytes. + created_ts INTEGER, -- When the content was uploaded in ms. + upload_name TEXT, -- The name the media was uploaded with. + user_id TEXT, -- The user who uploaded the file. + CONSTRAINT uniqueness UNIQUE (media_id) +); + +CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails ( + media_id TEXT, -- The id used to refer to the media. + thumbnail_width INTEGER, -- The width of the thumbnail in pixels. + thumbnail_height INTEGER, -- The height of the thumbnail in pixels. + thumbnail_type TEXT, -- The MIME-type of the thumbnail. + thumbnail_length INTEGER, -- The length of the thumbnail in bytes. + CONSTRAINT uniqueness UNIQUE ( + media_id, thumbnail_width, thumbnail_height, thumbnail_type + ) +); + +CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id + ON local_media_repository_thumbnails (media_id); + +CREATE TABLE IF NOT EXISTS remote_media_cache ( + media_origin TEXT, -- The remote HS the media came from. + media_id TEXT, -- The id used to refer to the media on that server. + media_type TEXT, -- The MIME-type of the media. + created_ts INTEGER, -- When the content was uploaded in ms. + upload_name TEXT, -- The name the media was uploaded with. + media_length INTEGER, -- Length of the media in bytes. + filesystem_id TEXT, -- The name used to store the media on disk. + CONSTRAINT uniqueness UNIQUE (media_origin, media_id) +); + +CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails ( + media_origin TEXT, -- The remote HS the media came from. + media_id TEXT, -- The id used to refer to the media. + thumbnail_width INTEGER, -- The width of the thumbnail in pixels. + thumbnail_height INTEGER, -- The height of the thumbnail in pixels. + thumbnail_type TEXT, -- The MIME-type of the thumbnail. + thumbnail_length INTEGER, -- The length of the thumbnail in bytes. + filesystem_id TEXT, -- The name used to store the media on disk. 
+ CONSTRAINT uniqueness UNIQUE ( + media_origin, media_id, thumbnail_width, thumbnail_height, + thumbnail_type + ) +); + +CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id + ON local_media_repository_thumbnails (media_id); -- cgit 1.5.1 From a953be097f3431481875dc04662adcc5ba2226a9 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 5 Dec 2014 16:30:18 +0000 Subject: Add a method field to thumbnail storage --- synapse/storage/media_repository.py | 15 +++++++++------ synapse/storage/schema/media_repository.sql | 4 +++- 2 files changed, 12 insertions(+), 7 deletions(-) (limited to 'synapse/storage/schema') diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py index 2d3a2d1ccb..b3f1fc087b 100644 --- a/synapse/storage/media_repository.py +++ b/synapse/storage/media_repository.py @@ -50,20 +50,21 @@ class MediaRepositoryStore(SQLBaseStore): "local_media_thumbnails", {"media_id": media_id}, ( - "thumbnail_width", "thumbnail_height", + "thumbnail_width", "thumbnail_height", "thumbnail_method", "thumbnail_type", "thumbnail_length", ) ) def store_local_thumbnail(self, media_id, thumbnail_width, - thumbnail_height, thumbnail_type, - thumbnail_length): + thumbnail_height, thumbnail_method, + thumbnail_type, thumbnail_length): return self._simple_insert( "local_media_thumbnails", { "media_id": media_id, "thumbnail_width": thumbnail_width, "thumbnail_height": thumbnail_height, + "thumbnail_method": thumbnail_method, "thumbnail_type": thumbnail_type, "thumbnail_length": thumbnail_length, } @@ -101,15 +102,16 @@ class MediaRepositoryStore(SQLBaseStore): "remote_media_cache_thumbnails", {"origin": origin, "media_id": media_id}, ( - "thumbnail_width", "thumbnail_height", + "thumbnail_width", "thumbnail_height", "thumbnail_method" "thumbnail_type", "thumbnail_length", "filesystem_id" ) ) def store_remote_media_thumbnail(self, origin, media_id, thumbnail_width, - thumbnail_height, thumbnail_type, - thumbnail_length, filesystem_id): + thumbnail_height, thumbnail_method, + thumbnail_type, thumbnail_length, + filesystem_id): return self._simple_insert( "remote_media_cache_thumbnails", { @@ -117,6 +119,7 @@ class MediaRepositoryStore(SQLBaseStore): "media_id": media_id, "thumbnail_width": thumbnail_width, "thumbnail_height": thumbnail_height, + "thumbnail_method": thumbnail_method, "thumbnail_type": thumbnail_type, "thumbnail_length": thumbnail_length, "filesystem_id": filesystem_id, diff --git a/synapse/storage/schema/media_repository.sql b/synapse/storage/schema/media_repository.sql index 768752296f..b785fa0208 100644 --- a/synapse/storage/schema/media_repository.sql +++ b/synapse/storage/schema/media_repository.sql @@ -28,6 +28,7 @@ CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails ( thumbnail_width INTEGER, -- The width of the thumbnail in pixels. thumbnail_height INTEGER, -- The height of the thumbnail in pixels. thumbnail_type TEXT, -- The MIME-type of the thumbnail. + thumbnail_method TEXT, -- The method used to make the thumbnail. thumbnail_length INTEGER, -- The length of the thumbnail in bytes. CONSTRAINT uniqueness UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type @@ -53,12 +54,13 @@ CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails ( media_id TEXT, -- The id used to refer to the media. thumbnail_width INTEGER, -- The width of the thumbnail in pixels. thumbnail_height INTEGER, -- The height of the thumbnail in pixels. 
+ thumbnail_method TEXT, -- The method used to make the thumbnail thumbnail_type TEXT, -- The MIME-type of the thumbnail. thumbnail_length INTEGER, -- The length of the thumbnail in bytes. filesystem_id TEXT, -- The name used to store the media on disk. CONSTRAINT uniqueness UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, - thumbnail_type + thumbnail_type, thumbnail_type ) ); -- cgit 1.5.1 From aed62a35832a3ec1c7425ecc99cab06a781263ba Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 7 Dec 2014 02:26:07 +0000 Subject: track replication destination health, and perform exponential back-off when sending transactions. does *not* yet retry transactions, but drops them on the floor if waiting for a server to recover. --- synapse/federation/replication.py | 43 +++++++++++++++--- synapse/federation/transport.py | 2 +- synapse/http/matrixfederationclient.py | 16 ++++--- synapse/rest/transactions.py | 2 +- synapse/storage/__init__.py | 2 +- synapse/storage/_base.py | 2 +- synapse/storage/schema/delta/v9.sql | 23 ++++++++++ synapse/storage/schema/transactions.sql | 6 +++ synapse/storage/transactions.py | 78 ++++++++++++++++++++++++++++++++- 9 files changed, 156 insertions(+), 18 deletions(-) create mode 100644 synapse/storage/schema/delta/v9.sql (limited to 'synapse/storage/schema') diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py index 01f87fe423..f9c05b5ea3 100644 --- a/synapse/federation/replication.py +++ b/synapse/federation/replication.py @@ -723,6 +723,8 @@ class _TransactionQueue(object): deferreds = [] for destination in destinations: + # XXX: why don't we specify an errback for this deferred + # like we do for EDUs? --matthew deferred = defer.Deferred() self.pending_pdus_by_dest.setdefault(destination, []).append( (pdu, deferred, order) @@ -738,6 +740,9 @@ class _TransactionQueue(object): # NO inlineCallbacks def enqueue_edu(self, edu): destination = edu.destination + + if destination == self.server_name: + return deferred = defer.Deferred() self.pending_edus_by_dest.setdefault(destination, []).append( @@ -766,14 +771,23 @@ class _TransactionQueue(object): ) yield deferred - + @defer.inlineCallbacks @log_function def _attempt_new_transaction(self, destination): + + (retry_last_ts, retry_interval) = self.store.get_destination_retry_timings(destination) + if retry_last_ts + retry_interval > int(self._clock.time_msec()): + logger.info("TX [%s] not ready for retry yet - dropping transaction for now") + return + if destination in self.pending_transactions: + # XXX: pending_transactions can get stuck on by a never-ending request + # at which point pending_pdus_by_dest just keeps growing. 
+ # we need application-layer timeouts of some flavour of these requests return - # list of (pending_pdu, deferred, order) + # list of (pending_pdu, deferred, order) pending_pdus = self.pending_pdus_by_dest.pop(destination, []) pending_edus = self.pending_edus_by_dest.pop(destination, []) pending_failures = self.pending_failures_by_dest.pop(destination, []) @@ -781,7 +795,8 @@ class _TransactionQueue(object): if not pending_pdus and not pending_edus and not pending_failures: return - logger.debug("TX [%s] Attempting new transaction", destination) + logger.debug("TX [%s] Attempting new transaction (pdus: %d, edus: %d, failures: %d)", + destination, len(pending_pdus), len(pending_edus), len(pending_failures)) # Sort based on the order field pending_pdus.sort(key=lambda t: t[2]) @@ -814,7 +829,7 @@ class _TransactionQueue(object): yield self.transaction_actions.prepare_to_send(transaction) logger.debug("TX [%s] Persisted transaction", destination) - logger.debug("TX [%s] Sending transaction...", destination) + logger.info("TX [%s] Sending transaction [%s]", destination, transaction.transaction_id) # Actually send the transaction @@ -835,6 +850,8 @@ class _TransactionQueue(object): transaction, json_data_cb ) + logger.info("TX [%s] got %d response", destination, code) + logger.debug("TX [%s] Sent transaction", destination) logger.debug("TX [%s] Marking as delivered...", destination) @@ -849,6 +866,7 @@ class _TransactionQueue(object): if code == 200: deferred.callback(None) else: + start_retrying(destination, retry_interval) deferred.errback(RuntimeError("Got status %d" % code)) # Ensures we don't continue until all callbacks on that @@ -861,12 +879,12 @@ class _TransactionQueue(object): logger.debug("TX [%s] Yielded to callbacks", destination) except Exception as e: - logger.error("TX Problem in _attempt_transaction") - # We capture this here as there as nothing actually listens # for this finishing functions deferred. - logger.exception(e) + logger.exception("TX [%s] Problem in _attempt_transaction: %s", destination, e) + start_retrying(destination, retry_interval) + for deferred in deferreds: if not deferred.called: deferred.errback(e) @@ -877,3 +895,14 @@ class _TransactionQueue(object): # Check to see if there is anything else to send. 
self._attempt_new_transaction(destination) + +def start_retrying(destination, retry_interval): + # track that this destination is having problems and we should + # give it a chance to recover before trying it again + if retry_interval: + retry_interval *= 2 + else: + retry_interval = 2 # try again at first after 2 seconds + self.store.set_destination_retry_timings(destination, + int(self._clock.time_msec()), retry_interval) + \ No newline at end of file diff --git a/synapse/federation/transport.py b/synapse/federation/transport.py index 8d86152085..0f11c6d491 100644 --- a/synapse/federation/transport.py +++ b/synapse/federation/transport.py @@ -155,7 +155,7 @@ class TransportLayer(object): @defer.inlineCallbacks @log_function def send_transaction(self, transaction, json_data_callback=None): - """ Sends the given Transaction to it's destination + """ Sends the given Transaction to its destination Args: transaction (Transaction) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 510f07dd7b..3edc59dbab 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -89,7 +89,7 @@ class MatrixFederationHttpClient(object): ("", "", path_bytes, param_bytes, query_bytes, "",) ) - logger.debug("Sending request to %s: %s %s", + logger.info("Sending request to %s: %s %s", destination, method, url_bytes) logger.debug( @@ -101,7 +101,10 @@ class MatrixFederationHttpClient(object): ] ) - retries_left = 5 + # was 5; for now, let's only try once at the HTTP layer and then + # rely on transaction-layer retries for exponential backoff and + # getting the message through. + retries_left = 0 endpoint = self._getEndpoint(reactor, destination) @@ -131,7 +134,8 @@ class MatrixFederationHttpClient(object): e) raise SynapseError(400, "Domain specified not found.") - logger.exception("Got error in _create_request") + logger.exception("Sending request failed to %s: %s %s : %s", + destination, method, url_bytes, e) _print_ex(e) if retries_left: @@ -140,15 +144,15 @@ class MatrixFederationHttpClient(object): else: raise + logger.info("Received response %d %s for %s: %s %s", + response.code, response.phrase, destination, method, url_bytes) + if 200 <= response.code < 300: # We need to update the transactions table to say it was sent? pass else: # :'( # Update transactions table? - logger.error( - "Got response %d %s", response.code, response.phrase - ) raise CodeMessageException( response.code, response.phrase ) diff --git a/synapse/rest/transactions.py b/synapse/rest/transactions.py index 93c0122f30..8c41ab4edb 100644 --- a/synapse/rest/transactions.py +++ b/synapse/rest/transactions.py @@ -19,7 +19,7 @@ import logging logger = logging.getLogger(__name__) - +# FIXME: elsewhere we use FooStore to indicate something in the storage layer... class HttpTransactionStore(object): def __init__(self): diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index f15e3dfe62..04ab39341d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -67,7 +67,7 @@ SCHEMAS = [ # Remember to update this number every time an incompatible change is made to # database schema files, so the users will be informed on server restarts. 
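The back-off policy introduced in this commit (the check at the top of ``_attempt_new_transaction`` plus ``start_retrying`` above) amounts to: skip a destination while it is still inside its retry window, and double the window on each failure. A condensed sketch with illustrative names, using the same starting interval the commit uses::

    def in_backoff(now_ms, retry_last_ts, retry_interval):
        # Destination is still recovering; drop the transaction for now.
        return retry_last_ts + retry_interval > now_ms

    def next_retry_interval(retry_interval):
        # Double on each failure; the commit starts at 2 ("try again at first after 2 seconds").
        return retry_interval * 2 if retry_interval else 2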
-SCHEMA_VERSION = 8 +SCHEMA_VERSION = 9 class _RollbackButIsFineException(Exception): diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 4881f03368..e72200e2f7 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -650,7 +650,7 @@ class JoinHelper(object): to dump the results into. Attributes: - taples (list): List of `Table` classes + tables (list): List of `Table` classes EntryType (type) """ diff --git a/synapse/storage/schema/delta/v9.sql b/synapse/storage/schema/delta/v9.sql new file mode 100644 index 0000000000..ad680c64da --- /dev/null +++ b/synapse/storage/schema/delta/v9.sql @@ -0,0 +1,23 @@ +/* Copyright 2014 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- To track destination health +CREATE TABLE IF NOT EXISTS destinations( + destination TEXT PRIMARY KEY, + retry_last_ts INTEGER, + retry_interval INTEGER +); + +PRAGMA user_version = 9; \ No newline at end of file diff --git a/synapse/storage/schema/transactions.sql b/synapse/storage/schema/transactions.sql index 88e3e4e04d..de461bfa15 100644 --- a/synapse/storage/schema/transactions.sql +++ b/synapse/storage/schema/transactions.sql @@ -59,3 +59,9 @@ CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_tx ON transaction_id_to_pdu(tra CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination); CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_index ON transaction_id_to_pdu(transaction_id, destination); +-- To track destination health +CREATE TABLE IF NOT EXISTS destinations( + destination TEXT PRIMARY KEY, + retry_last_ts INTEGER, + retry_interval INTEGER +); diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 00d0f48082..47b73f7458 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -114,7 +114,7 @@ class TransactionStore(SQLBaseStore): def _prep_send_transaction(self, txn, transaction_id, destination, origin_server_ts): - # First we find out what the prev_txs should be. + # First we find out what the prev_txns should be. # Since we know that we are only sending one transaction at a time, # we can simply take the last one. query = "%s ORDER BY id DESC LIMIT 1" % ( @@ -205,6 +205,71 @@ class TransactionStore(SQLBaseStore): return ReceivedTransactionsTable.decode_results(txn.fetchall()) + def get_destination_retry_timings(self, destination): + """Gets the current retry timings (if any) for a given destination. 
+ + Args: + destination (str) + + Returns: + None if not retrying + tuple: (retry_last_ts, retry_interval) + retry_ts: time of last retry attempt in unix epoch ms + retry_interval: how long until next retry in ms + """ + return self.runInteraction( + "get_destination_retry_timings", + self._get_destination_retry_timings, destination) + + def _get_destination_retry_timings(cls, txn, destination): + query = DestinationsTable.select_statement("destination = ?") + txn.execute(query, (destination,)) + result = DestinationsTable.decode_single_result(txn.fetchone()) + if result and result[0] > 0: + return result + else: + return None + + def set_destination_retry_timings(self, destination): + """Sets the current retry timings for a given destination. + Both timings should be zero if retrying is no longer occuring. + + Args: + destination (str) + retry_last_ts (int) - time of last retry attempt in unix epoch ms + retry_interval (int) - how long until next retry in ms + """ + return self.runInteraction( + "set_destination_retry_timings", + self._set_destination_retry_timings, destination, retry_last_ts, retry_interval) + + def _set_destination_retry_timings(cls, txn, destination, retry_last_ts, retry_interval): + + query = ( + "INSERT OR REPLACE INTO %s " + "(retry_last_ts, retry_interval) " + "VALUES (?, ?) " + "WHERE destination = ?" + ) % DestinationsTable.table_name + + txn.execute(query, (retry_last_ts, retry_interval, destination)) + + def get_destinations_needing_retry(self): + """Get all destinations which are due a retry for sending a transaction. + + Returns: + list: A list of `DestinationsTable.EntryType` + """ + return self.runInteraction( + "get_destinations_needing_retry", + self._get_destinations_needing_retry + ) + + def _get_destinations_needing_retry(cls, txn): + where = "retry_last_ts > 0 and retry_next_ts < now()" + query = DestinationsTable.select_statement(where) + txn.execute(query) + return DestinationsTable.decode_results(txn.fetchall()) class ReceivedTransactionsTable(Table): table_name = "received_transactions" @@ -247,3 +312,14 @@ class TransactionsToPduTable(Table): ] EntryType = namedtuple("TransactionsToPduEntry", fields) + +class DestinationsTable(Table): + table_name = "destinations" + + fields = [ + "destination", + "retry_last_ts", + "retry_interval", + ] + + EntryType = namedtuple("DestinationsEntry", fields) \ No newline at end of file -- cgit 1.5.1 From 02db7eb209af879a0168a371b4cb1c2ad0fcab49 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Dec 2014 14:02:48 +0000 Subject: Fix bug when uploading state with empty state_key --- synapse/events/__init__.py | 10 +++++-- synapse/handlers/_base.py | 66 ++++--------------------------------------- synapse/handlers/message.py | 15 ---------- synapse/rest/room.py | 20 +++++++------ synapse/storage/__init__.py | 4 ++- synapse/storage/schema/im.sql | 2 ++ 6 files changed, 29 insertions(+), 88 deletions(-) (limited to 'synapse/storage/schema') diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 5f41933174..bd3ffb59cc 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -118,6 +118,9 @@ class EventBase(object): return d + def get(self, key, default): + return self._event_dict.get(key, default) + def get_internal_metadata_dict(self): return self.internal_metadata.get_dict() @@ -165,6 +168,9 @@ class FrozenEvent(EventBase): return _unfreeze(super(FrozenEvent, self).get_dict()) def __str__(self): + return self.__repr__() + + def __repr__(self): return "" % ( - 
self.event_id, self.type, self.state_key, - ) + self.event_id, self.type, self.get("state_key", None), + ) \ No newline at end of file diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 6b5f6e7cd8..fdd3151877 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -97,6 +97,11 @@ class BaseHandler(object): event = builder.build() + logger.debug( + "Created event %s with auth_events: %s, current state: %s", + event.event_id, context.auth_events, context.current_state, + ) + defer.returnValue( (event, context,) ) @@ -147,64 +152,3 @@ class BaseHandler(object): None, destinations=destinations, ) - - # @defer.inlineCallbacks - # def _on_new_room_event(self, event, snapshot, extra_destinations=[], - # extra_users=[], suppress_auth=False, - # do_invite_host=None): - # yield run_on_reactor() - # - # snapshot.fill_out_prev_events(event) - # - # yield self.state_handler.annotate_event_with_state(event) - # - # yield self.auth.add_auth_events(event) - # - # logger.debug("Signing event...") - # - # add_hashes_and_signatures( - # event, self.server_name, self.signing_key - # ) - # - # logger.debug("Signed event.") - # - # if not suppress_auth: - # logger.debug("Authing...") - # self.auth.check(event, auth_events=event.old_state_events) - # logger.debug("Authed") - # else: - # logger.debug("Suppressed auth.") - # - # if do_invite_host: - # federation_handler = self.hs.get_handlers().federation_handler - # invite_event = yield federation_handler.send_invite( - # do_invite_host, - # event - # ) - # - # # FIXME: We need to check if the remote changed anything else - # event.signatures = invite_event.signatures - # - # yield self.store.persist_event(event) - # - # destinations = set(extra_destinations) - # # Send a PDU to all hosts who have joined the room. - # - # for k, s in event.state_events.items(): - # try: - # if k[0] == RoomMemberEvent.TYPE: - # if s.content["membership"] == Membership.JOIN: - # destinations.add( - # self.hs.parse_userid(s.state_key).domain - # ) - # except: - # logger.warn( - # "Failed to get destination from event %s", s.event_id - # ) - # - # event.destinations = list(destinations) - # - # yield self.notifier.on_new_room_event(event, extra_users=extra_users) - # - # federation_handler = self.hs.get_handlers().federation_handler - # yield federation_handler.handle_new_event(event, snapshot) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 13fa0be7b4..9043b945e4 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -165,21 +165,6 @@ class MessageHandler(BaseHandler): defer.returnValue(event) - @defer.inlineCallbacks - def store_room_data(self, event=None): - """ Stores data for a room. - - Args: - event : The room path event - stamp_event (bool) : True to stamp event content with server keys. - Raises: - SynapseError if something went wrong. 
- """ - - snapshot = yield self.store.snapshot_room(event) - - yield self._on_new_room_event(event, snapshot) - @defer.inlineCallbacks def get_room_data(self, user_id=None, room_id=None, event_type=None, state_key=""): diff --git a/synapse/rest/room.py b/synapse/rest/room.py index 3d78b4ff5c..22e1244e57 100644 --- a/synapse/rest/room.py +++ b/synapse/rest/room.py @@ -147,16 +147,18 @@ class RoomStateEventRestServlet(RestServlet): content = _parse_json(request) + event_dict = { + "type": event_type, + "content": content, + "room_id": urllib.unquote(room_id), + "sender": user.to_string(), + } + + if state_key is not None: + event_dict["state_key"] = urllib.unquote(state_key) + msg_handler = self.handlers.message_handler - yield msg_handler.handle_event( - { - "type": event_type, - "content": content, - "room_id": room_id, - "sender": user.to_string(), - "state_key": urllib.unquote(state_key), - } - ) + yield msg_handler.handle_event(event_dict) defer.returnValue((200, {})) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 7068424441..f8d895082d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -39,6 +39,7 @@ from .state import StateStore from .signatures import SignatureStore from syutil.base64util import decode_base64 +from syutil.jsonutil import encode_canonical_json from synapse.crypto.event_signing import compute_event_reference_hash @@ -162,7 +163,8 @@ class DataStore(RoomMemberStore, RoomStore, table="event_json", values={ "event_id": event.event_id, - "json": json.dumps(event_dict, separators=(',', ':')), + "room_id": event.room_id, + "json": encode_canonical_json(event_dict), }, or_replace=True, ) diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/im.sql index cb0c494ddf..0300bb29e1 100644 --- a/synapse/storage/schema/im.sql +++ b/synapse/storage/schema/im.sql @@ -35,11 +35,13 @@ CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id); CREATE TABLE IF NOT EXISTS event_json( event_id TEXT NOT NULL, + room_id TEXT NOT NULL, json BLOB NOT NULL, CONSTRAINT ev_j_uniq UNIQUE (event_id) ); CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id); +CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id); CREATE TABLE IF NOT EXISTS state_events( -- cgit 1.5.1 From 1d2a0040cff8d04cdc7d7d09d8f04a5d628fa9dd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Dec 2014 15:55:03 +0000 Subject: Fix bug where we clobbered old state group values --- synapse/handlers/federation.py | 9 +++++++++ synapse/storage/__init__.py | 3 ++- synapse/storage/schema/state.sql | 3 ++- 3 files changed, 13 insertions(+), 2 deletions(-) (limited to 'synapse/storage/schema') diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e5deb8a9ef..2201cd977e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -346,6 +346,8 @@ class FederationHandler(BaseHandler): event.get_pdu_json() ) + handled_events = set() + try: builder.event_id = self.event_factory.create_event_id() builder.origin = self.hs.hostname @@ -371,6 +373,10 @@ class FederationHandler(BaseHandler): auth_chain = ret["auth_chain"] auth_chain.sort(key=lambda e: e.depth) + handled_events.update([s.event_id for s in state]) + handled_events.update([a.event_id for a in auth_chain]) + handled_events.add(new_event.event_id) + logger.debug("do_invite_join auth_chain: %s", auth_chain) logger.debug("do_invite_join state: %s", state) @@ -426,6 +432,9 @@ class FederationHandler(BaseHandler): del 
self.room_queues[room_id] for p, origin in room_queue: + if p.event_id in handled_events: + continue + try: self.on_receive_pdu(origin, p, backfilled=False) except: diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index f8d895082d..2db2e9720f 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -220,7 +220,8 @@ class DataStore(RoomMemberStore, RoomStore, room_id=event.room_id, ) - self._store_state_groups_txn(txn, event, context) + if not outlier: + self._store_state_groups_txn(txn, event, context) if current_state: txn.execute( diff --git a/synapse/storage/schema/state.sql b/synapse/storage/schema/state.sql index 44f7aafb27..2c48d6daca 100644 --- a/synapse/storage/schema/state.sql +++ b/synapse/storage/schema/state.sql @@ -29,7 +29,8 @@ CREATE TABLE IF NOT EXISTS state_groups_state( CREATE TABLE IF NOT EXISTS event_to_state_groups( event_id TEXT NOT NULL, - state_group INTEGER NOT NULL + state_group INTEGER NOT NULL, + CONSTRAINT event_to_state_groups_uniq UNIQUE (event_id) ); CREATE INDEX IF NOT EXISTS state_groups_id ON state_groups(id); -- cgit 1.5.1 From 882dc8dcab434d24b0e902be3b4545ceabe9bdbe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Dec 2014 13:17:09 +0000 Subject: Persist internal_metadata --- synapse/events/__init__.py | 5 +++-- synapse/storage/__init__.py | 5 +++++ synapse/storage/_base.py | 7 ++++--- synapse/storage/schema/im.sql | 1 + 4 files changed, 13 insertions(+), 5 deletions(-) (limited to 'synapse/storage/schema') diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 1ec79a6788..984f14fce4 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -111,7 +111,7 @@ class EventBase(object): class FrozenEvent(EventBase): - def __init__(self, event_dict): + def __init__(self, event_dict, internal_metadata_dict={}): event_dict = copy.deepcopy(event_dict) signatures = copy.deepcopy(event_dict.pop("signatures", {})) @@ -122,7 +122,8 @@ class FrozenEvent(EventBase): super(FrozenEvent, self).__init__( frozen_dict, signatures=signatures, - unsigned=unsigned + unsigned=unsigned, + internal_metadata_dict=internal_metadata_dict, ) @staticmethod diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 5c079da5ba..26f205ae8f 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -156,12 +156,17 @@ class DataStore(RoomMemberStore, RoomStore, ] } + metadata_json = encode_canonical_json( + event.internal_metadata.get_dict() + ) + self._simple_insert_txn( txn, table="event_json", values={ "event_id": event.event_id, "room_id": event.room_id, + "internal_metadata": metadata_json.decode("UTF-8"), "json": encode_canonical_json(event_dict).decode("UTF-8"), }, or_replace=True, diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 31d5163c19..6dc857c4aa 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -452,7 +452,7 @@ class SQLBaseStore(object): def _get_event_txn(self, txn, event_id, check_redacted=True, get_prev_content=True): sql = ( - "SELECT json, r.event_id FROM event_json as e " + "SELECT internal_metadata, json, r.event_id FROM event_json as e " "LEFT JOIN redactions as r ON e.event_id = r.redacts " "WHERE e.event_id = ? 
" "LIMIT 1 " @@ -465,11 +465,12 @@ class SQLBaseStore(object): if not res: return None - js, redacted = res + internal_metadata, js, redacted = res d = json.loads(js) + internal_metadata = json.loads(internal_metadata) - ev = FrozenEvent(d) + ev = FrozenEvent(d, internal_metadata_dict=internal_metadata) if check_redacted and redacted: ev = prune_event(ev) diff --git a/synapse/storage/schema/im.sql b/synapse/storage/schema/im.sql index 0300bb29e1..253f9f779b 100644 --- a/synapse/storage/schema/im.sql +++ b/synapse/storage/schema/im.sql @@ -36,6 +36,7 @@ CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id); CREATE TABLE IF NOT EXISTS event_json( event_id TEXT NOT NULL, room_id TEXT NOT NULL, + internal_metadata NOT NULL, json BLOB NOT NULL, CONSTRAINT ev_j_uniq UNIQUE (event_id) ); -- cgit 1.5.1 From 42b725ce52844b3e858193aa12ddc06933c7584a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Dec 2014 15:13:34 +0000 Subject: Fix upgrade script to run all the missing deltas. --- scripts/upgrade_db_to_v0.6.0.py | 20 +++++++++++++ synapse/storage/schema/delta/v9.sql | 58 ++++++++++++++++++++++++++++++++++++- 2 files changed, 77 insertions(+), 1 deletion(-) (limited to 'synapse/storage/schema') diff --git a/scripts/upgrade_db_to_v0.6.0.py b/scripts/upgrade_db_to_v0.6.0.py index add088a818..32c415a662 100644 --- a/scripts/upgrade_db_to_v0.6.0.py +++ b/scripts/upgrade_db_to_v0.6.0.py @@ -1,3 +1,5 @@ + +from synapse.storage import SCHEMA_VERSION, read_schema from synapse.storage._base import SQLBaseStore from synapse.storage.signatures import SignatureStore from synapse.storage.event_federation import EventFederationStore @@ -186,12 +188,16 @@ def get_key(server_name): def reinsert_events(cursor, server_name, signing_key): + print "Running delta: v10" + cursor.executescript(delta_sql) cursor.execute( "SELECT * FROM events ORDER BY rowid ASC" ) + print "Getting events..." + rows = store.cursor_to_dict(cursor) events = store._generate_event_json(cursor, rows) @@ -281,7 +287,21 @@ def reinsert_events(cursor, server_name, signing_key): def main(database, server_name, signing_key): conn = sqlite3.connect(database) cursor = conn.cursor() + + # Do other deltas: + cursor.execute("PRAGMA user_version") + row = cursor.fetchone() + + if row and row[0]: + user_version = row[0] + # Run every version since after the current version. + for v in range(user_version + 1, 10): + print "Running delta: %d" % (v,) + sql_script = read_schema("delta/v%d" % (v,)) + cursor.executescript(sql_script) + reinsert_events(cursor, server_name, signing_key) + conn.commit() print "Success!" diff --git a/synapse/storage/schema/delta/v9.sql b/synapse/storage/schema/delta/v9.sql index ad680c64da..0af29733a0 100644 --- a/synapse/storage/schema/delta/v9.sql +++ b/synapse/storage/schema/delta/v9.sql @@ -20,4 +20,60 @@ CREATE TABLE IF NOT EXISTS destinations( retry_interval INTEGER ); -PRAGMA user_version = 9; \ No newline at end of file + +CREATE TABLE IF NOT EXISTS local_media_repository ( + media_id TEXT, -- The id used to refer to the media. + media_type TEXT, -- The MIME-type of the media. + media_length INTEGER, -- Length of the media in bytes. + created_ts INTEGER, -- When the content was uploaded in ms. + upload_name TEXT, -- The name the media was uploaded with. + user_id TEXT, -- The user who uploaded the file. + CONSTRAINT uniqueness UNIQUE (media_id) +); + +CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails ( + media_id TEXT, -- The id used to refer to the media. 
+ thumbnail_width INTEGER, -- The width of the thumbnail in pixels. + thumbnail_height INTEGER, -- The height of the thumbnail in pixels. + thumbnail_type TEXT, -- The MIME-type of the thumbnail. + thumbnail_method TEXT, -- The method used to make the thumbnail. + thumbnail_length INTEGER, -- The length of the thumbnail in bytes. + CONSTRAINT uniqueness UNIQUE ( + media_id, thumbnail_width, thumbnail_height, thumbnail_type + ) +); + +CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id + ON local_media_repository_thumbnails (media_id); + +CREATE TABLE IF NOT EXISTS remote_media_cache ( + media_origin TEXT, -- The remote HS the media came from. + media_id TEXT, -- The id used to refer to the media on that server. + media_type TEXT, -- The MIME-type of the media. + created_ts INTEGER, -- When the content was uploaded in ms. + upload_name TEXT, -- The name the media was uploaded with. + media_length INTEGER, -- Length of the media in bytes. + filesystem_id TEXT, -- The name used to store the media on disk. + CONSTRAINT uniqueness UNIQUE (media_origin, media_id) +); + +CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails ( + media_origin TEXT, -- The remote HS the media came from. + media_id TEXT, -- The id used to refer to the media. + thumbnail_width INTEGER, -- The width of the thumbnail in pixels. + thumbnail_height INTEGER, -- The height of the thumbnail in pixels. + thumbnail_method TEXT, -- The method used to make the thumbnail + thumbnail_type TEXT, -- The MIME-type of the thumbnail. + thumbnail_length INTEGER, -- The length of the thumbnail in bytes. + filesystem_id TEXT, -- The name used to store the media on disk. + CONSTRAINT uniqueness UNIQUE ( + media_origin, media_id, thumbnail_width, thumbnail_height, + thumbnail_type, thumbnail_type + ) +); + +CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id + ON local_media_repository_thumbnails (media_id); + + +PRAGMA user_version = 9; -- cgit 1.5.1
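
For reference, a minimal illustrative sketch (not part of the patches above) of how the event_json table, as it stands after these changes, can be exercised: writing an event's canonical JSON together with its internal metadata, then reading both back the way _get_event_txn does. The schema text matches the diffs; canonical_json, persist_event and get_event are simplified stand-ins for syutil's encode_canonical_json and the DataStore methods, and the redactions join is omitted.

    # Illustrative only: a stripped-down stand-in for Synapse's event persistence,
    # exercising the event_json schema as it stands after the patches above
    # (event_id, room_id, internal_metadata, json).
    import json
    import sqlite3

    SCHEMA = """
    CREATE TABLE IF NOT EXISTS event_json(
        event_id TEXT NOT NULL,
        room_id TEXT NOT NULL,
        internal_metadata NOT NULL,
        json BLOB NOT NULL,
        CONSTRAINT ev_j_uniq UNIQUE (event_id)
    );
    CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id);
    CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id);
    """

    def canonical_json(obj):
        # Stand-in for syutil's encode_canonical_json: sorted keys, no whitespace.
        return json.dumps(obj, sort_keys=True, separators=(',', ':')).encode("UTF-8")

    def persist_event(conn, event_dict, internal_metadata):
        # Mirrors the insert into event_json (or_replace=True) from persist_event.
        conn.execute(
            "INSERT OR REPLACE INTO event_json"
            " (event_id, room_id, internal_metadata, json) VALUES (?, ?, ?, ?)",
            (
                event_dict["event_id"],
                event_dict["room_id"],
                canonical_json(internal_metadata).decode("UTF-8"),
                canonical_json(event_dict).decode("UTF-8"),
            ),
        )

    def get_event(conn, event_id):
        # Mirrors the SELECT in _get_event_txn, minus the redactions join:
        # both the event JSON and its internal metadata are rehydrated.
        row = conn.execute(
            "SELECT internal_metadata, json FROM event_json"
            " WHERE event_id = ? LIMIT 1",
            (event_id,),
        ).fetchone()
        if row is None:
            return None
        internal_metadata, js = row
        return json.loads(js), json.loads(internal_metadata)

    if __name__ == "__main__":
        conn = sqlite3.connect(":memory:")
        conn.executescript(SCHEMA)
        persist_event(
            conn,
            {"event_id": "$abc:example.com", "room_id": "!room:example.com",
             "type": "m.room.message", "content": {"body": "hello"}},
            {"outlier": False},
        )
        print(get_event(conn, "$abc:example.com"))

In the real DataStore the two dicts come from a FrozenEvent (event.get_dict() and event.internal_metadata.get_dict()), and the metadata row is what lets FrozenEvent be reconstructed with its internal_metadata_dict on read, as the 882dc8dc patch does.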
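Similarly, the upgrade-script fix in 42b725ce52 amounts to replaying every delta file between the schema version recorded in PRAGMA user_version and the target version before re-inserting events. A rough sketch of that pattern, assuming an on-disk layout of delta/v<N>.sql (the DELTA_DIR path and TARGET_VERSION constant here are illustrative, not the real script's values):

    # Illustrative sketch of the "run all missing deltas" pattern used by the
    # upgrade script: read PRAGMA user_version, then apply delta/v<N>.sql for
    # every version after it, up to and including the target version.
    import os
    import sqlite3

    TARGET_VERSION = 9          # illustrative; the real script then applies v10 itself
    DELTA_DIR = "schema/delta"  # illustrative path to the delta SQL files

    def run_missing_deltas(conn):
        row = conn.execute("PRAGMA user_version").fetchone()
        user_version = row[0] if row else 0
        for v in range(user_version + 1, TARGET_VERSION + 1):
            path = os.path.join(DELTA_DIR, "v%d.sql" % (v,))
            with open(path) as f:
                conn.executescript(f.read())
        # Each delta is expected to bump PRAGMA user_version itself, as v9.sql does.

Running the older deltas first means the subsequent v10 event reinsertion always starts from a fully up-to-date schema, whichever schema version the database was originally created at.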