diff --git a/.gitignore b/.gitignore
index 2cef1b0a5a..295a18b539 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,13 +6,14 @@
*.egg
*.egg-info
*.lock
-*.pyc
+*.py[cod]
*.snap
*.tac
_trial_temp/
_trial_temp*/
/out
.DS_Store
+__pycache__/
# stuff that is likely to exist when you run a server locally
/*.db
diff --git a/CHANGES.md b/CHANGES.md
index e68d3154ab..d9ecbac440 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,6 +1,17 @@
+Synapse 1.xx.0
+==============
+
+Note that Synapse now expects an `X-Forwarded-Proto` header when used with a reverse proxy. Please see [UPGRADE.rst](UPGRADE.rst#upgrading-to-v1290) for more details on this change.
+
+
Synapse 1.28.0 (2021-02-25)
===========================
+Note that this release drops support for ARMv7 in the official Docker images, due to repeated problems building for ARMv7 (and the associated maintenance burden this entails).
+
+This release also fixes the documentation included in v1.27.0 around the callback URI for SAML2 identity providers. If your server is configured to use single sign-on via a SAML2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
+
+
Internal Changes
----------------
@@ -10,10 +21,6 @@ Internal Changes
Synapse 1.28.0rc1 (2021-02-19)
==============================
-Note that this release drops support for ARMv7 in the official Docker images, due to repeated problems building for ARMv7 (and the associated maintenance burden this entails).
-
-This release also fixes the documentation included in v1.27.0 around the callback URI for SAML2 identity providers. If your server is configured to use single sign-on via a SAML2 IdP, you may need to make configuration changes. Please review [UPGRADE.rst](UPGRADE.rst) for more details on these changes.
-
Removal warning
---------------
@@ -40,7 +47,7 @@ Bugfixes
--------
- Fix long-standing bug where sending email notifications would fail for rooms that the server had since left. ([\#9257](https://github.com/matrix-org/synapse/issues/9257))
-- Fix bug in Synapse 1.27.0rc1 which meant the "session expired" error page during SSO registration was badly formatted. ([\#9296](https://github.com/matrix-org/synapse/issues/9296))
+- Fix bug introduced in Synapse 1.27.0rc1 which meant the "session expired" error page during SSO registration was badly formatted. ([\#9296](https://github.com/matrix-org/synapse/issues/9296))
- Assert a maximum length for some parameters for spec compliance. ([\#9321](https://github.com/matrix-org/synapse/issues/9321), [\#9393](https://github.com/matrix-org/synapse/issues/9393))
- Fix additional errors when previewing URLs: "AttributeError 'NoneType' object has no attribute 'xpath'" and "ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.". ([\#9333](https://github.com/matrix-org/synapse/issues/9333))
- Fix a bug causing Synapse to impose the wrong type constraints on fields when processing responses from appservices to `/_matrix/app/v1/thirdparty/user/{protocol}`. ([\#9361](https://github.com/matrix-org/synapse/issues/9361))
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 6f628a6947..e852b806c2 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -85,6 +85,26 @@ for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
+Upgrading to v1.29.0
+====================
+
+Requirement for X-Forwarded-Proto header
+----------------------------------------
+
+When using Synapse with a reverse proxy (in particular, when using the
+`x_forwarded` option on an HTTP listener), Synapse now expects to receive an
+`X-Forwarded-Proto` header on incoming HTTP requests. If it is not set, Synapse
+will log a warning on each received request.
+
+To avoid the warning, administrators using a reverse proxy should ensure that
+the reverse proxy sets `X-Forwarded-Proto` header to `https` or `http` to
+indicate the protocol used by the client. See the `reverse proxy
+documentation <docs/reverse_proxy.md>`_, where the example configurations have
+been updated to show how to set this header.
+
+(Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
+sets `X-Forwarded-Proto` by default.)
+
Upgrading to v1.27.0
====================
diff --git a/changelog.d/9285.bugfix b/changelog.d/9285.bugfix
new file mode 100644
index 0000000000..81188c5473
--- /dev/null
+++ b/changelog.d/9285.bugfix
@@ -0,0 +1 @@
+Fix a bug where users' pushers were not all deleted when they deactivated their account.
diff --git a/changelog.d/9358.misc b/changelog.d/9358.misc
new file mode 100644
index 0000000000..cc7614afc0
--- /dev/null
+++ b/changelog.d/9358.misc
@@ -0,0 +1 @@
+Added a fix that invalidates cache for empty timed-out sync responses.
\ No newline at end of file
diff --git a/changelog.d/9436.bugfix b/changelog.d/9436.bugfix
new file mode 100644
index 0000000000..a530516eed
--- /dev/null
+++ b/changelog.d/9436.bugfix
@@ -0,0 +1 @@
+Fix a bug in single sign-on which could cause a "No session cookie found" error.
diff --git a/changelog.d/9472.feature b/changelog.d/9472.feature
new file mode 100644
index 0000000000..06cfd5d199
--- /dev/null
+++ b/changelog.d/9472.feature
@@ -0,0 +1 @@
+Add support for `X-Forwarded-Proto` header when using a reverse proxy.
diff --git a/changelog.d/9479.bugfix b/changelog.d/9479.bugfix
new file mode 100644
index 0000000000..2ab4f315c1
--- /dev/null
+++ b/changelog.d/9479.bugfix
@@ -0,0 +1 @@
+Fix deleting pushers when using sharded pushers.
diff --git a/changelog.d/9496.misc b/changelog.d/9496.misc
new file mode 100644
index 0000000000..d5866c56f7
--- /dev/null
+++ b/changelog.d/9496.misc
@@ -0,0 +1 @@
+Test that we require validated email for email pushers.
diff --git a/changelog.d/9501.feature b/changelog.d/9501.feature
new file mode 100644
index 0000000000..06cfd5d199
--- /dev/null
+++ b/changelog.d/9501.feature
@@ -0,0 +1 @@
+Add support for `X-Forwarded-Proto` header when using a reverse proxy.
diff --git a/changelog.d/9502.misc b/changelog.d/9502.misc
new file mode 100644
index 0000000000..c5c29672b6
--- /dev/null
+++ b/changelog.d/9502.misc
@@ -0,0 +1 @@
+Allow python to generate bytecode for synapse.
\ No newline at end of file
diff --git a/debian/build_virtualenv b/debian/build_virtualenv
index cf19084a9f..cad7d16883 100755
--- a/debian/build_virtualenv
+++ b/debian/build_virtualenv
@@ -58,10 +58,10 @@ trap "rm -r $tmpdir" EXIT
cp -r tests "$tmpdir"
PYTHONPATH="$tmpdir" \
- "${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
+ "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
# build the config file
-"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
+"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
--config-dir="/etc/matrix-synapse" \
--data-dir="/var/lib/matrix-synapse" |
perl -pe '
@@ -87,7 +87,7 @@ PYTHONPATH="$tmpdir" \
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
# build the log config file
-"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
+"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
# add a dependency on the right version of python to substvars.
diff --git a/debian/changelog b/debian/changelog
index 642e4d381d..04cd0d0d68 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+matrix-synapse-py3 (1.29.0) UNRELEASED; urgency=medium
+
+ [ Jonathan de Jong ]
+ * Remove the python -B flag (don't generate bytecode) in scripts and documentation.
+
+ -- Synapse Packaging team <packages@matrix.org> Fri, 26 Feb 2021 14:41:31 +0100
+
matrix-synapse-py3 (1.28.0) stable; urgency=medium
* New synapse release 1.28.0.
diff --git a/debian/synctl.1 b/debian/synctl.1
index 437f8f9e0e..af58c8d224 100644
--- a/debian/synctl.1
+++ b/debian/synctl.1
@@ -44,7 +44,7 @@ Configuration file may be generated as follows:
.
.nf
-$ python \-B \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
+$ python \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
.
.fi
.
diff --git a/debian/synctl.ronn b/debian/synctl.ronn
index 1bad6094f3..10cbda988f 100644
--- a/debian/synctl.ronn
+++ b/debian/synctl.ronn
@@ -41,7 +41,7 @@ process.
Configuration file may be generated as follows:
- $ python -B -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
+ $ python -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
## ENVIRONMENT
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index 04b6e24124..bb7caa8bb9 100644
--- a/docs/reverse_proxy.md
+++ b/docs/reverse_proxy.md
@@ -9,23 +9,23 @@ of doing so is that it means that you can expose the default https port
(443) to Matrix clients without needing to run Synapse with root
privileges.
-**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
-the requested URI in any way (for example, by decoding `%xx` escapes).
-Beware that Apache *will* canonicalise URIs unless you specify
-`nocanon`.
-
-When setting up a reverse proxy, remember that Matrix clients and other
-Matrix servers do not necessarily need to connect to your server via the
-same server name or port. Indeed, clients will use port 443 by default,
-whereas servers default to port 8448. Where these are different, we
-refer to the 'client port' and the 'federation port'. See [the Matrix
+You should configure your reverse proxy to forward requests to `/_matrix` or
+`/_synapse/client` to Synapse, and have it set the `X-Forwarded-For` and
+`X-Forwarded-Proto` request headers.
+
+You should remember that Matrix clients and other Matrix servers do not
+necessarily need to connect to your server via the same server name or
+port. Indeed, clients will use port 443 by default, whereas servers default to
+port 8448. Where these are different, we refer to the 'client port' and the
+'federation port'. See [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
for more details of the algorithm used for federation connections, and
[delegate.md](<delegate.md>) for instructions on setting up delegation.
-Endpoints that are part of the standardised Matrix specification are
-located under `/_matrix`, whereas endpoints specific to Synapse are
-located under `/_synapse/client`.
+**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
+the requested URI in any way (for example, by decoding `%xx` escapes).
+Beware that Apache *will* canonicalise URIs unless you specify
+`nocanon`.
Let's assume that we expect clients to connect to our server at
`https://matrix.example.com`, and other servers to connect at
@@ -52,6 +52,7 @@ server {
location ~* ^(\/_matrix|\/_synapse\/client) {
proxy_pass http://localhost:8008;
proxy_set_header X-Forwarded-For $remote_addr;
+ proxy_set_header X-Forwarded-Proto $scheme;
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
@@ -102,6 +103,7 @@ example.com:8448 {
SSLEngine on
ServerName matrix.example.com;
+ RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
AllowEncodedSlashes NoDecode
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
@@ -113,6 +115,7 @@ example.com:8448 {
SSLEngine on
ServerName example.com;
+ RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
AllowEncodedSlashes NoDecode
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
@@ -134,6 +137,9 @@ example.com:8448 {
```
frontend https
bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
+ http-request set-header X-Forwarded-Proto https if { ssl_fc }
+ http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
+ http-request set-header X-Forwarded-For %[src]
# Matrix client traffic
acl matrix-host hdr(host) -i matrix.example.com
@@ -144,6 +150,10 @@ frontend https
frontend matrix-federation
bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
+ http-request set-header X-Forwarded-Proto https if { ssl_fc }
+ http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
+ http-request set-header X-Forwarded-For %[src]
+
default_backend matrix
backend matrix
diff --git a/docs/spam_checker.md b/docs/spam_checker.md
index 47a27bf85c..e615ac9910 100644
--- a/docs/spam_checker.md
+++ b/docs/spam_checker.md
@@ -25,7 +25,7 @@ well as some specific methods:
* `check_username_for_spam`
* `check_registration_for_spam`
-The details of the each of these methods (as well as their inputs and outputs)
+The details of each of these methods (as well as their inputs and outputs)
are documented in the `synapse.events.spamcheck.SpamChecker` class.
The `ModuleApi` class provides a way for the custom spam checker class to
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
index a01bac2997..4a9b0129c3 100644
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -17,8 +17,6 @@ import sys
from synapse import python_dependencies # noqa: E402
-sys.dont_write_bytecode = True
-
logger = logging.getLogger(__name__)
try:
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 7911d126f5..7809c15589 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -120,6 +120,13 @@ class DeactivateAccountHandler(BaseHandler):
await self.store.user_set_password_hash(user_id, None)
+ # Most of the pushers will have been deleted when we logged out the
+ # associated devices above, but we still need to delete pushers not
+ # associated with devices, e.g. email pushers.
+ await self.store.delete_all_pushers_for_user(user_id)
+
+ # Set the user as no longer active. This will prevent their profile
+ # from being replicated.
user = UserID.from_string(user_id)
await self._profile_handler.set_active([user], False, False)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 9059382246..b45b179fed 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -291,8 +291,9 @@ class SyncHandler:
user_id = sync_config.user.to_string()
await self.auth.check_auth_blocking(requester=requester)
- res = await self.response_cache.wrap(
+ res = await self.response_cache.wrap_conditional(
sync_config.request_key,
+ lambda result: since_token != result.next_batch,
self._wait_for_sync_for_user,
sync_config,
since_token,
diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py
index c658862fe6..142b007d01 100644
--- a/synapse/http/__init__.py
+++ b/synapse/http/__init__.py
@@ -14,8 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import re
+from typing import Union
-from twisted.internet import task
+from twisted.internet import address, task
from twisted.web.client import FileBodyProducer
from twisted.web.iweb import IRequest
@@ -53,6 +54,40 @@ class QuieterFileBodyProducer(FileBodyProducer):
pass
+def get_request_uri(request: IRequest) -> bytes:
+ """Return the full URI that was requested by the client"""
+ return b"%s://%s%s" % (
+ b"https" if request.isSecure() else b"http",
+ _get_requested_host(request),
+ # despite its name, "request.uri" is only the path and query-string.
+ request.uri,
+ )
+
+
+def _get_requested_host(request: IRequest) -> bytes:
+ hostname = request.getHeader(b"host")
+ if hostname:
+ return hostname
+
+ # no Host header, use the address/port that the request arrived on
+ host = request.getHost() # type: Union[address.IPv4Address, address.IPv6Address]
+
+ hostname = host.host.encode("ascii")
+
+ if request.isSecure() and host.port == 443:
+ # default port for https
+ return hostname
+
+ if not request.isSecure() and host.port == 80:
+ # default port for http
+ return hostname
+
+ return b"%s:%i" % (
+ hostname,
+ host.port,
+ )
+
+
def get_request_user_agent(request: IRequest, default: str = "") -> str:
"""Return the last User-Agent header, or the given default."""
# There could be raw utf-8 bytes in the User-Agent header.
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 4a4fb5ef26..30153237e3 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -16,6 +16,10 @@ import logging
import time
from typing import Optional, Union
+import attr
+from zope.interface import implementer
+
+from twisted.internet.interfaces import IAddress
from twisted.python.failure import Failure
from twisted.web.server import Request, Site
@@ -333,26 +337,77 @@ class SynapseRequest(Request):
class XForwardedForRequest(SynapseRequest):
- def __init__(self, *args, **kw):
- SynapseRequest.__init__(self, *args, **kw)
+ """Request object which honours proxy headers
+ Extends SynapseRequest to replace getClientIP, getClientAddress, and isSecure with
+ information from request headers.
"""
- Add a layer on top of another request that only uses the value of an
- X-Forwarded-For header as the result of C{getClientIP}.
- """
- def getClientIP(self):
+ # the client IP and ssl flag, as extracted from the headers.
+ _forwarded_for = None # type: Optional[_XForwardedForAddress]
+ _forwarded_https = False # type: bool
+
+ def requestReceived(self, command, path, version):
+ # this method is called by the Channel once the full request has been
+ # received, to dispatch the request to a resource.
+ # We can use it to set the IP address and protocol according to the
+ # headers.
+ self._process_forwarded_headers()
+ return super().requestReceived(command, path, version)
+
+ def _process_forwarded_headers(self):
+ headers = self.requestHeaders.getRawHeaders(b"x-forwarded-for")
+ if not headers:
+ return
+
+ # for now, we just use the first x-forwarded-for header. Really, we ought
+ # to start from the client IP address, and check whether it is trusted; if it
+ # is, work backwards through the headers until we find an untrusted address.
+ # see https://github.com/matrix-org/synapse/issues/9471
+ self._forwarded_for = _XForwardedForAddress(
+ headers[0].split(b",")[0].strip().decode("ascii")
+ )
+
+ # if we got an x-forwarded-for header, also look for an x-forwarded-proto header
+ header = self.getHeader(b"x-forwarded-proto")
+ if header is not None:
+ self._forwarded_https = header.lower() == b"https"
+ else:
+ # this is done largely for backwards-compatibility so that people that
+ # haven't set an x-forwarded-proto header don't get a redirect loop.
+ logger.warning(
+ "forwarded request lacks an x-forwarded-proto header: assuming https"
+ )
+ self._forwarded_https = True
+
+ def isSecure(self):
+ if self._forwarded_https:
+ return True
+ return super().isSecure()
+
+ def getClientIP(self) -> str:
"""
- @return: The client address (the first address) in the value of the
- I{X-Forwarded-For header}. If the header is not present, return
- C{b"-"}.
+ Return the IP address of the client who submitted this request.
+
+ This method is deprecated. Use getClientAddress() instead.
"""
- return (
- self.requestHeaders.getRawHeaders(b"x-forwarded-for", [b"-"])[0]
- .split(b",")[0]
- .strip()
- .decode("ascii")
- )
+ if self._forwarded_for is not None:
+ return self._forwarded_for.host
+ return super().getClientIP()
+
+ def getClientAddress(self) -> IAddress:
+ """
+ Return the address of the client who submitted this request.
+ """
+ if self._forwarded_for is not None:
+ return self._forwarded_for
+ return super().getClientAddress()
+
+
+@implementer(IAddress)
+@attr.s(frozen=True, slots=True)
+class _XForwardedForAddress:
+ host = attr.ib(type=str)
class SynapseSite(Site):
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 1679a6d211..2534ecb1d4 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, Dict, Iterable, Optional
from prometheus_client import Gauge
+from synapse.api.errors import Codes, SynapseError
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
@@ -113,6 +114,11 @@ class PusherPool:
The newly created pusher.
"""
+ if kind == "email":
+ email_owner = await self.store.get_user_id_by_threepid("email", pushkey)
+ if email_owner != user_id:
+ raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
+
time_now_msec = self.clock.time_msec()
# create the pusher setting last_stream_ordering to the current maximum
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 6e2fbedd99..925edfc402 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -20,6 +20,7 @@ from synapse.api.errors import Codes, LoginError, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.appservice import ApplicationService
from synapse.handlers.sso import SsoIdentityProvider
+from synapse.http import get_request_uri
from synapse.http.server import HttpServer, finish_request
from synapse.http.servlet import (
RestServlet,
@@ -354,6 +355,7 @@ class SsoRedirectServlet(RestServlet):
hs.get_oidc_handler()
self._sso_handler = hs.get_sso_handler()
self._msc2858_enabled = hs.config.experimental.msc2858_enabled
+ self._public_baseurl = hs.config.public_baseurl
def register(self, http_server: HttpServer) -> None:
super().register(http_server)
@@ -373,6 +375,32 @@ class SsoRedirectServlet(RestServlet):
async def on_GET(
self, request: SynapseRequest, idp_id: Optional[str] = None
) -> None:
+ if not self._public_baseurl:
+ raise SynapseError(400, "SSO requires a valid public_baseurl")
+
+ # if this isn't the expected hostname, redirect to the right one, so that we
+ # get our cookies back.
+ requested_uri = get_request_uri(request)
+ baseurl_bytes = self._public_baseurl.encode("utf-8")
+ if not requested_uri.startswith(baseurl_bytes):
+ # swap out the incorrect base URL for the right one.
+ #
+ # The idea here is to redirect from
+ # https://foo.bar/whatever/_matrix/...
+ # to
+ # https://public.baseurl/_matrix/...
+ #
+ i = requested_uri.index(b"/_matrix")
+ new_uri = baseurl_bytes[:-1] + requested_uri[i:]
+ logger.info(
+ "Requested URI %s is not canonical: redirecting to %s",
+ requested_uri.decode("utf-8", errors="replace"),
+ new_uri.decode("utf-8", errors="replace"),
+ )
+ request.redirect(new_uri)
+ finish_request(request)
+ return
+
client_redirect_url = parse_string(
request, "redirectUrl", required=True, encoding=None
)
diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py
index 7cb69dd6bd..74219cb05e 100644
--- a/synapse/storage/databases/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -373,3 +373,46 @@ class PusherStore(PusherWorkerStore):
await self.db_pool.runInteraction(
"delete_pusher", delete_pusher_txn, stream_id
)
+
+ async def delete_all_pushers_for_user(self, user_id: str) -> None:
+ """Delete all pushers associated with an account."""
+
+ # We want to generate a row in `deleted_pushers` for each pusher we're
+ # deleting, so we fetch the list now so we can generate the appropriate
+ # number of stream IDs.
+ #
+ # Note: technically there could be a race here between adding/deleting
+ # pushers, but a) the worst case if we don't stop a pusher until the
+ # next restart and b) this is only called when we're deactivating an
+ # account.
+ pushers = list(await self.get_pushers_by_user_id(user_id))
+
+ def delete_pushers_txn(txn, stream_ids):
+ self._invalidate_cache_and_stream( # type: ignore
+ txn, self.get_if_user_has_pusher, (user_id,)
+ )
+
+ self.db_pool.simple_delete_txn(
+ txn,
+ table="pushers",
+ keyvalues={"user_name": user_id},
+ )
+
+ self.db_pool.simple_insert_many_txn(
+ txn,
+ table="deleted_pushers",
+ values=[
+ {
+ "stream_id": stream_id,
+ "app_id": pusher.app_id,
+ "pushkey": pusher.pushkey,
+ "user_id": user_id,
+ }
+ for stream_id, pusher in zip(stream_ids, pushers)
+ ],
+ )
+
+ async with self._pushers_id_gen.get_next_mult(len(pushers)) as stream_ids:
+ await self.db_pool.runInteraction(
+ "delete_all_pushers_for_user", delete_pushers_txn, stream_ids
+ )
diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql b/synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql
new file mode 100644
index 0000000000..20ba4abca3
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql
@@ -0,0 +1,21 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- We may not have deleted all pushers for deactivated accounts. Do so now.
+--
+-- Note: We don't bother updating the `deleted_pushers` table as it's just used
+-- to stop pushers on workers, and that will happen when they next get restarted.
+DELETE FROM pushers WHERE user_name IN (SELECT name FROM users WHERE deactivated = 1);
diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql b/synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql
new file mode 100644
index 0000000000..2442eea6bc
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql
@@ -0,0 +1,19 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- Delete all pushers associated with deleted devices. This is to clear up after
+-- a bug where they weren't correctly deleted when using workers.
+DELETE FROM pushers WHERE access_token NOT IN (SELECT id FROM access_tokens);
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index 32228f42ee..53f85195a7 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, TypeVar
+from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, Set, TypeVar
from twisted.internet import defer
@@ -40,6 +40,7 @@ class ResponseCache(Generic[T]):
def __init__(self, hs: "HomeServer", name: str, timeout_ms: float = 0):
# Requests that haven't finished yet.
self.pending_result_cache = {} # type: Dict[T, ObservableDeferred]
+ self.pending_conditionals = {} # type: Dict[T, Set[Callable[[Any], bool]]]
self.clock = hs.get_clock()
self.timeout_sec = timeout_ms / 1000.0
@@ -101,7 +102,11 @@ class ResponseCache(Generic[T]):
self.pending_result_cache[key] = result
def remove(r):
- if self.timeout_sec:
+ should_cache = all(
+ func(r) for func in self.pending_conditionals.pop(key, [])
+ )
+
+ if self.timeout_sec and should_cache:
self.clock.call_later(
self.timeout_sec, self.pending_result_cache.pop, key, None
)
@@ -112,6 +117,31 @@ class ResponseCache(Generic[T]):
result.addBoth(remove)
return result.observe()
+ def add_conditional(self, key: T, conditional: Callable[[Any], bool]):
+ self.pending_conditionals.setdefault(key, set()).add(conditional)
+
+ def wrap_conditional(
+ self,
+ key: T,
+ should_cache: Callable[[Any], bool],
+ callback: "Callable[..., Any]",
+ *args: Any,
+ **kwargs: Any
+ ) -> defer.Deferred:
+ """The same as wrap(), but adds a conditional to the final execution.
+
+    When the final execution completes, *all* conditionals must return True for the
+    result to be cached with a timeout; otherwise it is evicted immediately.
+ """
+
+ # See if there's already a result on this key that hasn't yet completed. Due to the single-threaded nature of
+ # python, adding a key immediately in the same execution thread will not cause a race condition.
+ result = self.get(key)
+ if not result or isinstance(result, defer.Deferred) and not result.called:
+ self.add_conditional(key, should_cache)
+
+ return self.wrap(key, callback, *args, **kwargs)
+
def wrap(
self, key: T, callback: "Callable[..., Any]", *args: Any, **kwargs: Any
) -> defer.Deferred:
diff --git a/synctl b/synctl
index cfa9cec0c4..56c0e3940f 100755
--- a/synctl
+++ b/synctl
@@ -30,7 +30,7 @@ import yaml
from synapse.config import find_config_files
-SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
+SYNAPSE = [sys.executable, "-m", "synapse.app.homeserver"]
GREEN = "\x1b[1;32m"
YELLOW = "\x1b[1;33m"
@@ -117,7 +117,6 @@ def start_worker(app: str, configfile: str, worker_configfile: str) -> bool:
args = [
sys.executable,
- "-B",
"-m",
app,
"-c",
diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py
index 34df39429b..3ea8b5bec7 100644
--- a/tests/http/test_proxyagent.py
+++ b/tests/http/test_proxyagent.py
@@ -361,81 +361,41 @@ class MatrixFederationAgentTests(TestCase):
body = self.successResultOf(treq.content(resp))
self.assertEqual(body, b"result")
- @patch.dict(os.environ, {"HTTPS_PROXY": "proxy.com"})
- def test_https_request_via_uppercase_proxy_with_blacklist(self):
+ @patch.dict(os.environ, {"http_proxy": "proxy.com:8888"})
+ def test_http_request_via_proxy_with_blacklist(self):
# The blacklist includes the configured proxy IP.
agent = ProxyAgent(
BlacklistingReactorWrapper(
self.reactor, ip_whitelist=None, ip_blacklist=IPSet(["1.0.0.0/8"])
),
self.reactor,
- contextFactory=get_test_https_policy(),
use_proxy=True,
)
self.reactor.lookups["proxy.com"] = "1.2.3.5"
- d = agent.request(b"GET", b"https://test.com/abc")
+ d = agent.request(b"GET", b"http://test.com")
# there should be a pending TCP connection
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.5")
- self.assertEqual(port, 1080)
+ self.assertEqual(port, 8888)
- # make a test HTTP server, and wire up the client
- proxy_server = self._make_connection(
+ # make a test server, and wire up the client
+ http_server = self._make_connection(
client_factory, _get_test_protocol_factory()
)
- # fish the transports back out so that we can do the old switcheroo
- s2c_transport = proxy_server.transport
- client_protocol = s2c_transport.other
- c2s_transport = client_protocol.transport
-
# the FakeTransport is async, so we need to pump the reactor
self.reactor.advance(0)
- # now there should be a pending CONNECT request
- self.assertEqual(len(proxy_server.requests), 1)
-
- request = proxy_server.requests[0]
- self.assertEqual(request.method, b"CONNECT")
- self.assertEqual(request.path, b"test.com:443")
-
- # tell the proxy server not to close the connection
- proxy_server.persistent = True
-
- # this just stops the http Request trying to do a chunked response
- # request.setHeader(b"Content-Length", b"0")
- request.finish()
-
- # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel
- ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory())
- ssl_protocol = ssl_factory.buildProtocol(None)
- http_server = ssl_protocol.wrappedProtocol
-
- ssl_protocol.makeConnection(
- FakeTransport(client_protocol, self.reactor, ssl_protocol)
- )
- c2s_transport.other = ssl_protocol
-
- self.reactor.advance(0)
-
- server_name = ssl_protocol._tlsConnection.get_servername()
- expected_sni = b"test.com"
- self.assertEqual(
- server_name,
- expected_sni,
- "Expected SNI %s but got %s" % (expected_sni, server_name),
- )
-
# now there should be a pending request
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
- self.assertEqual(request.path, b"/abc")
+ self.assertEqual(request.path, b"http://test.com")
self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
request.write(b"result")
request.finish()
@@ -446,8 +406,8 @@ class MatrixFederationAgentTests(TestCase):
body = self.successResultOf(treq.content(resp))
self.assertEqual(body, b"result")
- @patch.dict(os.environ, {"https_proxy": "proxy.com"})
- def test_https_request_via_proxy_with_blacklist(self):
+ @patch.dict(os.environ, {"HTTPS_PROXY": "proxy.com"})
+ def test_https_request_via_uppercase_proxy_with_blacklist(self):
# The blacklist includes the configured proxy IP.
agent = ProxyAgent(
BlacklistingReactorWrapper(
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index 22f452ec24..941cf42429 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -21,6 +21,7 @@ import pkg_resources
from twisted.internet.defer import Deferred
import synapse.rest.admin
+from synapse.api.errors import Codes, SynapseError
from synapse.rest.client.v1 import login, room
from tests.unittest import HomeserverTestCase
@@ -100,12 +101,19 @@ class EmailPusherTests(HomeserverTestCase):
user_tuple = self.get_success(
self.hs.get_datastore().get_user_by_access_token(self.access_token)
)
- token_id = user_tuple.token_id
+ self.token_id = user_tuple.token_id
+
+ # We need to add an email to the account before we can create a pusher.
+ self.get_success(
+ hs.get_datastore().user_add_threepid(
+ self.user_id, "email", "a@example.com", 0, 0
+ )
+ )
self.pusher = self.get_success(
self.hs.get_pusherpool().add_pusher(
user_id=self.user_id,
- access_token=token_id,
+ access_token=self.token_id,
kind="email",
app_id="m.email",
app_display_name="Email Notifications",
@@ -116,6 +124,28 @@ class EmailPusherTests(HomeserverTestCase):
)
)
+ def test_need_validated_email(self):
+ """Test that we can only add an email pusher if the user has validated
+ their email.
+ """
+ with self.assertRaises(SynapseError) as cm:
+ self.get_success_or_raise(
+ self.hs.get_pusherpool().add_pusher(
+ user_id=self.user_id,
+ access_token=self.token_id,
+ kind="email",
+ app_id="m.email",
+ app_display_name="Email Notifications",
+ device_display_name="b@example.com",
+ pushkey="b@example.com",
+ lang=None,
+ data={},
+ )
+ )
+
+ self.assertEqual(400, cm.exception.code)
+ self.assertEqual(Codes.THREEPID_NOT_FOUND, cm.exception.errcode)
+
def test_simple_sends_email(self):
# Create a simple room with two users
room = self.helper.create_room_as(self.user_id, tok=self.access_token)
diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py
index fb29eaed6f..744d8d0941 100644
--- a/tests/rest/client/v1/test_login.py
+++ b/tests/rest/client/v1/test_login.py
@@ -15,7 +15,7 @@
import time
import urllib.parse
-from typing import Any, Dict, List, Union
+from typing import Any, Dict, List, Optional, Union
from urllib.parse import urlencode
from mock import Mock
@@ -47,8 +47,14 @@ except ImportError:
HAS_JWT = False
-# public_base_url used in some tests
-BASE_URL = "https://synapse/"
+# synapse server name: used to populate public_baseurl in some tests
+SYNAPSE_SERVER_PUBLIC_HOSTNAME = "synapse"
+
+# public_baseurl for some tests. It uses an http:// scheme because
+# FakeChannel.isSecure() returns False, so synapse will see the requested uri as
+# http://..., so using http in the public_baseurl stops Synapse trying to redirect to
+# https://....
+BASE_URL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,)
# CAS server used in some tests
CAS_SERVER = "https://fake.test"
@@ -480,11 +486,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
def test_multi_sso_redirect(self):
"""/login/sso/redirect should redirect to an identity picker"""
# first hit the redirect url, which should redirect to our idp picker
- channel = self.make_request(
- "GET",
- "/_matrix/client/r0/login/sso/redirect?redirectUrl="
- + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL),
- )
+ channel = self._make_sso_redirect_request(False, None)
self.assertEqual(channel.code, 302, channel.result)
uri = channel.headers.getRawHeaders("Location")[0]
@@ -628,34 +630,21 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
def test_client_idp_redirect_msc2858_disabled(self):
"""If the client tries to pick an IdP but MSC2858 is disabled, return a 400"""
- channel = self.make_request(
- "GET",
- "/_matrix/client/unstable/org.matrix.msc2858/login/sso/redirect/oidc?redirectUrl="
- + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL),
- )
+ channel = self._make_sso_redirect_request(True, "oidc")
self.assertEqual(channel.code, 400, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
@override_config({"experimental_features": {"msc2858_enabled": True}})
def test_client_idp_redirect_to_unknown(self):
"""If the client tries to pick an unknown IdP, return a 404"""
- channel = self.make_request(
- "GET",
- "/_matrix/client/unstable/org.matrix.msc2858/login/sso/redirect/xxx?redirectUrl="
- + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL),
- )
+ channel = self._make_sso_redirect_request(True, "xxx")
self.assertEqual(channel.code, 404, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND")
@override_config({"experimental_features": {"msc2858_enabled": True}})
def test_client_idp_redirect_to_oidc(self):
"""If the client pick a known IdP, redirect to it"""
- channel = self.make_request(
- "GET",
- "/_matrix/client/unstable/org.matrix.msc2858/login/sso/redirect/oidc?redirectUrl="
- + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL),
- )
-
+ channel = self._make_sso_redirect_request(True, "oidc")
self.assertEqual(channel.code, 302, channel.result)
oidc_uri = channel.headers.getRawHeaders("Location")[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
@@ -663,6 +652,30 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
# it should redirect us to the auth page of the OIDC server
self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
+ def _make_sso_redirect_request(
+ self, unstable_endpoint: bool = False, idp_prov: Optional[str] = None
+ ):
+ """Send a request to /_matrix/client/r0/login/sso/redirect
+
+ ... or the unstable equivalent
+
+ ... possibly specifying an IDP provider
+ """
+ endpoint = (
+ "/_matrix/client/unstable/org.matrix.msc2858/login/sso/redirect"
+ if unstable_endpoint
+ else "/_matrix/client/r0/login/sso/redirect"
+ )
+ if idp_prov is not None:
+ endpoint += "/" + idp_prov
+ endpoint += "?redirectUrl=" + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+
+ return self.make_request(
+ "GET",
+ endpoint,
+ custom_headers=[("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME)],
+ )
+
@staticmethod
def _get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str:
prefix = key + " = "
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index 8231a423f3..946740aa5d 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -542,13 +542,30 @@ class RestHelper:
if client_redirect_url:
params["redirectUrl"] = client_redirect_url
- # hit the redirect url (which will issue a cookie and state)
+ # hit the redirect url (which should redirect back to the redirect url). This
+ # is the easiest way of figuring out what the Host header ought to be set to
+ # to keep Synapse happy.
channel = make_request(
self.hs.get_reactor(),
self.site,
"GET",
"/_matrix/client/r0/login/sso/redirect?" + urllib.parse.urlencode(params),
)
+ assert channel.code == 302
+
+ # hit the redirect url again with the right Host header, which should now issue
+ # a cookie and redirect to the SSO provider.
+ location = channel.headers.getRawHeaders("Location")[0]
+ parts = urllib.parse.urlsplit(location)
+ channel = make_request(
+ self.hs.get_reactor(),
+ self.site,
+ "GET",
+ urllib.parse.urlunsplit(("", "") + parts[2:]),
+ custom_headers=[
+ ("Host", parts[1]),
+ ],
+ )
assert channel.code == 302
channel.extract_cookies(cookies)
diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py
index c26ad824f7..9734a2159a 100644
--- a/tests/rest/client/v2_alpha/test_auth.py
+++ b/tests/rest/client/v2_alpha/test_auth.py
@@ -161,7 +161,11 @@ class UIAuthTests(unittest.HomeserverTestCase):
def default_config(self):
config = super().default_config()
- config["public_baseurl"] = "https://synapse.test"
+
+ # public_baseurl uses an http:// scheme because FakeChannel.isSecure() returns
+ # False, so synapse will see the requested uri as http://..., so using http in
+ # the public_baseurl stops Synapse trying to redirect to https.
+ config["public_baseurl"] = "http://synapse.test"
if HAS_OIDC:
# we enable OIDC as a way of testing SSO flows
diff --git a/tests/server.py b/tests/server.py
index d4ece5c448..939a0008ca 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -124,7 +124,11 @@ class FakeChannel:
return address.IPv4Address("TCP", self._ip, 3423)
def getHost(self):
- return None
+ # this is called by Request.__init__ to configure Request.host.
+ return address.IPv4Address("TCP", "127.0.0.1", 8888)
+
+ def isSecure(self):
+ return False
@property
def transport(self):
|