diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.md b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
index 756759c2d8..5cf844bfb1 100644
--- a/.github/ISSUE_TEMPLATE/BUG_REPORT.md
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
@@ -4,9 +4,9 @@ about: Create a report to help us improve
---
-<!--
+<!--
-**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
+**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)
@@ -17,7 +17,7 @@ the necessary data to fix your issue.
You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.
-Text between <!-- and --> marks will be invisible in the report.
+Text between <!-- and --​> marks will be invisible in the report.
-->
@@ -31,7 +31,7 @@ Text between <!-- and --> marks will be invisible in the report.
- that reproduce the bug
- using hyphens as bullet points
-<!--
+<!--
Describe how what happens differs from what you expected.
If you can identify any relevant log snippets from _homeserver.log_, please include
@@ -48,8 +48,8 @@ those (please be careful to remove any personal or private data). Please surroun
If not matrix.org:
-<!--
-What version of Synapse is running?
+<!--
+What version of Synapse is running?
You can find the Synapse version by inspecting the server headers (replace matrix.org with
your own homeserver domain):
$ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
diff --git a/.gitignore b/.gitignore
index a20f3e615d..a84c41b0c9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,11 +12,15 @@ _trial_temp/
_trial_temp*/
# stuff that is likely to exist when you run a server locally
+/*.db
+/*.log
+/*.log.config
+/*.pid
/*.signing.key
-/*.tls.crt
-/*.tls.key
-/uploads
+/env/
+/homeserver*.yaml
/media_store/
+/uploads
# IDEs
/.idea/
diff --git a/INSTALL.md b/INSTALL.md
index 5c67f14ed6..de6893530d 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -71,7 +71,8 @@ set this to the hostname of your server. For a more production-ready setup, you
will probably want to specify your domain (`example.com`) rather than a
matrix-specific hostname here (in the same way that your email address is
probably `user@example.com` rather than `user@email.example.com`) - but
-doing so may require more advanced setup. - see [Setting up Federation](README.rst#setting-up-federation). Beware that the server name cannot be changed later.
+doing so may require more advanced setup: see [Setting up Federation](docs/federate.md).
+Beware that the server name cannot be changed later.
This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your Home Server to
@@ -374,9 +375,16 @@ To configure Synapse to expose an HTTPS port, you will need to edit
* You will also need to uncomment the `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You can either
point these settings at an existing certificate and key, or you can
- enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
- for having Synapse automatically provision and renew federation
- certificates through ACME can be found at [ACME.md](docs/ACME.md).
+ enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
+ for having Synapse automatically provision and renew federation
+ certificates through ACME can be found at [ACME.md](docs/ACME.md). If you
+ are using your own certificate, be sure to use a `.pem` file that includes
+ the full certificate chain including any intermediate certificates (for
+ instance, if using certbot, use `fullchain.pem` as your certificate, not
+ `cert.pem`).
+
+For those of you upgrading your TLS certificate in readiness for Synapse 1.0,
+please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
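+
+If you want to check that the certificate you are actually serving includes the
+full chain, a minimal Python sketch like the one below can help (`example.com`
+and port `8448` are placeholders for your own server); verification will fail
+if intermediate certificates are missing:
+
+    import socket
+    import ssl
+
+    # Uses the system CA store; a server that omits its intermediate
+    # certificates typically fails with "unable to get local issuer certificate".
+    context = ssl.create_default_context()
+    with socket.create_connection(("example.com", 8448), timeout=10) as sock:
+        with context.wrap_socket(sock, server_hostname="example.com") as tls:
+            print(tls.getpeercert()["subject"])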
## Registering a user
diff --git a/README.rst b/README.rst
index c04fb4d19a..24afb93d7d 100644
--- a/README.rst
+++ b/README.rst
@@ -80,7 +80,10 @@ Thanks for using Matrix!
Synapse Installation
====================
-For details on how to install synapse, see `<INSTALL.md>`_.
+.. _federation:
+
+* For details on how to install synapse, see `<INSTALL.md>`_.
+* For specific details on how to configure Synapse for federation, see `docs/federate.md <docs/federate.md>`_.
Connecting to Synapse from a client
@@ -93,13 +96,13 @@ Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see `<INSTALL.md#tls-certificates>`_.
-An easy way to get started is to login or register via Riot at
-https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
+An easy way to get started is to login or register via Riot at
+https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
-and instead specify a Homeserver URL of ``https://<server_name>:8448``
-(or just ``https://<server_name>`` if you are using a reverse proxy).
-(Leave the identity server as the default - see `Identity servers`_.)
-If you prefer to use another client, refer to our
+and instead specify a Homeserver URL of ``https://<server_name>:8448``
+(or just ``https://<server_name>`` if you are using a reverse proxy).
+(Leave the identity server as the default - see `Identity servers`_.)
+If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
If all goes well you should at least be able to log in, create a room, and
@@ -151,56 +154,6 @@ server on the same domain.
See https://github.com/vector-im/riot-web/issues/1977 and
https://developer.github.com/changes/2014-04-25-user-content-security for more details.
-Troubleshooting
-===============
-
-Running out of File Handles
----------------------------
-
-If synapse runs out of filehandles, it typically fails badly - live-locking
-at 100% CPU, and/or failing to accept new TCP connections (blocking the
-connecting client). Matrix currently can legitimately use a lot of file handles,
-thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
-servers. The first time a server talks in a room it will try to connect
-simultaneously to all participating servers, which could exhaust the available
-file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
-to respond. (We need to improve the routing algorithm used to be better than
-full mesh, but as of June 2017 this hasn't happened yet).
-
-If you hit this failure mode, we recommend increasing the maximum number of
-open file handles to be at least 4096 (assuming a default of 1024 or 256).
-This is typically done by editing ``/etc/security/limits.conf``
-
-Separately, Synapse may leak file handles if inbound HTTP requests get stuck
-during processing - e.g. blocked behind a lock or talking to a remote server etc.
-This is best diagnosed by matching up the 'Received request' and 'Processed request'
-log lines and looking for any 'Processed request' lines which take more than
-a few seconds to execute. Please let us know at #synapse:matrix.org if
-you see this failure mode so we can help debug it, however.
-
-Help!! Synapse eats all my RAM!
--------------------------------
-
-Synapse's architecture is quite RAM hungry currently - we deliberately
-cache a lot of recent room data and metadata in RAM in order to speed up
-common requests. We'll improve this in future, but for now the easiest
-way to either reduce the RAM usage (at the risk of slowing things down)
-is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
-variable. The default is 0.5, which can be decreased to reduce RAM usage
-in memory constrained enviroments, or increased if performance starts to
-degrade.
-
-Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
-improvement in overall amount, and especially in terms of giving back RAM
-to the OS. To use it, the library must simply be put in the LD_PRELOAD
-environment variable when launching Synapse. On Debian, this can be done
-by installing the ``libjemalloc1`` package and adding this line to
-``/etc/default/matrix-synapse``::
-
- LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
-
-This can make a significant difference on Python 2.7 - it's unclear how
-much of an improvement it provides on Python 3.x.
Upgrading an existing Synapse
=============================
@@ -211,100 +164,19 @@ versions of synapse.
.. _UPGRADE.rst: UPGRADE.rst
-.. _federation:
-
-Setting up Federation
-=====================
-
-Federation is the process by which users on different servers can participate
-in the same room. For this to work, those other servers must be able to contact
-yours to send messages.
-
-The ``server_name`` in your ``homeserver.yaml`` file determines the way that
-other servers will reach yours. By default, they will treat it as a hostname
-and try to connect to port 8448. This is easy to set up and will work with the
-default configuration, provided you set the ``server_name`` to match your
-machine's public DNS hostname, and give Synapse a TLS certificate which is
-valid for your ``server_name``.
-
-For a more flexible configuration, you can set up a DNS SRV record. This allows
-you to run your server on a machine that might not have the same name as your
-domain name. For example, you might want to run your server at
-``synapse.example.com``, but have your Matrix user-ids look like
-``@user:example.com``. (A SRV record also allows you to change the port from
-the default 8448).
-
-To use a SRV record, first create your SRV record and publish it in DNS. This
-should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
-<synapse.server.name>``. The DNS record should then look something like::
-
- $ dig -t srv _matrix._tcp.example.com
- _matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
-
-Note that the server hostname cannot be an alias (CNAME record): it has to point
-directly to the server hosting the synapse instance.
-
-You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
-its user-ids, by setting ``server_name``::
-
- python -m synapse.app.homeserver \
- --server-name <yourdomain.com> \
- --config-path homeserver.yaml \
- --generate-config
- python -m synapse.app.homeserver --config-path homeserver.yaml
-
-If you've already generated the config file, you need to edit the ``server_name``
-in your ``homeserver.yaml`` file. If you've already started Synapse and a
-database has been created, you will have to recreate the database.
-
-If all goes well, you should be able to `connect to your server with a client`__,
-and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
-step. "Matrix HQ"'s sheer size and activity level tends to make even the
-largest boxes pause for thought.)
-
-.. __: `Connecting to Synapse from a client`_
-
-Troubleshooting
----------------
-
-You can use the `federation tester <https://matrix.org/federationtester>`_ to
-check if your homeserver is all set.
-
-The typical failure mode with federation is that when you try to join a room,
-it is rejected with "401: Unauthorized". Generally this means that other
-servers in the room couldn't access yours. (Joining a room over federation is a
-complicated dance which requires connections in both directions).
-
-So, things to check are:
-
-* If you are not using a SRV record, check that your ``server_name`` (the part
- of your user-id after the ``:``) matches your hostname, and that port 8448 on
- that hostname is reachable from outside your network.
-* If you *are* using a SRV record, check that it matches your ``server_name``
- (it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
- it specifies are reachable from outside your network.
-
-Another common problem is that people on other servers can't join rooms that
-you invite them to. This can be caused by an incorrectly-configured reverse
-proxy: see `<docs/reverse_proxy.rst>`_ for instructions on how to correctly
-configure a reverse proxy.
-
-Running a Demo Federation of Synapses
--------------------------------------
-
-If you want to get up and running quickly with a trio of homeservers in a
-private federation, there is a script in the ``demo`` directory. This is mainly
-useful just for development purposes. See `<demo/README>`_.
-
Using PostgreSQL
================
-As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
-traditionally used for convenience and simplicity.
+Synapse offers two database engines:
+ * `SQLite <https://sqlite.org/>`_
+ * `PostgreSQL <https://www.postgresql.org>`_
+
+By default Synapse uses SQLite and in doing so trades performance for convenience.
+SQLite is only recommended in Synapse for testing purposes or for servers with
+light workloads.
-The advantages of Postgres include:
+Almost all installations should opt to use PostgreSQL. Advantages include:
* significant performance improvements due to the superior threading and
caching model, smarter query optimiser
@@ -440,3 +312,54 @@ sphinxcontrib-napoleon::
Building internal API documentation::
python setup.py build_sphinx
+
+Troubleshooting
+===============
+
+Running out of File Handles
+---------------------------
+
+If synapse runs out of file handles, it typically fails badly - live-locking
+at 100% CPU, and/or failing to accept new TCP connections (blocking the
+connecting client). Matrix currently can legitimately use a lot of file handles,
+thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
+servers. The first time a server talks in a room it will try to connect
+simultaneously to all participating servers, which could exhaust the available
+file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
+to respond. (We need to improve the routing algorithm used to be better than
+full mesh, but as of March 2019 this hasn't happened yet).
+
+If you hit this failure mode, we recommend increasing the maximum number of
+open file handles to be at least 4096 (assuming a default of 1024 or 256).
+This is typically done by editing ``/etc/security/limits.conf``.
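+
+For example, adding lines like the following (a minimal sketch; adjust the
+scope and value to suit your setup) raises both limits to 4096 for ordinary
+user accounts::
+
+    *    soft    nofile    4096
+    *    hard    nofile    4096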
+
+Separately, Synapse may leak file handles if inbound HTTP requests get stuck
+during processing - e.g. blocked behind a lock or talking to a remote server etc.
+This is best diagnosed by matching up the 'Received request' and 'Processed request'
+log lines and looking for any 'Processed request' lines which take more than
+a few seconds to execute. Please let us know at #synapse:matrix.org if
+you see this failure mode so we can help debug it, however.
+
+Help!! Synapse eats all my RAM!
+-------------------------------
+
+Synapse's architecture is quite RAM hungry currently - we deliberately
+cache a lot of recent room data and metadata in RAM in order to speed up
+common requests. We'll improve this in the future, but for now the easiest
+way to reduce the RAM usage (at the risk of slowing things down)
+is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
+variable. The default is 0.5, which can be decreased to reduce RAM usage
+in memory-constrained environments, or increased if performance starts to
+degrade.
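+
+For example, on a Debian install you could reduce the cache size by adding a
+line like this to ``/etc/default/matrix-synapse`` (0.25 is just an illustrative
+value)::
+
+    SYNAPSE_CACHE_FACTOR=0.25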
+
+Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
+improvement in overall amount, and especially in terms of giving back RAM
+to the OS. To use it, the library must simply be put in the LD_PRELOAD
+environment variable when launching Synapse. On Debian, this can be done
+by installing the ``libjemalloc1`` package and adding this line to
+``/etc/default/matrix-synapse``::
+
+ LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
+
+This can make a significant difference on Python 2.7 - it's unclear how
+much of an improvement it provides on Python 3.x.
diff --git a/changelog.d/4662.misc b/changelog.d/4662.misc
new file mode 100644
index 0000000000..f4ec0d6a68
--- /dev/null
+++ b/changelog.d/4662.misc
@@ -0,0 +1 @@
+Add a systemd setup that supports synapse workers. Contributed by Luca Corbatto.
diff --git a/changelog.d/4793.feature b/changelog.d/4793.feature
deleted file mode 100644
index 90dba7d122..0000000000
--- a/changelog.d/4793.feature
+++ /dev/null
@@ -1 +0,0 @@
-Synapse is now permissive about trailing slashes on some of its federation endpoints, allowing zero or more to be present.
\ No newline at end of file
diff --git a/changelog.d/4832.misc b/changelog.d/4832.misc
new file mode 100644
index 0000000000..92022266c6
--- /dev/null
+++ b/changelog.d/4832.misc
@@ -0,0 +1 @@
+Improve federation documentation, specifically .well-known support. Many thanks to @vaab.
diff --git a/changelog.d/4843.misc b/changelog.d/4843.misc
new file mode 100644
index 0000000000..03d0a3e2e7
--- /dev/null
+++ b/changelog.d/4843.misc
@@ -0,0 +1 @@
+Add stuff back to the .gitignore.
diff --git a/changelog.d/4846.feature b/changelog.d/4846.feature
new file mode 100644
index 0000000000..8f792b8890
--- /dev/null
+++ b/changelog.d/4846.feature
@@ -0,0 +1 @@
+The user directory has been rewritten to make it faster, with less chance of falling behind on a large server.
diff --git a/changelog.d/4849.misc b/changelog.d/4849.misc
new file mode 100644
index 0000000000..f2cab20b44
--- /dev/null
+++ b/changelog.d/4849.misc
@@ -0,0 +1 @@
+Update install docs to explicitly state a full-chain (not just the top-level) TLS certificate must be provided to Synapse. This caused some people's Synapse ports to appear correct in a browser but still (rightfully so) upset the federation tester.
\ No newline at end of file
diff --git a/changelog.d/4853.feature b/changelog.d/4853.feature
new file mode 100644
index 0000000000..360f92e1de
--- /dev/null
+++ b/changelog.d/4853.feature
@@ -0,0 +1 @@
+Allow passing --daemonize flags to workers in the same way as with master.
diff --git a/contrib/systemd-with-workers/README.md b/contrib/systemd-with-workers/README.md
new file mode 100644
index 0000000000..74b261e9fb
--- /dev/null
+++ b/contrib/systemd-with-workers/README.md
@@ -0,0 +1,150 @@
+# Setup Synapse with Workers and Systemd
+
+This is a setup for managing synapse with systemd, including support for
+managing workers. It provides a `matrix-synapse` service, as well as a
+`matrix-synapse-worker@` service for any workers you require. Additionally,
+to group the required services, it sets up a `matrix.target`. You can use
+this to automatically start any bot or bridge services. More on this in
+[Bots and Bridges](#bots-and-bridges).
+
+See the folder [system](system) for any service and target files.
+
+The folder [workers](workers) contains an example configuration for the
+`federation_reader` worker. Pay special attention to the name of the
+configuration file. In order to work with the `matrix-synapse-worker@.service`
+service, it needs to have the exact same name as the worker app.
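+
+For example, a worker configured with `worker_app: synapse.app.federation_reader`
+needs its config file at `/etc/matrix-synapse/workers/federation_reader.yaml` so
+that `matrix-synapse-worker@federation_reader.service` can find it.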
+
+This setup expects neither the homeserver nor any workers to fork. Forking is
+handled by systemd.
+
+## Setup
+
+1. Adjust your matrix configs. Make sure that the worker config files have the
+exact same name as the worker app. See `matrix-synapse-worker@.service` for
+why. You can find an example worker config in the [workers](workers) folder. See
+below for relevant settings in the `homeserver.yaml`.
+2. Copy the `*.service` and `*.target` files in [system](system) to
+`/etc/systemd/system`.
+3. `systemctl enable matrix-synapse.service` - this adds the homeserver
+app to the `matrix.target`.
+4. *Optional.* `systemctl enable
+matrix-synapse-worker@federation_reader.service` - this adds the federation_reader
+app to the `matrix-synapse.service`.
+5. *Optional.* Repeat step 4 for any additional workers you require.
+6. *Optional.* Add any bots or bridges by enabling them.
+7. Start all matrix-related services via `systemctl start matrix.target`.
+8. *Optional.* Enable autostart of all matrix-related services on system boot
+via `systemctl enable matrix.target`.
+
+## Usage
+
+After setup, you can use the following commands to manage your synapse
+installation:
+
+```
+# Start matrix-synapse, all workers and any enabled bots or bridges.
+systemctl start matrix.target
+
+# Restart matrix-synapse and all workers (not necessarily restarting bots
+# or bridges, see "Bots and Bridges")
+systemctl restart matrix-synapse.service
+
+# Stop matrix-synapse and all workers (not necessarily restarting bots
+# or bridges, see "Bots and Bridges")
+systemctl stop matrix-synapse.service
+
+# Restart a specific worker (e.g. federation_reader); the homeserver is
+# unaffected by this.
+systemctl restart matrix-synapse-worker@federation_reader.service
+
+# Add a new worker (assuming all configs are setup already)
+systemctl enable matrix-synapse-worker@federation_writer.service
+systemctl restart matrix-synapse.service
+```
+
+## The Configs
+
+Make sure `worker_app` is set in the `homeserver.yaml`, and that it does not fork.
+
+```
+worker_app: synapse.app.homeserver
+daemonize: false
+```
+
+None of the workers should fork, as forking is handled by systemd. Hence make
+sure this is present in all worker config files.
+
+```
+worker_daemonize: false
+```
+
+The config files of all workers are expected to be located in
+`/etc/matrix-synapse/workers`. If you want to use a different location you have
+to edit the provided `*.service` files accordingly.
+
+## Bots and Bridges
+
+Most bots and bridges do not care if the homeserver goes down or is restarted.
+Depending on the implementation this may crash them, though. So check the docs
+of, or ask the community around, the specific bridge or bot you want to run to
+make sure you choose the correct setup.
+
+Whichever configuration you choose, the following will enable automatically
+starting (and potentially restarting) your bot/bridge together with the
+`matrix.target`:
+
+```
+systemctl enable <yourBotOrBridgeName>.service
+```
+
+**Note** that if synapse is not yet running, the bots/bridges will only be started
+along with synapse if you start the `matrix.target`, not if you start the
+`matrix-synapse.service`. This is on purpose. Think of `matrix-synapse.service`
+as *just* synapse, and `matrix.target` as anything matrix related, including
+synapse and any and all enabled bots and bridges.
+
+### Start with synapse but ignore synapse going down
+
+If the bridge can handle shutdowns of the homeserver you'll want to install the
+service in the `matrix.target` and optionally add an
+`After=matrix-synapse.service` dependency to have the bot/bridge start after
+synapse when you start everything.
+
+In this case the service file should look like this.
+
+```
+[Unit]
+# ...
+# Optional, this will only ensure that if you start everything, synapse will
+# be started before the bot/bridge will be started.
+After=matrix-synapse.service
+
+[Service]
+# ...
+
+[Install]
+WantedBy=matrix.target
+```
+
+### Stop/restart when synapse stops/restarts
+
+If the bridge can't handle shutdowns of the homeserver you'll still want to
+install the service in the `matrix.target` but also have to specify the
+`After=matrix-synapse.service` *and* `BindsTo=matrix-synapse.service`
+dependencies to have the bot/bridge stop/restart with synapse.
+
+In this case the service file should look like this.
+
+```
+[Unit]
+# ...
+# Mandatory
+After=matrix-synapse.service
+BindsTo=matrix-synapse.service
+
+[Service]
+# ...
+
+[Install]
+WantedBy=matrix.target
+```
diff --git a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service
new file mode 100644
index 0000000000..912984b9d2
--- /dev/null
+++ b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Synapse Matrix Worker
+After=matrix-synapse.service
+BindsTo=matrix-synapse.service
+
+[Service]
+Type=simple
+User=matrix-synapse
+WorkingDirectory=/var/lib/matrix-synapse
+EnvironmentFile=/etc/default/matrix-synapse
+ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.%i --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=3
+
+[Install]
+WantedBy=matrix-synapse.service
diff --git a/contrib/systemd-with-workers/system/matrix-synapse.service b/contrib/systemd-with-workers/system/matrix-synapse.service
new file mode 100644
index 0000000000..8bb4e400dc
--- /dev/null
+++ b/contrib/systemd-with-workers/system/matrix-synapse.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Synapse Matrix Homeserver
+
+[Service]
+Type=simple
+User=matrix-synapse
+WorkingDirectory=/var/lib/matrix-synapse
+EnvironmentFile=/etc/default/matrix-synapse
+ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
+ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=3
+
+[Install]
+WantedBy=matrix.target
diff --git a/contrib/systemd-with-workers/system/matrix.target b/contrib/systemd-with-workers/system/matrix.target
new file mode 100644
index 0000000000..aff97d03ef
--- /dev/null
+++ b/contrib/systemd-with-workers/system/matrix.target
@@ -0,0 +1,7 @@
+[Unit]
+Description=Contains matrix services like synapse, bridges and bots
+After=network.target
+AllowIsolate=no
+
+[Install]
+WantedBy=multi-user.target
diff --git a/contrib/systemd-with-workers/workers/federation_reader.yaml b/contrib/systemd-with-workers/workers/federation_reader.yaml
new file mode 100644
index 0000000000..47c54ec0d4
--- /dev/null
+++ b/contrib/systemd-with-workers/workers/federation_reader.yaml
@@ -0,0 +1,14 @@
+worker_app: synapse.app.federation_reader
+
+worker_replication_host: 127.0.0.1
+worker_replication_port: 9092
+worker_replication_http_port: 9093
+
+worker_listeners:
+ - type: http
+ port: 8011
+ resources:
+ - names: [federation]
+
+worker_daemonize: false
+worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml
diff --git a/docs/federate.md b/docs/federate.md
new file mode 100644
index 0000000000..186245a94b
--- /dev/null
+++ b/docs/federate.md
@@ -0,0 +1,125 @@
+Setting up Federation
+=====================
+
+Federation is the process by which users on different servers can participate
+in the same room. For this to work, those other servers must be able to contact
+yours to send messages.
+
+The ``server_name`` configured in the Synapse configuration file (often
+``homeserver.yaml``) defines how resources (users, rooms, etc.) will be
+identified (e.g. ``@user:example.com``, ``#room:example.com``). By
+default, it is also the domain that other servers will use to
+try to reach your server (via port 8448). This is easy to set
+up and will work provided you set the ``server_name`` to match your
+machine's public DNS hostname, and provide Synapse with a TLS certificate
+which is valid for your ``server_name``.
+
+Once you have completed the steps necessary to federate, you should be able to
+join a room via federation. (A good place to start is ``#synapse:matrix.org``
+- a room for Synapse admins.)
+
+
+## Delegation
+
+For a more flexible configuration, you can have ``server_name``
+resources (e.g. ``@user:example.com``) served by a different host and
+port (e.g. ``synapse.example.com:443``). There are two ways to do this:
+
+- adding a ``/.well-known/matrix/server`` URL served on ``https://example.com``.
+- adding a DNS ``SRV`` record in the DNS zone of domain
+ ``example.com``.
+
+Without configuring delegation, other Matrix servers will
+expect to find your server via ``example.com:8448``. The following methods
+allow you to retain a `server_name` of `example.com` so that your user IDs, room
+aliases, etc continue to look like `*:example.com`, whilst having your
+federation traffic routed to a different server.
+
+### .well-known delegation
+
+To use this method, you need to be able to configure the https server at your
+``server_name`` to serve the ``/.well-known/matrix/server`` URL. Setting up
+such a server (with a valid TLS certificate) for your ``server_name`` domain is
+out of the scope of this documentation.
+
+The URL ``https://<server_name>/.well-known/matrix/server`` should
+return a JSON structure containing the key ``m.server`` like so:
+
+ {
+ "m.server": "<synapse.server.name>[:<yourport>]"
+ }
+
+In our example, this would mean that the URL ``https://example.com/.well-known/matrix/server``
+should return:
+
+ {
+ "m.server": "synapse.example.com:443"
+ }
+
+Note that specifying a port is optional. If a port is not specified, an SRV lookup
+is performed, as described below. If the target of the
+delegation does not have an SRV record, then the port defaults to 8448.
+
+Most installations will not need to configure .well-known. However, it can be
+useful in cases where the admin is hosting on behalf of someone else and
+therefore cannot gain access to the necessary certificate. With .well-known,
+federation servers will check for a valid TLS certificate for the delegated
+hostname (in our example: ``synapse.example.com``).
+
+.well-known support first appeared in Synapse v0.99.0. To federate with older
+servers you may need to additionally configure SRV delegation. Alternatively,
+encourage the server admin in question to upgrade :).
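+
+As an illustration of what federating servers do with this file, here is a
+minimal Python sketch (standard library only; ``example.com`` is a placeholder)
+that fetches and parses the delegation:
+
+    import json
+    from urllib.request import urlopen
+
+    def lookup_well_known(server_name):
+        # Fetch https://<server_name>/.well-known/matrix/server and return the
+        # delegated "m.server" value, or None if it is missing or invalid.
+        url = "https://%s/.well-known/matrix/server" % (server_name,)
+        try:
+            with urlopen(url, timeout=10) as resp:
+                return json.loads(resp.read().decode("utf-8")).get("m.server")
+        except Exception:
+            return None
+
+    print(lookup_well_known("example.com"))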
+
+### DNS SRV delegation
+
+To use this delegation method, you need to have write access to your
+``server_name`` 's domain zone DNS records (in our example, the
+``example.com`` DNS zone).
+
+This method requires the target server to provide a
+valid TLS certificate for the original ``server_name``.
+
+You need to add an SRV record in your ``server_name`` 's DNS zone with
+this format:
+
+ _matrix._tcp.<yourdomain.com> <ttl> IN SRV <priority> <weight> <port> <synapse.server.name>
+
+In our example, we would need to add this SRV record in the
+``example.com`` DNS zone:
+
+ _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
+
+
+Once the record is published, you can check it with ``dig -t srv
+_matrix._tcp.<server_name>``. In our example, we would expect this:
+
+ $ dig -t srv _matrix._tcp.example.com
+    _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
+
+Note that the target of an SRV record cannot be an alias (CNAME record): it has to point
+directly to the server hosting the synapse instance.
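+
+If you prefer to check the record from Python, here is a minimal sketch using
+the third-party ``dnspython`` package (not part of Synapse; the domain below is
+a placeholder):
+
+    import dns.resolver
+
+    # Look up the Matrix SRV record and print each target host and port.
+    # (Newer dnspython versions also offer dns.resolver.resolve.)
+    for srv in dns.resolver.query("_matrix._tcp.example.com", "SRV"):
+        print(srv.target, srv.port, srv.priority, srv.weight)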
+
+## Troubleshooting
+
+You can use the [federation tester](https://matrix.org/federationtester) to
+check if your homeserver is configured correctly. Alternatively try the
+[JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
+Note that you'll have to modify this URL to replace ``DOMAIN`` with your
+``server_name``. Hitting the API directly provides extra detail.
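+
+For instance, this stand-alone Python sketch (standard library only; replace
+``example.com`` with your ``server_name``) fetches and pretty-prints the
+tester's report:
+
+    import json
+    from urllib.request import urlopen
+
+    url = "https://matrix.org/federationtester/api/report?server_name=example.com"
+    with urlopen(url, timeout=30) as resp:
+        # Pretty-print whatever the tester returns; the exact fields may vary.
+        print(json.dumps(json.loads(resp.read().decode("utf-8")), indent=2))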
+
+The typical failure mode for federation is that when the server tries to join
+a room, it is rejected with "401: Unauthorized". Generally this means that other
+servers in the room could not access yours. (Joining a room over federation is
+a complicated dance which requires connections in both directions).
+
+Another common problem is that people on other servers can't join rooms that
+you invite them to. This can be caused by an incorrectly-configured reverse
+proxy: see [reverse_proxy.rst](<reverse_proxy.rst>) for instructions on how to correctly
+configure a reverse proxy.
+
+
+## Running a Demo Federation of Synapses
+
+If you want to get up and running quickly with a trio of homeservers in a
+private federation, there is a script in the ``demo`` directory. This is mainly
+useful just for development purposes. See [demo/README](<../demo/README>).
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 22d5e6b1d7..5f2534e465 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -246,6 +246,11 @@ listeners:
# See 'ACME support' below to enable auto-provisioning this certificate via
# Let's Encrypt.
#
+# If supplying your own, be sure to use a `.pem` file that includes the
+# full certificate chain including any intermediate certificates (for
+# instance, if using certbot, use `fullchain.pem` as your certificate,
+# not `cert.pem`).
+#
#tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt"
# PEM-encoded private key for TLS
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 32e8b8a3f5..d4c6c4c8e2 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -63,12 +63,13 @@ def start_worker_reactor(appname, config):
start_reactor(
appname,
- config.soft_file_limit,
- config.gc_thresholds,
- config.worker_pid_file,
- config.worker_daemonize,
- config.worker_cpu_affinity,
- logger,
+ soft_file_limit=config.soft_file_limit,
+ gc_thresholds=config.gc_thresholds,
+ pid_file=config.worker_pid_file,
+ daemonize=config.worker_daemonize,
+ cpu_affinity=config.worker_cpu_affinity,
+ print_pidfile=config.print_pidfile,
+ logger=logger,
)
@@ -79,6 +80,7 @@ def start_reactor(
pid_file,
daemonize,
cpu_affinity,
+ print_pidfile,
logger,
):
""" Run the reactor in the main process
@@ -93,6 +95,7 @@ def start_reactor(
pid_file (str): name of pid file to write to if daemonize is True
daemonize (bool): true to run the reactor in a background process
cpu_affinity (int|None): cpu affinity mask
+ print_pidfile (bool): whether to print the pid file, if daemonize is True
logger (logging.Logger): logger instance to pass to Daemonize
"""
@@ -124,6 +127,9 @@ def start_reactor(
reactor.run()
if daemonize:
+ if print_pidfile:
+ print(pid_file)
+
daemon = Daemonize(
app=appname,
pid=pid_file,
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index e8b6cc3114..869c028d1f 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -376,6 +376,7 @@ def setup(config_options):
logger.info("Database prepared in %s.", config.database_config['name'])
hs.setup()
+ hs.setup_master()
@defer.inlineCallbacks
def do_acme():
@@ -636,17 +637,15 @@ def run(hs):
# be quite busy the first few minutes
clock.call_later(5 * 60, start_phone_stats_home)
- if hs.config.daemonize and hs.config.print_pidfile:
- print(hs.config.pid_file)
-
_base.start_reactor(
"synapse-homeserver",
- hs.config.soft_file_limit,
- hs.config.gc_thresholds,
- hs.config.pid_file,
- hs.config.daemonize,
- hs.config.cpu_affinity,
- logger,
+ soft_file_limit=hs.config.soft_file_limit,
+ gc_thresholds=hs.config.gc_thresholds,
+ pid_file=hs.config.pid_file,
+ daemonize=hs.config.daemonize,
+ cpu_affinity=hs.config.cpu_affinity,
+ print_pidfile=hs.config.print_pidfile,
+ logger=logger,
)
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index c4d3087fa4..5613f38e4d 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -214,14 +214,20 @@ class Config(object):
" Defaults to the directory containing the last config file",
)
+ obj = cls()
+
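+    # Let each config section register its own command line options on the
+    # parser before argv is parsed; the parsed values are handed back to the
+    # sections via "read_arguments" below.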
+ obj.invoke_all("add_arguments", config_parser)
+
config_args = config_parser.parse_args(argv)
config_files = find_config_files(search_paths=config_args.config_path)
- obj = cls()
obj.read_config_files(
config_files, keys_directory=config_args.keys_directory, generate_keys=False
)
+
+ obj.invoke_all("read_arguments", config_args)
+
return obj
@classmethod
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 40045de7ac..f0014902da 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -181,6 +181,11 @@ class TlsConfig(Config):
# See 'ACME support' below to enable auto-provisioning this certificate via
# Let's Encrypt.
#
+ # If supplying your own, be sure to use a `.pem` file that includes the
+ # full certificate chain including any intermediate certificates (for
+ # instance, if using certbot, use `fullchain.pem` as your certificate,
+ # not `cert.pem`).
+ #
#tls_certificate_path: "%(tls_certificate_path)s"
# PEM-encoded private key for TLS
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 80baf0ce0e..bfbd8b6c91 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -28,7 +28,7 @@ class WorkerConfig(Config):
if self.worker_app == "synapse.app.homeserver":
self.worker_app = None
- self.worker_listeners = config.get("worker_listeners")
+ self.worker_listeners = config.get("worker_listeners", [])
self.worker_daemonize = config.get("worker_daemonize")
self.worker_pid_file = config.get("worker_pid_file")
self.worker_log_file = config.get("worker_log_file")
@@ -48,6 +48,17 @@ class WorkerConfig(Config):
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
self.worker_cpu_affinity = config.get("worker_cpu_affinity")
+ # This option is really only here to support `--manhole` command line
+ # argument.
+ manhole = config.get("worker_manhole")
+ if manhole:
+ self.worker_listeners.append({
+ "port": manhole,
+ "bind_addresses": ["127.0.0.1"],
+ "type": "manhole",
+ "tls": False,
+ })
+
if self.worker_listeners:
for listener in self.worker_listeners:
bind_address = listener.pop("bind_address", None)
@@ -57,3 +68,18 @@ class WorkerConfig(Config):
bind_addresses.append(bind_address)
elif not bind_addresses:
bind_addresses.append('')
+
+ def read_arguments(self, args):
+ # We support a bunch of command line arguments that override options in
+ # the config. A lot of these options have a worker_* prefix when running
+ # on workers so we also have to override them when command line options
+ # are specified.
+
+ if args.daemonize is not None:
+ self.worker_daemonize = args.daemonize
+ if args.log_config is not None:
+ self.worker_log_config = args.log_config
+ if args.log_file is not None:
+ self.worker_log_file = args.log_file
+ if args.manhole is not None:
+            self.worker_manhole = args.manhole
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 4e8919d657..8e2be218e2 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -167,7 +167,7 @@ class TransportLayerClient(object):
# generated by the json_data_callback.
json_data = transaction.get_dict()
- path = _create_v1_path("/send/%s", transaction.transaction_id)
+ path = _create_v1_path("/send/%s/", transaction.transaction_id)
response = yield self.client.put_json(
transaction.destination,
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index efb6bdca48..96d680a5ad 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -312,7 +312,7 @@ class BaseFederationServlet(object):
class FederationSendServlet(BaseFederationServlet):
- PATH = "/send/(?P<transaction_id>[^/]*)/?"
+ PATH = "/send/(?P<transaction_id>[^/]*)/"
def __init__(self, handler, server_name, **kwargs):
super(FederationSendServlet, self).__init__(
@@ -378,7 +378,7 @@ class FederationSendServlet(BaseFederationServlet):
class FederationEventServlet(BaseFederationServlet):
- PATH = "/event/(?P<event_id>[^/]*)/?"
+ PATH = "/event/(?P<event_id>[^/]*)/"
# This is when someone asks for a data item for a given server data_id pair.
def on_GET(self, origin, content, query, event_id):
@@ -386,7 +386,7 @@ class FederationEventServlet(BaseFederationServlet):
class FederationStateServlet(BaseFederationServlet):
- PATH = "/state/(?P<context>[^/]*)/?"
+ PATH = "/state/(?P<context>[^/]*)/"
# This is when someone asks for all data for a given context.
def on_GET(self, origin, content, query, context):
@@ -398,7 +398,7 @@ class FederationStateServlet(BaseFederationServlet):
class FederationStateIdsServlet(BaseFederationServlet):
- PATH = "/state_ids/(?P<room_id>[^/]*)/?"
+ PATH = "/state_ids/(?P<room_id>[^/]*)/"
def on_GET(self, origin, content, query, room_id):
return self.handler.on_state_ids_request(
@@ -409,7 +409,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
class FederationBackfillServlet(BaseFederationServlet):
- PATH = "/backfill/(?P<context>[^/]*)/?"
+ PATH = "/backfill/(?P<context>[^/]*)/"
def on_GET(self, origin, content, query, context):
versions = [x.decode('ascii') for x in query[b"v"]]
@@ -1080,7 +1080,7 @@ class FederationGroupsCategoriesServlet(BaseFederationServlet):
"""Get all categories for a group
"""
PATH = (
- "/groups/(?P<group_id>[^/]*)/categories/?"
+ "/groups/(?P<group_id>[^/]*)/categories/"
)
@defer.inlineCallbacks
@@ -1150,7 +1150,7 @@ class FederationGroupsRolesServlet(BaseFederationServlet):
"""Get roles in a group
"""
PATH = (
- "/groups/(?P<group_id>[^/]*)/roles/?"
+ "/groups/(?P<group_id>[^/]*)/roles/"
)
@defer.inlineCallbacks
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index c21da8343a..d92f8c529c 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -60,6 +60,12 @@ class UserDirectoryHandler(object):
self.update_user_directory = hs.config.update_user_directory
self.search_all_users = hs.config.user_directory_search_all_users
+ # If we're a worker, don't sleep when doing the initial room work, as it
+ # won't monopolise the master's CPU.
+ if hs.config.worker_app:
+ self.INITIAL_ROOM_SLEEP_MS = 0
+ self.INITIAL_USER_SLEEP_MS = 0
+
# When start up for the first time we need to populate the user_directory.
# This is a set of user_id's we've inserted already
self.initially_handled_users = set()
@@ -231,7 +237,7 @@ class UserDirectoryHandler(object):
unhandled_users = user_ids - self.initially_handled_users
yield self.store.add_profiles_to_user_dir(
- {user_id: users_with_profile[user_id] for user_id in unhandled_users},
+ {user_id: users_with_profile[user_id] for user_id in unhandled_users}
)
self.initially_handled_users |= unhandled_users
@@ -241,38 +247,58 @@ class UserDirectoryHandler(object):
# We also batch up inserts/updates, but try to avoid too many at once.
to_insert = set()
count = 0
- for user_id in user_ids:
- if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
- yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
-
- if not self.is_mine_id(user_id):
- count += 1
- continue
- if self.store.get_if_app_services_interested_in_user(user_id):
- count += 1
- continue
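+            # Public rooms only need a (user, room) entry in
+            # users_in_public_rooms; private rooms need every pair of users
+            # that share the room.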
+ if is_public:
+ for user_id in user_ids:
+ if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
+ yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
- for other_user_id in user_ids:
- if user_id == other_user_id:
+ if self.store.get_if_app_services_interested_in_user(user_id):
+ count += 1
continue
+ to_insert.add(user_id)
+ if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
+ yield self.store.add_users_in_public_rooms(room_id, to_insert)
+ to_insert.clear()
+
+ if to_insert:
+ yield self.store.add_users_in_public_rooms(room_id, to_insert)
+ to_insert.clear()
+ else:
+
+ for user_id in user_ids:
if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
- count += 1
- user_set = (user_id, other_user_id)
- to_insert.add(user_set)
+ if not self.is_mine_id(user_id):
+ count += 1
+ continue
- if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
- yield self.store.add_users_who_share_room(
- room_id, not is_public, to_insert
- )
- to_insert.clear()
+ if self.store.get_if_app_services_interested_in_user(user_id):
+ count += 1
+ continue
+
+ for other_user_id in user_ids:
+ if user_id == other_user_id:
+ continue
+
+ if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
+ yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
+ count += 1
+
+ user_set = (user_id, other_user_id)
+ to_insert.add(user_set)
+
+ if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
+ yield self.store.add_users_who_share_private_room(
+ room_id, not is_public, to_insert
+                                room_id, to_insert
+ to_insert.clear()
- if to_insert:
- yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
- to_insert.clear()
+ if to_insert:
+ yield self.store.add_users_who_share_private_room(room_id, to_insert)
+ to_insert.clear()
@defer.inlineCallbacks
def _handle_deltas(self, deltas):
@@ -445,34 +471,37 @@ class UserDirectoryHandler(object):
# Now we update users who share rooms with users.
users_with_profile = yield self.state.get_current_user_in_room(room_id)
- to_insert = set()
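+        # As in the initial spider above: public rooms get a simple
+        # (user, room) entry, while private rooms need the pairwise sharing
+        # table updated.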
+ if is_public:
+ yield self.store.add_users_in_public_rooms(room_id, (user_id,))
+ else:
+ to_insert = set()
- # First, if they're our user then we need to update for every user
- if self.is_mine_id(user_id):
+ # First, if they're our user then we need to update for every user
+ if self.is_mine_id(user_id):
- is_appservice = self.store.get_if_app_services_interested_in_user(user_id)
+ is_appservice = self.store.get_if_app_services_interested_in_user(user_id)
- # We don't care about appservice users.
- if not is_appservice:
- for other_user_id in users_with_profile:
- if user_id == other_user_id:
- continue
+ # We don't care about appservice users.
+ if not is_appservice:
+ for other_user_id in users_with_profile:
+ if user_id == other_user_id:
+ continue
- to_insert.add((user_id, other_user_id))
+ to_insert.add((user_id, other_user_id))
- # Next we need to update for every local user in the room
- for other_user_id in users_with_profile:
- if user_id == other_user_id:
- continue
+ # Next we need to update for every local user in the room
+ for other_user_id in users_with_profile:
+ if user_id == other_user_id:
+ continue
- is_appservice = self.store.get_if_app_services_interested_in_user(
- other_user_id
- )
- if self.is_mine_id(other_user_id) and not is_appservice:
- to_insert.add((other_user_id, user_id))
+ is_appservice = self.store.get_if_app_services_interested_in_user(
+ other_user_id
+ )
+ if self.is_mine_id(other_user_id) and not is_appservice:
+ to_insert.add((other_user_id, user_id))
- if to_insert:
- yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
+ if to_insert:
+ yield self.store.add_users_who_share_private_room(room_id, to_insert)
@defer.inlineCallbacks
def _handle_remove_user(self, room_id, user_id):
@@ -487,10 +516,10 @@ class UserDirectoryHandler(object):
# Remove user from sharing tables
yield self.store.remove_user_who_share_room(user_id, room_id)
- # Are they still in a room with members? If not, remove them entirely.
- users_in_room_with = yield self.store.get_users_who_share_room_from_dir(user_id)
+ # Are they still in any rooms? If not, remove them entirely.
+ rooms_user_is_in = yield self.store.get_user_dir_rooms_user_is_in(user_id)
- if len(users_in_room_with) == 0:
+ if len(rooms_user_is_in) == 0:
yield self.store.remove_from_user_dir(user_id)
@defer.inlineCallbacks
diff --git a/synapse/server.py b/synapse/server.py
index ee1506f2ad..dc8f1ccb8c 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -185,6 +185,10 @@ class HomeServer(object):
'registration_handler',
]
+ REQUIRED_ON_MASTER_STARTUP = [
+ "user_directory_handler",
+ ]
+
# This is overridden in derived application classes
# (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be
# instantiated during setup() for future return by get_datastore()
@@ -221,6 +225,15 @@ class HomeServer(object):
conn.commit()
logger.info("Finished setting up.")
+ def setup_master(self):
+ """
+ Some handlers have side effects on instantiation (like registering
+ background updates). This function causes them to be fetched, and
+ therefore instantiated, to run those side effects.
+ """
+ for i in self.REQUIRED_ON_MASTER_STARTUP:
+ getattr(self, "get_" + i)()
+
def get_reactor(self):
"""
Fetch the Twisted reactor in use by this HomeServer.
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index a0333d5309..7e3903859b 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -767,18 +767,25 @@ class SQLBaseStore(object):
"""
allvalues = {}
allvalues.update(keyvalues)
- allvalues.update(values)
allvalues.update(insertion_values)
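+        # With no values to UPDATE, the upsert can simply DO NOTHING on
+        # conflict: the existing row already has the desired key columns.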
+ if not values:
+ latter = "NOTHING"
+ else:
+ allvalues.update(values)
+ latter = (
+ "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
+ )
+
sql = (
"INSERT INTO %s (%s) VALUES (%s) "
- "ON CONFLICT (%s) DO UPDATE SET %s"
+ "ON CONFLICT (%s) DO %s"
) % (
table,
", ".join(k for k in allvalues),
", ".join("?" for _ in allvalues),
", ".join(k for k in keyvalues),
- ", ".join(k + "=EXCLUDED." + k for k in values),
+ latter
)
txn.execute(sql, list(allvalues.values()))
diff --git a/synapse/storage/schema/delta/53/user_share.sql b/synapse/storage/schema/delta/53/user_share.sql
index 14424ded0c..5831b1a6f8 100644
--- a/synapse/storage/schema/delta/53/user_share.sql
+++ b/synapse/storage/schema/delta/53/user_share.sql
@@ -16,9 +16,6 @@
-- Old disused version of the tables below.
DROP TABLE IF EXISTS users_who_share_rooms;
--- This is no longer used because it's duplicated by the users_who_share_public_rooms
-DROP TABLE IF EXISTS users_in_public_rooms;
-
-- Tables keeping track of what users share rooms. This is a map of local users
-- to local or remote users, per room. Remote users cannot be in the user_id
-- column, only the other_user_id column. There are two tables, one for public
diff --git a/synapse/storage/schema/delta/53/users_in_public_rooms.sql b/synapse/storage/schema/delta/53/users_in_public_rooms.sql
new file mode 100644
index 0000000000..f7827ca6d2
--- /dev/null
+++ b/synapse/storage/schema/delta/53/users_in_public_rooms.sql
@@ -0,0 +1,28 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- We don't need the old version of this table.
+DROP TABLE IF EXISTS users_in_public_rooms;
+
+-- Old version of users_in_public_rooms
+DROP TABLE IF EXISTS users_who_share_public_rooms;
+
+-- Track what users are in public rooms.
+CREATE TABLE IF NOT EXISTS users_in_public_rooms (
+ user_id TEXT NOT NULL,
+ room_id TEXT NOT NULL
+);
+
+CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms(user_id, room_id);
diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py
index 2317d22ed6..1c00b956e5 100644
--- a/synapse/storage/user_directory.py
+++ b/synapse/storage/user_directory.py
@@ -21,12 +21,11 @@ from six import iteritems
from twisted.internet import defer
from synapse.api.constants import EventTypes, JoinRules
+from synapse.storage._base import SQLBaseStore
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.storage.state import StateFilter
from synapse.types import get_domain_from_id, get_localpart_from_id
-from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
-
-from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached
logger = logging.getLogger(__name__)
@@ -242,14 +241,7 @@ class UserDirectoryStore(SQLBaseStore):
txn, table="user_directory_search", keyvalues={"user_id": user_id}
)
self._simple_delete_txn(
- txn,
- table="users_who_share_public_rooms",
- keyvalues={"user_id": user_id},
- )
- self._simple_delete_txn(
- txn,
- table="users_who_share_public_rooms",
- keyvalues={"other_user_id": user_id},
+ txn, table="users_in_public_rooms", keyvalues={"user_id": user_id}
)
self._simple_delete_txn(
txn,
@@ -271,9 +263,9 @@ class UserDirectoryStore(SQLBaseStore):
in the given room_id
"""
user_ids_share_pub = yield self._simple_select_onecol(
- table="users_who_share_public_rooms",
+ table="users_in_public_rooms",
keyvalues={"room_id": room_id},
- retcol="other_user_id",
+ retcol="user_id",
desc="get_users_in_dir_due_to_room",
)
@@ -311,26 +303,19 @@ class UserDirectoryStore(SQLBaseStore):
rows = yield self._execute("get_all_local_users", None, sql)
defer.returnValue([name for name, in rows])
- def add_users_who_share_room(self, room_id, share_private, user_id_tuples):
- """Insert entries into the users_who_share_*_rooms table. The first
+ def add_users_who_share_private_room(self, room_id, user_id_tuples):
+ """Insert entries into the users_who_share_private_rooms table. The first
user should be a local user.
Args:
room_id (str)
- share_private (bool): Is the room private
user_id_tuples([(str, str)]): iterable of 2-tuple of user IDs.
"""
def _add_users_who_share_room_txn(txn):
-
- if share_private:
- tbl = "users_who_share_private_rooms"
- else:
- tbl = "users_who_share_public_rooms"
-
self._simple_upsert_many_txn(
txn,
- table=tbl,
+ table="users_who_share_private_rooms",
key_names=["user_id", "other_user_id", "room_id"],
key_values=[
(user_id, other_user_id, room_id)
@@ -339,15 +324,35 @@ class UserDirectoryStore(SQLBaseStore):
value_names=(),
value_values=None,
)
- for user_id, other_user_id in user_id_tuples:
- txn.call_after(
- self.get_users_who_share_room_from_dir.invalidate, (user_id,)
- )
return self.runInteraction(
"add_users_who_share_room", _add_users_who_share_room_txn
)
+ def add_users_in_public_rooms(self, room_id, user_ids):
+ """Insert entries into the users_who_share_private_rooms table. The first
+ user should be a local user.
+
+ Args:
+ room_id (str)
+ user_ids (list[str])
+ """
+
+ def _add_users_in_public_rooms_txn(txn):
+
+ self._simple_upsert_many_txn(
+ txn,
+ table="users_in_public_rooms",
+ key_names=["user_id", "room_id"],
+ key_values=[(user_id, room_id) for user_id in user_ids],
+ value_names=(),
+ value_values=None,
+ )
+
+ return self.runInteraction(
+ "add_users_in_public_rooms", _add_users_in_public_rooms_txn
+ )
+
def remove_user_who_share_room(self, user_id, room_id):
"""
Deletes entries in the users_who_share_*_rooms table. The first
@@ -371,25 +376,18 @@ class UserDirectoryStore(SQLBaseStore):
)
self._simple_delete_txn(
txn,
- table="users_who_share_public_rooms",
+ table="users_in_public_rooms",
keyvalues={"user_id": user_id, "room_id": room_id},
)
- self._simple_delete_txn(
- txn,
- table="users_who_share_public_rooms",
- keyvalues={"other_user_id": user_id, "room_id": room_id},
- )
- txn.call_after(
- self.get_users_who_share_room_from_dir.invalidate, (user_id,)
- )
return self.runInteraction(
"remove_user_who_share_room", _remove_user_who_share_room_txn
)
- @cachedInlineCallbacks(max_entries=500000, iterable=True)
- def get_users_who_share_room_from_dir(self, user_id):
- """Returns the set of users who share a room with `user_id`
+ @defer.inlineCallbacks
+ def get_user_dir_rooms_user_is_in(self, user_id):
+ """
+ Returns the rooms that a user is in.
Args:
user_id(str): Must be a local user
@@ -400,23 +398,19 @@ class UserDirectoryStore(SQLBaseStore):
rows = yield self._simple_select_onecol(
table="users_who_share_private_rooms",
keyvalues={"user_id": user_id},
- retcol="other_user_id",
- desc="get_users_who_share_room_with_user",
+ retcol="room_id",
+ desc="get_rooms_user_is_in",
)
pub_rows = yield self._simple_select_onecol(
- table="users_who_share_public_rooms",
+ table="users_in_public_rooms",
keyvalues={"user_id": user_id},
- retcol="other_user_id",
- desc="get_users_who_share_room_with_user",
+ retcol="room_id",
+ desc="get_rooms_user_is_in",
)
users = set(pub_rows)
users.update(rows)
-
- # Remove the user themselves from this list.
- users.discard(user_id)
-
defer.returnValue(list(users))
@defer.inlineCallbacks
@@ -452,10 +446,9 @@ class UserDirectoryStore(SQLBaseStore):
def _delete_all_from_user_dir_txn(txn):
txn.execute("DELETE FROM user_directory")
txn.execute("DELETE FROM user_directory_search")
- txn.execute("DELETE FROM users_who_share_public_rooms")
+ txn.execute("DELETE FROM users_in_public_rooms")
txn.execute("DELETE FROM users_who_share_private_rooms")
txn.call_after(self.get_user_in_directory.invalidate_all)
- txn.call_after(self.get_users_who_share_room_from_dir.invalidate_all)
return self.runInteraction(
"delete_all_from_user_dir", _delete_all_from_user_dir_txn
@@ -560,23 +553,19 @@ class UserDirectoryStore(SQLBaseStore):
"""
if self.hs.config.user_directory_search_all_users:
- # make s.user_id null to keep the ordering algorithm happy
- join_clause = """
- CROSS JOIN (SELECT NULL as user_id) AS s
- """
join_args = ()
where_clause = "1=1"
else:
- join_clause = """
- LEFT JOIN (
- SELECT other_user_id AS user_id FROM users_who_share_public_rooms
- UNION
- SELECT other_user_id AS user_id FROM users_who_share_private_rooms
- WHERE user_id = ?
- ) AS p USING (user_id)
- """
join_args = (user_id,)
- where_clause = "p.user_id IS NOT NULL"
+ where_clause = """
+ (
+ EXISTS (select 1 from users_in_public_rooms WHERE user_id = t.user_id)
+ OR EXISTS (
+ SELECT 1 FROM users_who_share_private_rooms
+ WHERE user_id = ? AND other_user_id = t.user_id
+ )
+ )
+ """
if isinstance(self.database_engine, PostgresEngine):
full_query, exact_query, prefix_query = _parse_query_postgres(search_term)
@@ -588,9 +577,8 @@ class UserDirectoryStore(SQLBaseStore):
# search: (domain, _, display name, localpart)
sql = """
SELECT d.user_id AS user_id, display_name, avatar_url
- FROM user_directory_search
+ FROM user_directory_search as t
INNER JOIN user_directory AS d USING (user_id)
- %s
WHERE
%s
AND vector @@ to_tsquery('english', ?)
@@ -617,7 +605,6 @@ class UserDirectoryStore(SQLBaseStore):
avatar_url IS NULL
LIMIT ?
""" % (
- join_clause,
where_clause,
)
args = join_args + (full_query, exact_query, prefix_query, limit + 1)
@@ -626,9 +613,8 @@ class UserDirectoryStore(SQLBaseStore):
sql = """
SELECT d.user_id AS user_id, display_name, avatar_url
- FROM user_directory_search
+ FROM user_directory_search as t
INNER JOIN user_directory AS d USING (user_id)
- %s
WHERE
%s
AND value MATCH ?
@@ -638,7 +624,6 @@ class UserDirectoryStore(SQLBaseStore):
avatar_url IS NULL
LIMIT ?
""" % (
- join_clause,
where_clause,
)
args = join_args + (search_query, limit + 1)
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index b8e97390de..13486930fb 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -180,7 +180,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
put_json = self.hs.get_http_client().put_json
put_json.assert_called_once_with(
"farm",
- path="/_matrix/federation/v1/send/1000000",
+ path="/_matrix/federation/v1/send/1000000/",
data=_expect_edu_transaction(
"m.typing",
content={
@@ -201,7 +201,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
(request, channel) = self.make_request(
"PUT",
- "/_matrix/federation/v1/send/1000000",
+ "/_matrix/federation/v1/send/1000000/",
_make_edu_transaction_json(
"m.typing",
content={
@@ -257,7 +257,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
put_json = self.hs.get_http_client().put_json
put_json.assert_called_once_with(
"farm",
- path="/_matrix/federation/v1/send/1000000",
+ path="/_matrix/federation/v1/send/1000000/",
data=_expect_edu_transaction(
"m.typing",
content={
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index a16a2dc67b..114807efc1 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -114,13 +114,13 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.helper.join(room, user=u2, tok=u2_token)
# Check we have populated the database correctly.
- shares_public = self.get_users_who_share_public_rooms()
shares_private = self.get_users_who_share_private_rooms()
+ public_users = self.get_users_in_public_rooms()
- self.assertEqual(shares_public, [])
self.assertEqual(
self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)])
)
+ self.assertEqual(public_users, [])
# We get one search result when searching for user2 by user1.
s = self.get_success(self.handler.search_users(u1, "user2", 10))
@@ -138,11 +138,11 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.helper.leave(room, user=u2, tok=u2_token)
# Check we have removed the values.
- shares_public = self.get_users_who_share_public_rooms()
shares_private = self.get_users_who_share_private_rooms()
+ public_users = self.get_users_in_public_rooms()
- self.assertEqual(shares_public, [])
self.assertEqual(self._compress_shared(shares_private), set())
+ self.assertEqual(public_users, [])
# User1 now gets no search results for any of the other users.
s = self.get_success(self.handler.search_users(u1, "user2", 10))
@@ -160,14 +160,18 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
r.add((i["user_id"], i["other_user_id"], i["room_id"]))
return r
- def get_users_who_share_public_rooms(self):
- return self.get_success(
+ def get_users_in_public_rooms(self):
+ r = self.get_success(
self.store._simple_select_list(
- "users_who_share_public_rooms",
+ "users_in_public_rooms",
None,
- ["user_id", "other_user_id", "room_id"],
+ ("user_id", "room_id"),
)
)
+ retval = []
+ for i in r:
+ retval.append((i["user_id"], i["room_id"]))
+ return retval
def get_users_who_share_private_rooms(self):
return self.get_success(
@@ -200,11 +204,12 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.get_success(self.store.update_user_directory_stream_pos(None))
self.get_success(self.store.delete_all_from_user_dir())
- shares_public = self.get_users_who_share_public_rooms()
shares_private = self.get_users_who_share_private_rooms()
+ public_users = self.get_users_in_public_rooms()
+ # Nothing updated yet
self.assertEqual(shares_private, [])
- self.assertEqual(shares_public, [])
+ self.assertEqual(public_users, [])
# Reset the handled users caches
self.handler.initially_handled_users = set()
@@ -219,12 +224,12 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.get_success(d)
- shares_public = self.get_users_who_share_public_rooms()
shares_private = self.get_users_who_share_private_rooms()
+ public_users = self.get_users_in_public_rooms()
- # User 1 and User 2 share public rooms
+ # User 1 and User 2 are in the same public room
self.assertEqual(
- self._compress_shared(shares_public), set([(u1, u2, room), (u2, u1, room)])
+ set(public_users), set([(u1, room), (u2, room)])
)
# User 1 and User 3 share private rooms
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index a2a652a235..512d76e7a3 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -41,8 +41,8 @@ class UserDirectoryStoreTestCase(unittest.TestCase):
BOBBY: ProfileInfo(None, "bobby"),
},
)
- yield self.store.add_users_who_share_room(
- "!room:id", False, ((ALICE, BOB), (BOB, ALICE))
+ yield self.store.add_users_in_public_rooms(
+ "!room:id", (ALICE, BOB)
)
@defer.inlineCallbacks
diff --git a/tests/utils.py b/tests/utils.py
index 9c8dc9dbce..03b5a05b22 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -331,6 +331,8 @@ def setup_test_homeserver(
cleanup_func(cleanup)
hs.setup()
+ if homeserverToUse.__name__ == "TestHomeServer":
+ hs.setup_master()
else:
hs = homeserverToUse(
name,
|