diff --git a/.circleci/config.yml b/.circleci/config.yml
index 521aca22ef..ec3848b048 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,5 +1,27 @@
version: 2
jobs:
+ dockerhubuploadrelease:
+ machine: true
+ steps:
+ - checkout
+ - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG} .
+ - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
+ - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+ - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
+ - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
+ dockerhubuploadlatest:
+ machine: true
+ steps:
+ - checkout
+ - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1} .
+ - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1}-py3 --build-arg PYTHON_VERSION=3.6 .
+ - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+ - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1} matrixdotorg/synapse:latest
+ - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1}-py3 matrixdotorg/synapse:latest-py3
+ - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}
+ - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}-py3
+ - run: docker push matrixdotorg/synapse:latest
+ - run: docker push matrixdotorg/synapse:latest-py3
sytestpy2:
machine: true
steps:
@@ -131,3 +153,13 @@ workflows:
filters:
branches:
ignore: /develop|master|release-.*/
+ - dockerhubuploadrelease:
+ filters:
+ tags:
+          only: /v[0-9]\.[0-9]+\.[0-9]+.*/
+ branches:
+ ignore: /.*/
+ - dockerhubuploadlatest:
+ filters:
+ branches:
+ only: master
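
Note: CircleCI applies the `only:` filter as a regular expression against the full tag name, so `dockerhubuploadrelease` runs only for version tags. A standalone sketch, illustrative and not part of this diff, of what such a filter accepts (the sketch escapes the dots so they match literal dots):

```python
# Quick check of the tag filter used above: matches release tags such as
# "v0.33.6" and "v0.33.6rc1", but not branch names like "develop".
import re

TAG_FILTER = re.compile(r"v[0-9]\.[0-9]+\.[0-9]+.*")

for ref in ("v0.33.6", "v0.33.6rc1", "develop"):
    print(ref, bool(TAG_FILTER.match(ref)))
```
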
diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh
index 9614eb91b6..6b0bf3aa48 100755
--- a/.circleci/merge_base_branch.sh
+++ b/.circleci/merge_base_branch.sh
@@ -9,13 +9,16 @@ source $BASH_ENV
if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
- echo "Can't figure out what the PR number is!"
- exit 1
+ echo "Can't figure out what the PR number is! Assuming merge target is develop."
+
+ # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
+ # can probably assume it's based on it and will be merged into it.
+ GITBASE="develop"
+else
+ # Get the reference, using the GitHub API
+ GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
fi
-# Get the reference, using the GitHub API
-GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
-
# Show what we are before
git show -s
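
The script now falls back to `develop` when no PR exists; otherwise it still asks the GitHub API for the PR's base branch. An equivalent of the `curl | jq` pipeline sketched in Python (assuming the `requests` package; the script itself uses curl and jq):

```python
# Fetch the base branch ("base.ref") of a synapse PR, mirroring the
# curl | jq '.base.ref' pipeline in merge_base_branch.sh.
import requests

def get_base_ref(pr_number):
    url = "https://api.github.com/repos/matrix-org/synapse/pulls/%s" % pr_number
    resp = requests.get(url)
    resp.raise_for_status()
    return resp.json()["base"]["ref"]  # e.g. "develop"
```
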
diff --git a/.travis.yml b/.travis.yml
index b6faca4b92..2077f6af72 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -21,6 +21,9 @@ matrix:
env: TOX_ENV=py27
- python: 2.7
+ env: TOX_ENV=py27-old
+
+ - python: 2.7
env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
services:
- postgresql
diff --git a/CHANGES.md b/CHANGES.md
index 45d3cdb131..048b9f95db 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,11 +1,91 @@
+Synapse 0.33.6 (2018-10-04)
+===========================
+
+Internal Changes
+----------------
+
+- Pin to prometheus_client<0.4 to avoid renaming all of our metrics ([\#4002](https://github.com/matrix-org/synapse/issues/4002))
+
+
+Synapse 0.33.6rc1 (2018-10-03)
+==============================
+
+Features
+--------
+
+- Add the ability to change MAX_UPLOAD_SIZE via the Docker container's environment variables. ([\#3883](https://github.com/matrix-org/synapse/issues/3883))
+- Report "python_version" in the phone home stats ([\#3894](https://github.com/matrix-org/synapse/issues/3894))
+- Always LL ourselves if we're in a room ([\#3916](https://github.com/matrix-org/synapse/issues/3916))
+- Include eventid in log lines when processing incoming federation transactions ([\#3959](https://github.com/matrix-org/synapse/issues/3959))
+- Remove spurious check which made 'localhost' servers not work ([\#3964](https://github.com/matrix-org/synapse/issues/3964))
+
+
+Bugfixes
+--------
+
+- Fix problem when playing media from Chrome using direct URL (thanks @remjey!) ([\#3578](https://github.com/matrix-org/synapse/issues/3578))
+- Support registering regular users non-interactively with the register_new_matrix_user script ([\#3836](https://github.com/matrix-org/synapse/issues/3836))
+- Fix broken invite email links for self-hosted Riot instances ([\#3868](https://github.com/matrix-org/synapse/issues/3868))
+- Don't ratelimit autojoins ([\#3879](https://github.com/matrix-org/synapse/issues/3879))
+- Fix 500 error when deleting unknown room alias ([\#3889](https://github.com/matrix-org/synapse/issues/3889))
+- Fix some b'abcd' noise in logs and metrics ([\#3892](https://github.com/matrix-org/synapse/issues/3892), [\#3895](https://github.com/matrix-org/synapse/issues/3895))
+- When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers. ([\#3899](https://github.com/matrix-org/synapse/issues/3899))
+- Fix incorrect server-name indication for outgoing federation requests ([\#3907](https://github.com/matrix-org/synapse/issues/3907))
+- Fix adding client IPs to the database failing on Python 3. ([\#3908](https://github.com/matrix-org/synapse/issues/3908))
+- Fix bug where things occasionally were not being timed out correctly. ([\#3910](https://github.com/matrix-org/synapse/issues/3910))
+- Fix bug where outbound federation would stop talking to some servers when using workers ([\#3914](https://github.com/matrix-org/synapse/issues/3914))
+- Fix some instances of ExpiringCache not expiring cache items ([\#3932](https://github.com/matrix-org/synapse/issues/3932), [\#3980](https://github.com/matrix-org/synapse/issues/3980))
+- Fix out-of-bounds error when LLing yourself ([\#3936](https://github.com/matrix-org/synapse/issues/3936))
+- Sending server notices regarding user consent now works on Python 3. ([\#3938](https://github.com/matrix-org/synapse/issues/3938))
+- Fix exceptions from metrics handler ([\#3956](https://github.com/matrix-org/synapse/issues/3956))
+- Fix error message for events with m.room.create missing from auth_events ([\#3960](https://github.com/matrix-org/synapse/issues/3960))
+- Fix errors due to concurrent monthly_active_user upserts ([\#3961](https://github.com/matrix-org/synapse/issues/3961))
+- Fix exceptions when processing incoming events over federation ([\#3968](https://github.com/matrix-org/synapse/issues/3968))
+- Replaced all occurrences of e.message with str(e). Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970))
+- Fix lazy loaded sync in the presence of rejected state events ([\#3986](https://github.com/matrix-org/synapse/issues/3986))
+- Fix error when logging incomplete HTTP requests ([\#3990](https://github.com/matrix-org/synapse/issues/3990))
+
+
+Internal Changes
+----------------
+
+- Unit tests can now be run under PostgreSQL in Docker using ``test_postgresql.sh``. ([\#3699](https://github.com/matrix-org/synapse/issues/3699))
+- Speed up calculation of typing updates for replication ([\#3794](https://github.com/matrix-org/synapse/issues/3794))
+- Remove documentation regarding installation on Cygwin; the use of WSL is recommended instead. ([\#3873](https://github.com/matrix-org/synapse/issues/3873))
+- Fix typo in README, synaspse -> synapse ([\#3897](https://github.com/matrix-org/synapse/issues/3897))
+- Increase the timeout when filling missing events in federation requests ([\#3903](https://github.com/matrix-org/synapse/issues/3903))
+- Improve the logging when handling a federation transaction ([\#3904](https://github.com/matrix-org/synapse/issues/3904), [\#3966](https://github.com/matrix-org/synapse/issues/3966))
+- Improve logging of outbound federation requests ([\#3906](https://github.com/matrix-org/synapse/issues/3906), [\#3909](https://github.com/matrix-org/synapse/issues/3909))
+- Fix the docker image building on python 3 ([\#3911](https://github.com/matrix-org/synapse/issues/3911))
+- Add a regression test for logging failed HTTP requests on Python 3. ([\#3912](https://github.com/matrix-org/synapse/issues/3912))
+- Comments and interface cleanup for on_receive_pdu ([\#3924](https://github.com/matrix-org/synapse/issues/3924))
+- Fix spurious exceptions when the remote HTTP client closes the connection ([\#3925](https://github.com/matrix-org/synapse/issues/3925))
+- Log exceptions thrown by background tasks ([\#3927](https://github.com/matrix-org/synapse/issues/3927))
+- Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933), [\#3991](https://github.com/matrix-org/synapse/issues/3991))
+- Automate pushes to docker hub ([\#3946](https://github.com/matrix-org/synapse/issues/3946))
+- Require attrs 16.0.0 or later ([\#3947](https://github.com/matrix-org/synapse/issues/3947))
+- Fix incompatibility with python3 on alpine ([\#3948](https://github.com/matrix-org/synapse/issues/3948))
+- Run the test suite on the oldest supported versions of our dependencies in CI. ([\#3952](https://github.com/matrix-org/synapse/issues/3952))
+- CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches. ([\#3957](https://github.com/matrix-org/synapse/issues/3957))
+- Fix docstrings and add tests for state store methods ([\#3958](https://github.com/matrix-org/synapse/issues/3958))
+- Fix docstring for FederationClient.get_state_for_room ([\#3963](https://github.com/matrix-org/synapse/issues/3963))
+- Run notify_app_services as a bg process ([\#3965](https://github.com/matrix-org/synapse/issues/3965))
+- Clarifications in FederationHandler ([\#3967](https://github.com/matrix-org/synapse/issues/3967))
+- Further reduce the docker image size ([\#3972](https://github.com/matrix-org/synapse/issues/3972))
+- Build py3 docker images for docker hub too ([\#3976](https://github.com/matrix-org/synapse/issues/3976))
+- Updated the installation instructions to point to the matrix-synapse package on PyPI. ([\#3985](https://github.com/matrix-org/synapse/issues/3985))
+- Disable USE_FROZEN_DICTS for unittests by default. ([\#3987](https://github.com/matrix-org/synapse/issues/3987))
+- Remove unused Jenkins and development related files from the repo. ([\#3988](https://github.com/matrix-org/synapse/issues/3988))
+- Improve stacktraces in certain exceptions in the logs ([\#3989](https://github.com/matrix-org/synapse/issues/3989))
+
+
Synapse 0.33.5.1 (2018-09-25)
=============================
Internal Changes
----------------
-- Fix incompatibility with older Twisted version in tests. Thanks
- @OlegGirko! ([\#3940](https://github.com/matrix-org/synapse/issues/3940))
+- Fix incompatibility with older Twisted version in tests. Thanks @OlegGirko! ([\#3940](https://github.com/matrix-org/synapse/issues/3940))
Synapse 0.33.5 (2018-09-24)
diff --git a/MANIFEST.in b/MANIFEST.in
index 47ae5a77b9..c6a37ac685 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -23,13 +23,9 @@ recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js
-exclude jenkins.sh
-exclude jenkins*.sh
-exclude jenkins*
exclude Dockerfile
exclude .dockerignore
exclude test_postgresql.sh
-recursive-exclude jenkins *.sh
include pyproject.toml
recursive-include changelog.d *
@@ -38,3 +34,6 @@ prune .github
prune demo/etc
prune docker
prune .circleci
+
+exclude jenkins*
+recursive-exclude jenkins *.sh
diff --git a/MAP.rst b/MAP.rst
deleted file mode 100644
index 0f8e9818a8..0000000000
--- a/MAP.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-Directory Structure
-===================
-
-Warning: this may be a bit stale...
-
-::
-
- .
- ├── cmdclient Basic CLI python Matrix client
- ├── demo Scripts for running standalone Matrix demos
- ├── docs All doc, including the draft Matrix API spec
- │  ├── client-server The client-server Matrix API spec
- │  ├── model Domain-specific elements of the Matrix API spec
- │  ├── server-server The server-server model of the Matrix API spec
- │  └── sphinx The internal API doc of the Synapse homeserver
- ├── experiments Early experiments of using Synapse's internal APIs
- ├── graph Visualisation of Matrix's distributed message store
- ├── synapse The reference Matrix homeserver implementation
- │  ├── api Common building blocks for the APIs
- │  │  ├── events Definition of state representation Events
- │  │  └── streams Definition of streamable Event objects
- │  ├── app The __main__ entry point for the homeserver
- │  ├── crypto The PKI client/server used for secure federation
- │  │  └── resource PKI helper objects (e.g. keys)
- │  ├── federation Server-server state replication logic
- │  ├── handlers The main business logic of the homeserver
- │  ├── http Wrappers around Twisted's HTTP server & client
- │  ├── rest Servlet-style RESTful API
- │  ├── storage Persistence subsystem (currently only sqlite3)
- │  │  └── schema sqlite persistence schema
- │  └── util Synapse-specific utilities
- ├── tests Unit tests for the Synapse homeserver
- └── webclient Basic AngularJS Matrix web client
-
-
diff --git a/README.rst b/README.rst
index 5547f617ba..e1ea351f84 100644
--- a/README.rst
+++ b/README.rst
@@ -81,7 +81,7 @@ Thanks for using Matrix!
Synapse Installation
====================
-Synapse is the reference python/twisted Matrix homeserver implementation.
+Synapse is the reference Python/Twisted Matrix homeserver implementation.
System requirements:
@@ -91,12 +91,13 @@ System requirements:
Installing from source
----------------------
+
(Prebuilt packages are available for some platforms - see `Platform-Specific
Instructions`_.)
-Synapse is written in python but some of the libraries it uses are written in
-C. So before we can install synapse itself we need a working C compiler and the
-header files for python C extensions.
+Synapse is written in Python but some of the libraries it uses are written in
+C. So before we can install Synapse itself we need a working C compiler and the
+header files for Python C extensions.
Installing prerequisites on Ubuntu or Debian::
@@ -143,18 +144,24 @@ Installing prerequisites on OpenBSD::
doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
libxslt
-To install the synapse homeserver run::
+To install the Synapse homeserver run::
virtualenv -p python2.7 ~/.synapse
source ~/.synapse/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
- pip install https://github.com/matrix-org/synapse/tarball/master
+ pip install matrix-synapse
-This installs synapse, along with the libraries it uses, into a virtual
+This installs Synapse, along with the libraries it uses, into a virtual
environment under ``~/.synapse``. Feel free to pick a different directory
if you prefer.
+This Synapse installation can then be upgraded later by using pip again with the
+update flag::
+
+ source ~/.synapse/bin/activate
+ pip install -U matrix-synapse
+
In case of problems, please see the _`Troubleshooting` section below.
There is an offical synapse image available at
@@ -167,7 +174,7 @@ Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
https://hub.docker.com/r/avhost/docker-matrix/tags/
-Configuring synapse
+Configuring Synapse
-------------------
Before you can start Synapse, you will need to generate a configuration
@@ -249,26 +256,6 @@ Setting up a TURN server
For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See `<docs/turn-howto.rst>`_ for details.
-IPv6
-----
-
-As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph
-for providing PR #1696.
-
-However, for federation to work on hosts with IPv6 DNS servers you **must**
-be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002
-for details. We can't make Synapse depend on Twisted 17.1 by default
-yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909)
-so if you are using operating system dependencies you'll have to install your
-own Twisted 17.1 package via pip or backports etc.
-
-If you're running in a virtualenv then pip should have installed the newest
-Twisted automatically, but if your virtualenv is old you will need to manually
-upgrade to a newer Twisted dependency via:
-
- pip install Twisted>=17.1.0
-
-
Running Synapse
===============
@@ -444,8 +431,7 @@ settings require a slightly more difficult installation process.
using the ``.`` command, rather than ``bash``'s ``source``.
5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
webpages for their titles.
-6) Use ``pip`` to install this repository: ``pip install
- https://github.com/matrix-org/synapse/tarball/master``
+6) Use ``pip`` to install this repository: ``pip install matrix-synapse``
7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
chance of a compromised Synapse server being used to take over your box.
@@ -473,7 +459,7 @@ Troubleshooting
Troubleshooting Installation
----------------------------
-Synapse requires pip 1.7 or later, so if your OS provides too old a version you
+Synapse requires pip 8 or later, so if your OS provides too old a version you
may need to manually upgrade it::
sudo pip install --upgrade pip
@@ -508,28 +494,6 @@ failing, e.g.::
pip install twisted
-On OS X, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
-will need to export CFLAGS=-Qunused-arguments.
-
-Troubleshooting Running
------------------------
-
-If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
-to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
-encryption and digital signatures.
-Unfortunately PyNACL currently has a few issues
-(https://github.com/pyca/pynacl/issues/53) and
-(https://github.com/pyca/pynacl/issues/79) that mean it may not install
-correctly, causing all tests to fail with errors about missing "sodium.h". To
-fix try re-installing from PyPI or directly from
-(https://github.com/pyca/pynacl)::
-
- # Install from PyPI
- pip install --user --upgrade --force pynacl
-
- # Install from github
- pip install --user https://github.com/pyca/pynacl/tarball/master
-
Running out of File Handles
~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/UPGRADE.rst b/UPGRADE.rst
index f6bb1070b1..6cf3169f75 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -18,7 +18,7 @@ instructions that may be required are listed later in this document.
.. code:: bash
- pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
+ pip install --upgrade --process-dependency-links matrix-synapse
# restart synapse
synctl restart
@@ -48,11 +48,11 @@ returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
-Upgrading to $NEXT_VERSION
+Upgrading to v0.27.3
====================
This release expands the anonymous usage stats sent if the opt-in
-``report_stats`` configuration is set to ``true``. We now capture RSS memory
+``report_stats`` configuration is set to ``true``. We now capture RSS memory
and cpu use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.
diff --git a/changelog.d/3578.bugfix b/changelog.d/3578.bugfix
deleted file mode 100644
index 9c52b6fa7e..0000000000
--- a/changelog.d/3578.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix problem when playing media from Chrome using direct URL (thanks @remjey!)
diff --git a/changelog.d/3699.misc b/changelog.d/3699.misc
deleted file mode 100644
index 437efbd98f..0000000000
--- a/changelog.d/3699.misc
+++ /dev/null
@@ -1,2 +0,0 @@
-Unit tests can now be run under PostgreSQL in Docker using
-``test_postgresql.sh``.
diff --git a/changelog.d/3868.bugfix b/changelog.d/3868.bugfix
deleted file mode 100644
index 99da8389a5..0000000000
--- a/changelog.d/3868.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix broken invite email links for self hosted riots
diff --git a/changelog.d/3873.misc b/changelog.d/3873.misc
deleted file mode 100644
index 8104b5c085..0000000000
--- a/changelog.d/3873.misc
+++ /dev/null
@@ -1,2 +0,0 @@
-Remove documentation regarding installation on Cygwin, the use of WSL is
-recommended instead.
diff --git a/changelog.d/3879.bugfix b/changelog.d/3879.bugfix
deleted file mode 100644
index 82cb2a8ebc..0000000000
--- a/changelog.d/3879.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Don't ratelimit autojoins
diff --git a/changelog.d/3883.feature b/changelog.d/3883.feature
deleted file mode 100644
index c11e5c5309..0000000000
--- a/changelog.d/3883.feature
+++ /dev/null
@@ -1 +0,0 @@
-Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables.
\ No newline at end of file
diff --git a/changelog.d/3889.bugfix b/changelog.d/3889.bugfix
deleted file mode 100644
index e976425987..0000000000
--- a/changelog.d/3889.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix 500 error when deleting unknown room alias
diff --git a/changelog.d/3892.bugfix b/changelog.d/3892.bugfix
deleted file mode 100644
index 8b30afab04..0000000000
--- a/changelog.d/3892.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix some b'abcd' noise in logs and metrics
diff --git a/changelog.d/3894.feature b/changelog.d/3894.feature
deleted file mode 100644
index 1ed0cccdb2..0000000000
--- a/changelog.d/3894.feature
+++ /dev/null
@@ -1 +0,0 @@
-Report "python_version" in the phone home stats
diff --git a/changelog.d/3895.bugfix b/changelog.d/3895.bugfix
deleted file mode 100644
index 8b30afab04..0000000000
--- a/changelog.d/3895.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix some b'abcd' noise in logs and metrics
diff --git a/changelog.d/3897.misc b/changelog.d/3897.misc
deleted file mode 100644
index 87e7ac796e..0000000000
--- a/changelog.d/3897.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix typo in README, synaspse -> synapse
\ No newline at end of file
diff --git a/changelog.d/3899.bugfix b/changelog.d/3899.bugfix
deleted file mode 100644
index 5120e3a823..0000000000
--- a/changelog.d/3899.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers.
diff --git a/changelog.d/3903.misc b/changelog.d/3903.misc
deleted file mode 100644
index 49b64bf333..0000000000
--- a/changelog.d/3903.misc
+++ /dev/null
@@ -1 +0,0 @@
-Increase the timeout when filling missing events in federation requests
\ No newline at end of file
diff --git a/changelog.d/3904.misc b/changelog.d/3904.misc
deleted file mode 100644
index 1e3c8e1706..0000000000
--- a/changelog.d/3904.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve the logging when handling a federation transaction
\ No newline at end of file
diff --git a/changelog.d/3906.misc b/changelog.d/3906.misc
deleted file mode 100644
index 11709186d3..0000000000
--- a/changelog.d/3906.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve logging of outbound federation requests
\ No newline at end of file
diff --git a/changelog.d/3907.bugfix b/changelog.d/3907.bugfix
deleted file mode 100644
index 45e010c052..0000000000
--- a/changelog.d/3907.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix incorrect server-name indication for outgoing federation requests
\ No newline at end of file
diff --git a/changelog.d/3908.bugfix b/changelog.d/3908.bugfix
deleted file mode 100644
index 518aee6c4d..0000000000
--- a/changelog.d/3908.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix adding client IPs to the database failing on Python 3.
\ No newline at end of file
diff --git a/changelog.d/3909.misc b/changelog.d/3909.misc
deleted file mode 100644
index 11709186d3..0000000000
--- a/changelog.d/3909.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve logging of outbound federation requests
\ No newline at end of file
diff --git a/changelog.d/3910.bugfix b/changelog.d/3910.bugfix
deleted file mode 100644
index 22ec2adc33..0000000000
--- a/changelog.d/3910.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where things occaisonally were not being timed out correctly.
diff --git a/changelog.d/3911.misc b/changelog.d/3911.misc
deleted file mode 100644
index e31311d520..0000000000
--- a/changelog.d/3911.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix the docker image building on python 3
diff --git a/changelog.d/3912.misc b/changelog.d/3912.misc
deleted file mode 100644
index 87d73697ea..0000000000
--- a/changelog.d/3912.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a regression test for logging failed HTTP requests on Python 3.
\ No newline at end of file
diff --git a/changelog.d/3914.bugfix b/changelog.d/3914.bugfix
deleted file mode 100644
index 27e6bad590..0000000000
--- a/changelog.d/3914.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where outbound federation would stop talking to some servers when using workers
diff --git a/changelog.d/3916.feature b/changelog.d/3916.feature
deleted file mode 100644
index 13282d992b..0000000000
--- a/changelog.d/3916.feature
+++ /dev/null
@@ -1 +0,0 @@
-Always LL ourselves if we're in a room
diff --git a/changelog.d/3924.misc b/changelog.d/3924.misc
deleted file mode 100644
index 8d010e0bd9..0000000000
--- a/changelog.d/3924.misc
+++ /dev/null
@@ -1 +0,0 @@
-Comments and interface cleanup for on_receive_pdu
\ No newline at end of file
diff --git a/changelog.d/3925.misc b/changelog.d/3925.misc
deleted file mode 100644
index 3e41f78ff5..0000000000
--- a/changelog.d/3925.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix spurious exceptions when remote http client closes conncetion
diff --git a/changelog.d/3927.misc b/changelog.d/3927.misc
deleted file mode 100644
index 4bd8e25f67..0000000000
--- a/changelog.d/3927.misc
+++ /dev/null
@@ -1 +0,0 @@
-Log exceptions thrown by background tasks
diff --git a/changelog.d/3932.bugfix b/changelog.d/3932.bugfix
deleted file mode 100644
index 7578414ede..0000000000
--- a/changelog.d/3932.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix some instances of ExpiringCache not expiring cache items
diff --git a/changelog.d/3936.bugfix b/changelog.d/3936.bugfix
deleted file mode 100644
index 49b02b9e27..0000000000
--- a/changelog.d/3936.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix out-of-bounds error when LLing yourself
diff --git a/changelog.d/3947.misc b/changelog.d/3947.misc
deleted file mode 100644
index 5a9a22bed9..0000000000
--- a/changelog.d/3947.misc
+++ /dev/null
@@ -1 +0,0 @@
-Require attrs 16.0.0 or later
diff --git a/changelog.d/3948.misc b/changelog.d/3948.misc
deleted file mode 100644
index a93edbd1c3..0000000000
--- a/changelog.d/3948.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix incompatibility with python3 on alpine
\ No newline at end of file
diff --git a/changelog.d/3956.bugfix b/changelog.d/3956.bugfix
deleted file mode 100644
index b0828c9fc6..0000000000
--- a/changelog.d/3956.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix exceptions from metrics handler
\ No newline at end of file
diff --git a/changelog.d/3957.misc b/changelog.d/3957.misc
deleted file mode 100644
index 69d647f119..0000000000
--- a/changelog.d/3957.misc
+++ /dev/null
@@ -1 +0,0 @@
-CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches.
diff --git a/changelog.d/3995.bugfix b/changelog.d/3995.bugfix
new file mode 100644
index 0000000000..2adc36756b
--- /dev/null
+++ b/changelog.d/3995.bugfix
@@ -0,0 +1 @@
+Fix bug in event persistence logic which caused 'NoneType is not iterable'
\ No newline at end of file
diff --git a/changelog.d/3996.bugfix b/changelog.d/3996.bugfix
new file mode 100644
index 0000000000..a056485ea1
--- /dev/null
+++ b/changelog.d/3996.bugfix
@@ -0,0 +1 @@
+Fix exception in background metrics collection
diff --git a/changelog.d/3997.bugfix b/changelog.d/3997.bugfix
new file mode 100644
index 0000000000..b060ee8c18
--- /dev/null
+++ b/changelog.d/3997.bugfix
@@ -0,0 +1 @@
+Fix exception handling in fetching remote profiles
diff --git a/changelog.d/3999.bugfix b/changelog.d/3999.bugfix
new file mode 100644
index 0000000000..dc3b2caffa
--- /dev/null
+++ b/changelog.d/3999.bugfix
@@ -0,0 +1 @@
+Fix handling of rejected threepid invites
diff --git a/changelog.d/4008.misc b/changelog.d/4008.misc
new file mode 100644
index 0000000000..5730210054
--- /dev/null
+++ b/changelog.d/4008.misc
@@ -0,0 +1 @@
+Log exceptions in looping calls
diff --git a/changelog.d/4017.misc b/changelog.d/4017.misc
new file mode 100644
index 0000000000..b1ceb06560
--- /dev/null
+++ b/changelog.d/4017.misc
@@ -0,0 +1 @@
+Optimisation for serving federation requests
\ No newline at end of file
diff --git a/changelog.d/4022.misc b/changelog.d/4022.misc
new file mode 100644
index 0000000000..5b0e136795
--- /dev/null
+++ b/changelog.d/4022.misc
@@ -0,0 +1 @@
+Add metric to count number of non-empty sync responses
diff --git a/changelog.d/4027.bugfix b/changelog.d/4027.bugfix
new file mode 100644
index 0000000000..c9bbff3f68
--- /dev/null
+++ b/changelog.d/4027.bugfix
@@ -0,0 +1 @@
+Workers now start on Python 3.
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1d00defc2d..db44c02a92 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,9 +1,13 @@
ARG PYTHON_VERSION=2
-FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
-COPY . /synapse
+###
+### Stage 0: builder
+###
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder
-RUN apk add --no-cache --virtual .build_deps \
+# install the OS build deps
+
+RUN apk add \
build-base \
libffi-dev \
libjpeg-turbo-dev \
@@ -11,30 +15,47 @@ RUN apk add --no-cache --virtual .build_deps \
libxslt-dev \
linux-headers \
postgresql-dev \
- zlib-dev \
- && cd /synapse \
- && apk add --no-cache --virtual .runtime_deps \
- libffi \
- libjpeg-turbo \
- libressl \
- libxslt \
- libpq \
- zlib \
- su-exec \
- && pip install --upgrade \
+ zlib-dev
+
+# build things which have slow build steps, before we copy synapse, so that
+# the layer can be cached.
+#
+# (we really just care about caching a wheel here, as the "pip install" below
+# will install them again.)
+
+RUN pip install --prefix="/install" --no-warn-script-location \
+ cryptography \
+ msgpack-python \
+ pillow \
+ pynacl
+
+# now install synapse and all of the python deps to /install.
+
+COPY . /synapse
+RUN pip install --prefix="/install" --no-warn-script-location \
lxml \
- pip \
psycopg2 \
- setuptools \
- && mkdir -p /synapse/cache \
- && pip install -f /synapse/cache --upgrade --process-dependency-links . \
- && mv /synapse/docker/start.py /synapse/docker/conf / \
- && rm -rf \
- setup.cfg \
- setup.py \
- synapse \
- && apk del .build_deps
-
+ /synapse
+
+###
+### Stage 1: runtime
+###
+
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
+
+RUN apk add --no-cache --virtual .runtime_deps \
+ libffi \
+ libjpeg-turbo \
+ libressl \
+ libxslt \
+ libpq \
+ zlib \
+ su-exec
+
+COPY --from=builder /install /usr/local
+COPY ./docker/start.py /start.py
+COPY ./docker/conf /conf
+
VOLUME ["/data"]
EXPOSE 8008/tcp 8448/tcp
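
The builder stage installs everything under `/install` via `pip install --prefix`, and the runtime stage copies that tree onto `/usr/local`, where it lands on the default import path. A sketch, illustrative only, of why the two layouts line up:

```python
# pip's --prefix option lays files out using the standard install scheme,
# so /install mirrors the structure of /usr/local.
import sysconfig

paths = sysconfig.get_paths(vars={"base": "/install", "platbase": "/install"})
print(paths["purelib"])  # e.g. /install/lib/python3.6/site-packages
print(paths["scripts"])  # e.g. /install/bin
```
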
diff --git a/jenkins-dendron-haproxy-postgres.sh b/jenkins-dendron-haproxy-postgres.sh
deleted file mode 100755
index 07979bf8b8..0000000000
--- a/jenkins-dendron-haproxy-postgres.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
-./dendron/jenkins/build_dendron.sh
-./sytest/jenkins/prep_sytest_for_postgres.sh
-
-./sytest/jenkins/install_and_run.sh \
- --python $WORKSPACE/.tox/py27/bin/python \
- --synapse-directory $WORKSPACE \
- --dendron $WORKSPACE/dendron/bin/dendron \
- --haproxy \
diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh
deleted file mode 100755
index 3b932fe340..0000000000
--- a/jenkins-dendron-postgres.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
-./dendron/jenkins/build_dendron.sh
-./sytest/jenkins/prep_sytest_for_postgres.sh
-
-./sytest/jenkins/install_and_run.sh \
- --python $WORKSPACE/.tox/py27/bin/python \
- --synapse-directory $WORKSPACE \
- --dendron $WORKSPACE/dendron/bin/dendron \
diff --git a/jenkins-flake8.sh b/jenkins-flake8.sh
deleted file mode 100755
index 11f1cab6c8..0000000000
--- a/jenkins-flake8.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
-
-# Output flake8 violations to violations.flake8.log
-export PEP8SUFFIX="--output-file=violations.flake8.log"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox -e packaging -e pep8
diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh
deleted file mode 100755
index 1afb736394..0000000000
--- a/jenkins-postgres.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-
-./sytest/jenkins/prep_sytest_for_postgres.sh
-
-./sytest/jenkins/install_and_run.sh \
- --python $WORKSPACE/.tox/py27/bin/python \
- --synapse-directory $WORKSPACE \
diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh
deleted file mode 100755
index baf4713a01..0000000000
--- a/jenkins-sqlite.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-
-./sytest/jenkins/install_and_run.sh \
- --python $WORKSPACE/.tox/py27/bin/python \
- --synapse-directory $WORKSPACE \
diff --git a/jenkins-unittests.sh b/jenkins-unittests.sh
deleted file mode 100755
index 4c2f103e80..0000000000
--- a/jenkins-unittests.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
-
-# Output flake8 violations to violations.flake8.log
-# Don't exit with non-0 status code on Jenkins,
-# so that the build steps continue and a later step can decided whether to
-# UNSTABLE or FAILURE this build.
-export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox --notest -e py27
-TOX_BIN=$WORKSPACE/.tox/py27/bin
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install lxml
-
-tox -e py27
diff --git a/jenkins/clone.sh b/jenkins/clone.sh
deleted file mode 100755
index ab30ac7782..0000000000
--- a/jenkins/clone.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#! /bin/bash
-
-# This clones a project from github into a named subdirectory
-# If the project has a branch with the same name as this branch
-# then it will checkout that branch after cloning.
-# Otherwise it will checkout "origin/develop."
-# The first argument is the name of the directory to checkout
-# the branch into.
-# The second argument is the URL of the remote repository to checkout.
-# Usually something like https://github.com/matrix-org/sytest.git
-
-set -eux
-
-NAME=$1
-PROJECT=$2
-BASE=".$NAME-base"
-
-# Update our mirror.
-if [ ! -d ".$NAME-base" ]; then
- # Create a local mirror of the source repository.
- # This saves us from having to download the entire repository
- # when this script is next run.
- git clone "$PROJECT" "$BASE" --mirror
-else
- # Fetch any updates from the source repository.
- (cd "$BASE"; git fetch -p)
-fi
-
-# Remove the existing repository so that we have a clean copy
-rm -rf "$NAME"
-# Cloning with --shared means that we will share portions of the
-# .git directory with our local mirror.
-git clone "$BASE" "$NAME" --shared
-
-# Jenkins may have supplied us with the name of the branch in the
-# environment. Otherwise we will have to guess based on the current
-# commit.
-: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
-cd "$NAME"
-# check out the relevant branch
-git checkout "${GIT_BRANCH}" || (
- echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
- git checkout "origin/develop"
-)
diff --git a/scripts-dev/copyrighter-sql.pl b/scripts-dev/copyrighter-sql.pl
deleted file mode 100755
index 13e630fc11..0000000000
--- a/scripts-dev/copyrighter-sql.pl
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/perl -pi
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-$copyright = <<EOT;
-/* Copyright 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-EOT
-
-s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
diff --git a/scripts-dev/copyrighter.pl b/scripts-dev/copyrighter.pl
deleted file mode 100755
index 03656f697a..0000000000
--- a/scripts-dev/copyrighter.pl
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/perl -pi
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-$copyright = <<EOT;
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-EOT
-
-s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py
index 6e45be75d6..fcc5568835 100755
--- a/scripts-dev/dump_macaroon.py
+++ b/scripts-dev/dump_macaroon.py
@@ -21,4 +21,4 @@ try:
verifier.verify(macaroon, key)
print "Signature is correct"
except Exception as e:
- print e.message
+ print str(e)
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 8c3d429351..91bdb3a25b 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -133,7 +133,7 @@ def register_new_user(user, password, server_location, shared_secret, admin):
print "Passwords do not match"
sys.exit(1)
- if not admin:
+ if admin is None:
admin = raw_input("Make admin [no]: ")
if admin in ("y", "yes", "true"):
admin = True
@@ -160,10 +160,16 @@ if __name__ == "__main__":
default=None,
help="New password for user. Will prompt if omitted.",
)
- parser.add_argument(
+ admin_group = parser.add_mutually_exclusive_group()
+ admin_group.add_argument(
"-a", "--admin",
action="store_true",
- help="Register new user as an admin. Will prompt if omitted.",
+ help="Register new user as an admin. Will prompt if --no-admin is not set either.",
+ )
+ admin_group.add_argument(
+ "--no-admin",
+ action="store_true",
+ help="Register new user as a regular user. Will prompt if --admin is not set either.",
)
group = parser.add_mutually_exclusive_group(required=True)
@@ -197,4 +203,8 @@ if __name__ == "__main__":
else:
secret = args.shared_secret
- register_new_user(args.user, args.password, args.server_url, secret, args.admin)
+ admin = None
+ if args.admin or args.no_admin:
+ admin = args.admin
+
+ register_new_user(args.user, args.password, args.server_url, secret, admin)
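
The `admin` argument is now tri-state: `True` for `--admin`, `False` for `--no-admin`, and `None` when neither flag is given, which triggers the interactive prompt. A minimal standalone sketch of that logic:

```python
# Tri-state admin flag: None means "not specified", so the caller prompts.
import argparse

parser = argparse.ArgumentParser()
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument("-a", "--admin", action="store_true")
admin_group.add_argument("--no-admin", action="store_true")

args = parser.parse_args(["--no-admin"])
admin = None
if args.admin or args.no_admin:
    admin = args.admin   # True for --admin, False for --no-admin
print(admin)             # False here; None if neither flag was passed
```
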
diff --git a/synapse/__init__.py b/synapse/__init__.py
index b1f7a89fba..43c5821ade 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -27,4 +27,4 @@ try:
except ImportError:
pass
-__version__ = "0.33.5.1"
+__version__ = "0.33.6"
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index a31a9a17e0..eed8c67e6a 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -226,7 +226,7 @@ class Filtering(object):
jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
format_checker=FormatChecker())
except jsonschema.ValidationError as e:
- raise SynapseError(400, e.message)
+ raise SynapseError(400, str(e))
class FilterCollection(object):
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index 71347912f1..6d9f1ca0ef 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -64,7 +64,7 @@ class ConsentURIBuilder(object):
"""
mac = hmac.new(
key=self._hmac_secret,
- msg=user_id,
+ msg=user_id.encode('ascii'),
digestmod=sha256,
).hexdigest()
consent_uri = "%s_matrix/consent?%s" % (
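
The `.encode('ascii')` is needed because on Python 3 `hmac.new` only accepts bytes. A small demonstration, with example values:

```python
# On Python 3, passing a str as msg raises TypeError, hence the encode above.
import hmac
from hashlib import sha256

key = b"example-hmac-secret"    # example value
user_id = "@alice:example.com"  # example value

mac = hmac.new(key=key, msg=user_id.encode("ascii"), digestmod=sha256).hexdigest()
print(mac)
```
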
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
index 3b6b9368b8..c3afcc573b 100644
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -24,7 +24,7 @@ try:
python_dependencies.check_requirements()
except python_dependencies.MissingRequirementError as e:
message = "\n".join([
- "Missing Requirement: %s" % (e.message,),
+ "Missing Requirement: %s" % (str(e),),
"To install run:",
" pip install --upgrade --force \"%s\"" % (e.dependency,),
"",
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 7c866e246a..18584226e9 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -17,6 +17,7 @@ import gc
import logging
import sys
+import psutil
from daemonize import Daemonize
from twisted.internet import error, reactor
@@ -24,12 +25,6 @@ from twisted.internet import error, reactor
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
-try:
- import affinity
-except Exception:
- affinity = None
-
-
logger = logging.getLogger(__name__)
@@ -89,15 +84,20 @@ def start_reactor(
with PreserveLoggingContext():
logger.info("Running")
if cpu_affinity is not None:
- if not affinity:
- quit_with_error(
- "Missing package 'affinity' required for cpu_affinity\n"
- "option\n\n"
- "Install by running:\n\n"
- " pip install affinity\n\n"
- )
- logger.info("Setting CPU affinity to %s" % cpu_affinity)
- affinity.set_process_affinity_mask(0, cpu_affinity)
+ # Turn the bitmask into bits, reverse it so we go from 0 up
+ mask_to_bits = bin(cpu_affinity)[2:][::-1]
+
+ cpus = []
+ cpu_num = 0
+
+ for i in mask_to_bits:
+ if i == "1":
+ cpus.append(cpu_num)
+ cpu_num += 1
+
+ p = psutil.Process()
+ p.cpu_affinity(cpus)
+
change_resource_limit(soft_file_limit)
if gc_thresholds:
gc.set_threshold(*gc_thresholds)
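
The optional `affinity` package is replaced with `psutil`: the configured bitmask is expanded into a list of CPU indices, least-significant bit first. A standalone illustration of the conversion:

```python
# Mask 0b0101 (0x5) selects CPUs 0 and 2; psutil.Process().cpu_affinity(cpus)
# then pins the process to those CPUs.
def mask_to_cpus(cpu_affinity):
    mask_to_bits = bin(cpu_affinity)[2:][::-1]  # LSB first
    return [n for n, bit in enumerate(mask_to_bits) if bit == "1"]

print(mask_to_cpus(0x5))  # [0, 2]
```
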
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index 02039f7e79..8559e141af 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -136,7 +136,7 @@ def start(config_options):
"Synapse appservice", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.appservice"
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 4c73c637bb..76aed8c60a 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -153,7 +153,7 @@ def start(config_options):
"Synapse client reader", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.client_reader"
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index bc82197b2a..e4a68715aa 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -169,7 +169,7 @@ def start(config_options):
"Synapse event creator", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.event_creator"
@@ -178,6 +178,9 @@ def start(config_options):
setup_logging(config, use_worker_options=True)
+ # This should only be done on the user directory worker or the master
+ config.update_user_directory = False
+
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 18ca71ef99..228a297fb8 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -140,7 +140,7 @@ def start(config_options):
"Synapse federation reader", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.federation_reader"
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index 6501c57792..e9a99d76e1 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -160,7 +160,7 @@ def start(config_options):
"Synapse federation sender", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.federation_sender"
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index b076fbe522..fc4b25de1c 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -228,7 +228,7 @@ def start(config_options):
"Synapse frontend proxy", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.frontend_proxy"
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 8c5d858b0b..e3f0d99a3f 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -301,7 +301,7 @@ class SynapseHomeServer(HomeServer):
try:
database_engine.check_database(db_conn.cursor())
except IncorrectDatabaseSetup as e:
- quit_with_error(e.message)
+ quit_with_error(str(e))
# Gauges to expose monthly active user control metrics
@@ -328,7 +328,7 @@ def setup(config_options):
config_options,
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
if not config:
@@ -386,7 +386,6 @@ def setup(config_options):
hs.get_pusherpool().start()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
- hs.get_federation_client().start_get_pdu_cache()
reactor.callWhenRunning(start)
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index 992d182dba..acc0487adc 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -133,7 +133,7 @@ def start(config_options):
"Synapse media repository", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.media_repository"
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 2ec4c7defb..0f9f8e19f6 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -28,6 +28,7 @@ from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import __func__
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
@@ -49,31 +50,31 @@ class PusherSlaveStore(
SlavedAccountDataStore
):
update_pusher_last_stream_ordering_and_success = (
- DataStore.update_pusher_last_stream_ordering_and_success.__func__
+ __func__(DataStore.update_pusher_last_stream_ordering_and_success)
)
update_pusher_failing_since = (
- DataStore.update_pusher_failing_since.__func__
+ __func__(DataStore.update_pusher_failing_since)
)
update_pusher_last_stream_ordering = (
- DataStore.update_pusher_last_stream_ordering.__func__
+ __func__(DataStore.update_pusher_last_stream_ordering)
)
get_throttle_params_by_room = (
- DataStore.get_throttle_params_by_room.__func__
+ __func__(DataStore.get_throttle_params_by_room)
)
set_throttle_params = (
- DataStore.set_throttle_params.__func__
+ __func__(DataStore.set_throttle_params)
)
get_time_of_last_push_action_before = (
- DataStore.get_time_of_last_push_action_before.__func__
+ __func__(DataStore.get_time_of_last_push_action_before)
)
get_profile_displayname = (
- DataStore.get_profile_displayname.__func__
+ __func__(DataStore.get_profile_displayname)
)
@@ -191,7 +192,7 @@ def start(config_options):
"Synapse pusher", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.pusher"
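
On Python 2, `DataStore.method` is an unbound method, so its underlying function has to be unwrapped via `.__func__` before it can be reattached to another class; on Python 3 it is already a plain function. The imported helper presumably looks something like this (an assumption based on its use here, not a copy of synapse's code):

```python
import six

def __func__(inp):
    if six.PY3:
        return inp           # already a plain function on Python 3
    else:
        return inp.__func__  # unwrap the Python 2 unbound method
```
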
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index df81b7bcbe..3926c7f263 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -33,7 +33,7 @@ from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
-from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
@@ -147,7 +147,7 @@ class SynchrotronPresence(object):
and haven't come back yet. If there are poke the master about them.
"""
now = self.clock.time_msec()
- for user_id, last_sync_ms in self.users_going_offline.items():
+ for user_id, last_sync_ms in list(self.users_going_offline.items()):
if now - last_sync_ms > 10 * 1000:
self.users_going_offline.pop(user_id, None)
self.send_user_sync(user_id, False, last_sync_ms)
@@ -156,9 +156,9 @@ class SynchrotronPresence(object):
# TODO Hows this supposed to work?
pass
- get_states = PresenceHandler.get_states.__func__
- get_state = PresenceHandler.get_state.__func__
- current_state_for_users = PresenceHandler.current_state_for_users.__func__
+ get_states = __func__(PresenceHandler.get_states)
+ get_state = __func__(PresenceHandler.get_state)
+ current_state_for_users = __func__(PresenceHandler.current_state_for_users)
def user_syncing(self, user_id, affect_presence):
if affect_presence:
@@ -208,7 +208,7 @@ class SynchrotronPresence(object):
) for row in rows]
for state in states:
- self.user_to_current_state[row.user_id] = state
+ self.user_to_current_state[state.user_id] = state
stream_id = token
yield self.notify_from_replication(states, stream_id)
@@ -410,7 +410,7 @@ def start(config_options):
"Synapse synchrotron", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.synchrotron"
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index b383e79c1c..0a5f62b509 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -188,7 +188,7 @@ def start(config_options):
"Synapse user directory", config_options
)
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.user_dir"
diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py
index 58c97a70af..8fccf573ee 100644
--- a/synapse/config/__main__.py
+++ b/synapse/config/__main__.py
@@ -25,7 +25,7 @@ if __name__ == "__main__":
try:
config = HomeServerConfig.load_config("", sys.argv[3:])
except ConfigError as e:
- sys.stderr.write("\n" + e.message + "\n")
+ sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
print (getattr(config, key))
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 6baeccca38..af3eee95b9 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -98,9 +98,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
creation_event = auth_events.get((EventTypes.Create, ""), None)
if not creation_event:
- raise SynapseError(
+ raise AuthError(
403,
- "Room %r does not exist" % (event.room_id,)
+ "No create event in auth events",
)
creating_domain = get_domain_from_id(event.room_id)
@@ -155,10 +155,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
if user_level < invite_level:
raise AuthError(
- 403, (
- "You cannot issue a third party invite for %s." %
- (event.content.display_name,)
- )
+ 403, "You don't have permission to invite users",
)
else:
logger.debug("Allowing! %s", event)
@@ -305,7 +302,7 @@ def _is_membership_change_allowed(event, auth_events):
if user_level < invite_level:
raise AuthError(
- 403, "You cannot invite user %s." % target_user_id
+ 403, "You don't have permission to invite users",
)
elif Membership.JOIN == membership:
# Joins are valid iff caller == target and they were:
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index b782af6308..12f1eb0a3e 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -13,15 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import os
+from distutils.util import strtobool
+
import six
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
-# bugs where we accidentally share e.g. signature dicts. However, converting
-# a dict to frozen_dicts is expensive.
-USE_FROZEN_DICTS = True
+# bugs where we accidentally share e.g. signature dicts. However, converting a
+# dict to frozen_dicts is expensive.
+#
+# NOTE: This is overridden by the configuration by the Synapse worker apps, but
+# for the sake of tests, it is set here while it cannot be configured on the
+# homeserver object itself.
+USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))
class _EventInternalMetadata(object):
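
`strtobool` maps the usual spellings to 0 or 1, so the default can be flipped per environment, e.g. `SYNAPSE_USE_FROZEN_DICTS=1`. Its behaviour, for reference:

```python
# strtobool: "y", "yes", "t", "true", "on", "1" -> 1;
# "n", "no", "f", "false", "off", "0" -> 0; anything else raises ValueError.
from distutils.util import strtobool

print(strtobool("1"))      # 1
print(strtobool("false"))  # 0
```
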
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 5a92428f56..d05ed91d64 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -209,8 +209,6 @@ class FederationClient(FederationBase):
Will attempt to get the PDU from each destination in the list until
one succeeds.
- This will persist the PDU locally upon receipt.
-
Args:
destinations (list): Which home servers to query
event_id (str): event to fetch
@@ -289,8 +287,7 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks
@log_function
def get_state_for_room(self, destination, room_id, event_id):
- """Requests all of the `current` state PDUs for a given room from
- a remote home server.
+ """Requests all of the room state at a given event from a remote home server.
Args:
destination (str): The remote homeserver to query for the state.
@@ -298,9 +295,10 @@ class FederationClient(FederationBase):
event_id (str): The id of the event we want the state at.
Returns:
- Deferred: Results in a list of PDUs.
+ Deferred[Tuple[List[EventBase], List[EventBase]]]:
+ A list of events in the state, and a list of events in the auth chain
+ for the given event.
"""
-
try:
# First we try and ask for just the IDs, as thats far quicker if
# we have most of the state and auth_chain already.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 9a571e4fc7..819e8f7331 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -46,6 +46,7 @@ from synapse.replication.http.federation import (
from synapse.types import get_domain_from_id
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
+from synapse.util.logcontext import nested_logging_context
from synapse.util.logutils import log_function
# when processing incoming transactions, we try to handle multiple rooms in
@@ -187,21 +188,22 @@ class FederationServer(FederationBase):
for pdu in pdus_by_room[room_id]:
event_id = pdu.event_id
- try:
- yield self._handle_received_pdu(
- origin, pdu
- )
- pdu_results[event_id] = {}
- except FederationError as e:
- logger.warn("Error handling PDU %s: %s", event_id, e)
- pdu_results[event_id] = {"error": str(e)}
- except Exception as e:
- f = failure.Failure()
- pdu_results[event_id] = {"error": str(e)}
- logger.error(
- "Failed to handle PDU %s: %s",
- event_id, f.getTraceback().rstrip(),
- )
+ with nested_logging_context(event_id):
+ try:
+ yield self._handle_received_pdu(
+ origin, pdu
+ )
+ pdu_results[event_id] = {}
+ except FederationError as e:
+ logger.warn("Error handling PDU %s: %s", event_id, e)
+ pdu_results[event_id] = {"error": str(e)}
+ except Exception as e:
+ f = failure.Failure()
+ pdu_results[event_id] = {"error": str(e)}
+ logger.error(
+ "Failed to handle PDU %s: %s",
+ event_id, f.getTraceback().rstrip(),
+ )
yield concurrently_execute(
process_pdus_for_room, pdus_by_room.keys(),
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index 8cbf8c4f7f..98b5950800 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -137,26 +137,6 @@ class TransactionQueue(object):
self._processing_pending_presence = False
- def can_send_to(self, destination):
- """Can we send messages to the given server?
-
- We can't send messages to ourselves. If we are running on localhost
- then we can only federation with other servers running on localhost.
- Otherwise we only federate with servers on a public domain.
-
- Args:
- destination(str): The server we are possibly trying to send to.
- Returns:
- bool: True if we can send to the server.
- """
-
- if destination == self.server_name:
- return False
- if self.server_name.startswith("localhost"):
- return destination.startswith("localhost")
- else:
- return not destination.startswith("localhost")
-
def notify_new_events(self, current_id):
"""This gets called when we have some new events we might want to
send out to other servers.
@@ -279,10 +259,7 @@ class TransactionQueue(object):
self._order += 1
destinations = set(destinations)
- destinations = set(
- dest for dest in destinations if self.can_send_to(dest)
- )
-
+ destinations.discard(self.server_name)
logger.debug("Sending to: %s", str(destinations))
if not destinations:
@@ -358,7 +335,7 @@ class TransactionQueue(object):
for destinations, states in hosts_and_states:
for destination in destinations:
- if not self.can_send_to(destination):
+ if destination == self.server_name:
continue
self.pending_presence_by_dest.setdefault(
@@ -377,7 +354,8 @@ class TransactionQueue(object):
content=content,
)
- if not self.can_send_to(destination):
+ if destination == self.server_name:
+ logger.info("Not sending EDU to ourselves")
return
sent_edus_counter.inc()
@@ -392,10 +370,8 @@ class TransactionQueue(object):
self._attempt_new_transaction(destination)
def send_device_messages(self, destination):
- if destination == self.server_name or destination == "localhost":
- return
-
- if not self.can_send_to(destination):
+ if destination == self.server_name:
+ logger.info("Not sending device update to ourselves")
return
self._attempt_new_transaction(destination)
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index f0f89af7dc..17eedf4dbf 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -28,6 +28,7 @@ from synapse.metrics import (
event_processing_loop_room_count,
)
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import log_failure
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.metrics import Measure
@@ -36,17 +37,6 @@ logger = logging.getLogger(__name__)
events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "")
-def log_failure(failure):
- logger.error(
- "Application Services Failure",
- exc_info=(
- failure.type,
- failure.value,
- failure.getTracebackObject()
- )
- )
-
-
class ApplicationServicesHandler(object):
def __init__(self, hs):
@@ -112,7 +102,10 @@ class ApplicationServicesHandler(object):
if not self.started_scheduler:
def start_scheduler():
- return self.scheduler.start().addErrback(log_failure)
+ return self.scheduler.start().addErrback(
+ log_failure, "Application Services Failure",
+ )
+
run_as_background_process("as_scheduler", start_scheduler)
self.started_scheduler = True
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 578e9250fb..9dc46aa15f 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -341,7 +341,7 @@ class E2eKeysHandler(object):
def _exception_to_failure(e):
if isinstance(e, CodeMessageException):
return {
- "status": e.code, "message": e.message,
+ "status": e.code, "message": str(e),
}
if isinstance(e, NotRetryingDestination):
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 2ccdc3bfa7..45d955e6f5 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -18,7 +18,6 @@
import itertools
import logging
-import sys
import six
from six import iteritems, itervalues
@@ -106,7 +105,7 @@ class FederationHandler(BaseHandler):
self.hs = hs
- self.store = hs.get_datastore()
+ self.store = hs.get_datastore() # type: synapse.storage.DataStore
self.federation_client = hs.get_federation_client()
self.state_handler = hs.get_state_handler()
self.server_name = hs.hostname
@@ -323,14 +322,22 @@ class FederationHandler(BaseHandler):
affected=pdu.event_id,
)
- # Calculate the state of the previous events, and
- # de-conflict them to find the current state.
- state_groups = []
+ # Calculate the state after each of the previous events, and
+ # resolve them to find the correct state at the current event.
auth_chains = set()
+ event_map = {
+ event_id: pdu,
+ }
try:
# Get the state of the events we know about
- ours = yield self.store.get_state_groups(room_id, list(seen))
- state_groups.append(ours)
+ ours = yield self.store.get_state_groups_ids(room_id, seen)
+
+ # state_maps is a list of mappings from (type, state_key) to event_id
+ # type: list[dict[tuple[str, str], str]]
+ state_maps = list(ours.values())
+
+ # we don't need this any more, let's delete it.
+ del ours
# Ask the remote server for the states we don't
# know about
@@ -339,27 +346,65 @@ class FederationHandler(BaseHandler):
"[%s %s] Requesting state at missing prev_event %s",
room_id, event_id, p,
)
- state, got_auth_chain = (
- yield self.federation_client.get_state_for_room(
- origin, room_id, p,
+
+ with logcontext.nested_logging_context(p):
+ # note that if any of the missing prevs share missing state or
+ # auth events, the requests to fetch those events are deduped
+ # by the get_pdu_cache in federation_client.
+ remote_state, got_auth_chain = (
+ yield self.federation_client.get_state_for_room(
+ origin, room_id, p,
+ )
)
- )
- auth_chains.update(got_auth_chain)
- state_group = {(x.type, x.state_key): x.event_id for x in state}
- state_groups.append(state_group)
+
+ # we want the state *after* p; get_state_for_room returns the
+ # state *before* p.
+ remote_event = yield self.federation_client.get_pdu(
+ [origin], p, outlier=True,
+ )
+
+ if remote_event is None:
+ raise Exception(
+ "Unable to get missing prev_event %s" % (p, )
+ )
+
+ if remote_event.is_state():
+ remote_state.append(remote_event)
+
+ # XXX hrm I'm not convinced that duplicate events will compare
+ # for equality, so I'm not sure this does what the author
+ # hoped.
+ auth_chains.update(got_auth_chain)
+
+ remote_state_map = {
+ (x.type, x.state_key): x.event_id for x in remote_state
+ }
+ state_maps.append(remote_state_map)
+
+ for x in remote_state:
+ event_map[x.event_id] = x
# Resolve any conflicting state
+ @defer.inlineCallbacks
def fetch(ev_ids):
- return self.store.get_events(
- ev_ids, get_prev_content=False, check_redacted=False
+ fetched = yield self.store.get_events(
+ ev_ids, get_prev_content=False, check_redacted=False,
)
+ # add any events we fetch here to the `event_map` so that we
+ # can use them to build the state event list below.
+ event_map.update(fetched)
+ defer.returnValue(fetched)
room_version = yield self.store.get_room_version(room_id)
state_map = yield resolve_events_with_factory(
- room_version, state_groups, {event_id: pdu}, fetch
+ room_version, state_maps, event_map, fetch,
)
- state = (yield self.store.get_events(state_map.values())).values()
+ # we need to give _process_received_pdu the actual state events
+ # rather than event ids, so generate that now.
+ state = [
+ event_map[e] for e in six.itervalues(state_map)
+ ]
auth_chain = list(auth_chains)
except Exception:
logger.warn(
@@ -483,20 +528,21 @@ class FederationHandler(BaseHandler):
"[%s %s] Handling received prev_event %s",
room_id, event_id, ev.event_id,
)
- try:
- yield self.on_receive_pdu(
- origin,
- ev,
- sent_to_us_directly=False,
- )
- except FederationError as e:
- if e.code == 403:
- logger.warn(
- "[%s %s] Received prev_event %s failed history check.",
- room_id, event_id, ev.event_id,
+ with logcontext.nested_logging_context(ev.event_id):
+ try:
+ yield self.on_receive_pdu(
+ origin,
+ ev,
+ sent_to_us_directly=False,
)
- else:
- raise
+ except FederationError as e:
+ if e.code == 403:
+ logger.warn(
+ "[%s %s] Received prev_event %s failed history check.",
+ room_id, event_id, ev.event_id,
+ )
+ else:
+ raise
@defer.inlineCallbacks
def _process_received_pdu(self, origin, event, state, auth_chain):
@@ -572,6 +618,10 @@ class FederationHandler(BaseHandler):
})
seen_ids.add(e.event_id)
+ logger.info(
+ "[%s %s] persisting newly-received auth/state events %s",
+ room_id, event_id, [e["event"].event_id for e in event_infos]
+ )
yield self._handle_new_events(origin, event_infos)
try:
@@ -1135,7 +1185,8 @@ class FederationHandler(BaseHandler):
try:
logger.info("Processing queued PDU %s which was received "
"while we were joining %s", p.event_id, p.room_id)
- yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
+ with logcontext.nested_logging_context(p.event_id):
+ yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
except Exception as e:
logger.warn(
"Error handling queued PDU %s from %s: %s",
@@ -1550,6 +1601,9 @@ class FederationHandler(BaseHandler):
auth_events=auth_events,
)
+ # reraise does not allow inlineCallbacks to preserve the stacktrace, so we
+ # hack around with a try/finally instead.
+ success = False
try:
if not event.internal_metadata.is_outlier() and not backfilled:
yield self.action_generator.handle_push_actions_for_event(
@@ -1560,15 +1614,13 @@ class FederationHandler(BaseHandler):
[(event, context)],
backfilled=backfilled,
)
- except: # noqa: E722, as we reraise the exception this is fine.
- tp, value, tb = sys.exc_info()
-
- logcontext.run_in_background(
- self.store.remove_push_actions_from_staging,
- event.event_id,
- )
-
- six.reraise(tp, value, tb)
+ success = True
+ finally:
+ if not success:
+ logcontext.run_in_background(
+ self.store.remove_push_actions_from_staging,
+ event.event_id,
+ )
defer.returnValue(context)
@@ -1581,15 +1633,22 @@ class FederationHandler(BaseHandler):
Notifies about the events where appropriate.
"""
- contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
- [
- logcontext.run_in_background(
- self._prep_event,
+
+ @defer.inlineCallbacks
+ def prep(ev_info):
+ event = ev_info["event"]
+ with logcontext.nested_logging_context(suffix=event.event_id):
+ res = yield self._prep_event(
origin,
- ev_info["event"],
+ event,
state=ev_info.get("state"),
auth_events=ev_info.get("auth_events"),
)
+ defer.returnValue(res)
+
+ contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
+ [
+ logcontext.run_in_background(prep, ev_info)
for ev_info in event_infos
], consumeErrors=True,
))
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index e484061cc0..4954b23a0d 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -14,9 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-import sys
-import six
from six import iteritems, itervalues, string_types
from canonicaljson import encode_canonical_json, json
@@ -624,6 +622,9 @@ class EventCreationHandler(object):
event, context
)
+ # reraise does not allow inlineCallbacks to preserve the stacktrace, so we
+ # hack around with a try/finally instead.
+ success = False
try:
# If we're a worker we need to hit out to the master.
if self.config.worker_app:
@@ -636,6 +637,7 @@ class EventCreationHandler(object):
ratelimit=ratelimit,
extra_users=extra_users,
)
+ success = True
return
yield self.persist_and_notify_client_event(
@@ -645,17 +647,16 @@ class EventCreationHandler(object):
ratelimit=ratelimit,
extra_users=extra_users,
)
- except: # noqa: E722, as we reraise the exception this is fine.
- # Ensure that we actually remove the entries in the push actions
- # staging area, if we calculated them.
- tp, value, tb = sys.exc_info()
-
- run_in_background(
- self.store.remove_push_actions_from_staging,
- event.event_id,
- )
- six.reraise(tp, value, tb)
+ success = True
+ finally:
+ if not success:
+ # Ensure that we actually remove the entries in the push actions
+ # staging area, if we calculated them.
+ run_in_background(
+ self.store.remove_push_actions_from_staging,
+ event.event_id,
+ )
@defer.inlineCallbacks
def persist_and_notify_client_event(
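
The success-flag idiom used in both handlers above deserves spelling out: the finally block runs the cleanup only on failure, and the original exception propagates with its traceback intact, which sys.exc_info() plus six.reraise could not guarantee under inlineCallbacks. A generic sketch (names illustrative):

    # Generic success-flag cleanup, mirroring the try/finally pattern above.
    def run_with_cleanup(work, on_failure):
        success = False
        try:
            result = work()
            success = True
            return result
        finally:
            if not success:
                on_failure()  # runs only when `work` raised

    def rollback():
        print("rolled back")

    try:
        run_with_cleanup(lambda: 1 / 0, rollback)
    except ZeroDivisionError:
        print("original exception preserved")
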
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 75b8b7ce6a..1dfbde84fd 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -142,10 +142,8 @@ class BaseProfileHandler(BaseHandler):
if e.code != 404:
logger.exception("Failed to get displayname")
raise
- except Exception:
- logger.exception("Failed to get displayname")
- else:
- defer.returnValue(result["displayname"])
+
+ defer.returnValue(result["displayname"])
@defer.inlineCallbacks
def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
@@ -199,8 +197,6 @@ class BaseProfileHandler(BaseHandler):
if e.code != 404:
logger.exception("Failed to get avatar_url")
raise
- except Exception:
- logger.exception("Failed to get avatar_url")
defer.returnValue(result["avatar_url"])
@@ -278,7 +274,7 @@ class BaseProfileHandler(BaseHandler):
except Exception as e:
logger.warn(
"Failed to update join event for room %s - %s",
- room_id, str(e.message)
+ room_id, str(e)
)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index c7d69d9d80..351892a94f 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -20,6 +20,8 @@ import logging
from six import iteritems, itervalues
+from prometheus_client import Counter
+
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
@@ -36,6 +38,19 @@ from synapse.visibility import filter_events_for_client
logger = logging.getLogger(__name__)
+
+# Counts the number of times we returned a non-empty sync. `type` is one of
+# "initial_sync", "full_state_sync" or "incremental_sync", `lazy_loaded` is
+# "true" or "false" depending on if the request asked for lazy loaded members or
+# not.
+non_empty_sync_counter = Counter(
+ "synapse_handlers_sync_nonempty_total",
+ "Count of non empty sync responses. type is initial_sync/full_state_sync"
+ "/incremental_sync. lazy_loaded indicates if lazy loaded members were "
+ "enabled for that request.",
+ ["type", "lazy_loaded"],
+)
+
# Store the cache that tracks which lazy-loaded members have been sent to a given
# client for no more than 30 minutes.
LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000
@@ -227,14 +242,16 @@ class SyncHandler(object):
@defer.inlineCallbacks
def _wait_for_sync_for_user(self, sync_config, since_token, timeout,
full_state):
+ if since_token is None:
+ sync_type = "initial_sync"
+ elif full_state:
+ sync_type = "full_state_sync"
+ else:
+ sync_type = "incremental_sync"
+
context = LoggingContext.current_context()
if context:
- if since_token is None:
- context.tag = "initial_sync"
- elif full_state:
- context.tag = "full_state_sync"
- else:
- context.tag = "incremental_sync"
+ context.tag = sync_type
if timeout == 0 or since_token is None or full_state:
# we are going to return immediately, so don't bother calling
@@ -242,7 +259,6 @@ class SyncHandler(object):
result = yield self.current_sync_for_user(
sync_config, since_token, full_state=full_state,
)
- defer.returnValue(result)
else:
def current_sync_callback(before_token, after_token):
return self.current_sync_for_user(sync_config, since_token)
@@ -251,7 +267,15 @@ class SyncHandler(object):
sync_config.user.to_string(), timeout, current_sync_callback,
from_token=since_token,
)
- defer.returnValue(result)
+
+ if result:
+ if sync_config.filter_collection.lazy_load_members():
+ lazy_loaded = "true"
+ else:
+ lazy_loaded = "false"
+ non_empty_sync_counter.labels(sync_type, lazy_loaded).inc()
+
+ defer.returnValue(result)
def current_sync_for_user(self, sync_config, since_token=None,
full_state=False):
@@ -567,13 +591,13 @@ class SyncHandler(object):
# be a valid name or canonical_alias - i.e. we're checking that they
# haven't been "deleted" by blatting {} over the top.
if name_id:
- name = yield self.store.get_event(name_id, allow_none=False)
+ name = yield self.store.get_event(name_id, allow_none=True)
if name and name.content:
defer.returnValue(summary)
if canonical_alias_id:
canonical_alias = yield self.store.get_event(
- canonical_alias_id, allow_none=False,
+ canonical_alias_id, allow_none=True,
)
if canonical_alias and canonical_alias.content:
defer.returnValue(summary)
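
For reference, a sketch of how a labelled prometheus_client Counter like the one above behaves (demo names, not Synapse's metric). Label values are drawn from small fixed sets, so cardinality stays bounded at 3 sync types x 2 lazy_loaded values.

    # Illustrative labelled-counter usage with prometheus_client.
    from prometheus_client import Counter

    demo_sync_counter = Counter(
        "demo_nonempty_sync_total",
        "Example counter with type/lazy_loaded labels.",
        ["type", "lazy_loaded"],
    )

    demo_sync_counter.labels("initial_sync", "true").inc()
    demo_sync_counter.labels("incremental_sync", "false").inc()
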
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 2d2d3d5a0d..c610933dd4 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -20,6 +20,7 @@ from twisted.internet import defer
from synapse.api.errors import AuthError, SynapseError
from synapse.types import UserID, get_domain_from_id
+from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.logcontext import run_in_background
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer
@@ -68,6 +69,11 @@ class TypingHandler(object):
# map room IDs to sets of users currently typing
self._room_typing = {}
+ # caches which room_ids changed at which serials
+ self._typing_stream_change_cache = StreamChangeCache(
+ "TypingStreamChangeCache", self._latest_room_serial,
+ )
+
self.clock.looping_call(
self._handle_timeouts,
5000,
@@ -218,6 +224,7 @@ class TypingHandler(object):
for domain in set(get_domain_from_id(u) for u in users):
if domain != self.server_name:
+ logger.debug("sending typing update to %s", domain)
self.federation.send_edu(
destination=domain,
edu_type="m.typing",
@@ -274,19 +281,29 @@ class TypingHandler(object):
self._latest_room_serial += 1
self._room_serials[member.room_id] = self._latest_room_serial
+ self._typing_stream_change_cache.entity_has_changed(
+ member.room_id, self._latest_room_serial,
+ )
self.notifier.on_new_event(
"typing_key", self._latest_room_serial, rooms=[member.room_id]
)
def get_all_typing_updates(self, last_id, current_id):
- # TODO: Work out a way to do this without scanning the entire state.
if last_id == current_id:
return []
+ changed_rooms = self._typing_stream_change_cache.get_all_entities_changed(
+ last_id,
+ )
+
+ if changed_rooms is None:
+ changed_rooms = self._room_serials
+
rows = []
- for room_id, serial in self._room_serials.items():
- if last_id < serial and serial <= current_id:
+ for room_id in changed_rooms:
+ serial = self._room_serials[room_id]
+ if last_id < serial <= current_id:
typing = self._room_typing[room_id]
rows.append((serial, room_id, list(typing)))
rows.sort()
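
The change-cache idea above can be sketched in isolation (a simplified stand-in, not the real synapse.util.caches.stream_change_cache class): remember which entity changed at which serial, so readers can skip a full scan when the cache covers the requested range, and fall back to scanning everything when it does not.

    # Simplified stand-in for StreamChangeCache's behaviour.
    class MiniChangeCache(object):
        def __init__(self, earliest_known_serial):
            self._earliest = earliest_known_serial
            self._changes = {}  # entity -> last serial it changed at

        def entity_has_changed(self, entity, serial):
            self._changes[entity] = serial

        def get_all_entities_changed(self, from_serial):
            if from_serial < self._earliest:
                return None  # can't answer; caller falls back to a full scan
            return [e for e, s in self._changes.items() if s > from_serial]

    cache = MiniChangeCache(earliest_known_serial=0)
    cache.entity_has_changed("!room:a", 1)
    cache.entity_has_changed("!room:b", 2)
    print(cache.get_all_entities_changed(1))  # -> ['!room:b']
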
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 50be2de3bb..e508c0bd4f 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -75,14 +75,14 @@ class SynapseRequest(Request):
return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % (
self.__class__.__name__,
id(self),
- self.method.decode('ascii', errors='replace'),
+ self.get_method(),
self.get_redacted_uri(),
self.clientproto.decode('ascii', errors='replace'),
self.site.site_tag,
)
def get_request_id(self):
- return "%s-%i" % (self.method.decode('ascii'), self.request_seq)
+ return "%s-%i" % (self.get_method(), self.request_seq)
def get_redacted_uri(self):
uri = self.uri
@@ -90,6 +90,21 @@ class SynapseRequest(Request):
uri = self.uri.decode('ascii')
return redact_uri(uri)
+ def get_method(self):
+ """Gets the method associated with the request (or placeholder if not
+ method has yet been received).
+
+ Note: This is necessary as the placeholder value in twisted is str
+ rather than bytes, so we need to sanitise `self.method`.
+
+ Returns:
+ str
+ """
+ method = self.method
+ if isinstance(method, bytes):
+ method = self.method.decode('ascii')
+ return method
+
def get_user_agent(self):
return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1]
@@ -119,7 +134,7 @@ class SynapseRequest(Request):
# dispatching to the handler, so that the handler
# can update the servlet name in the request
# metrics
- requests_counter.labels(self.method.decode('ascii'),
+ requests_counter.labels(self.get_method(),
self.request_metrics.name).inc()
@contextlib.contextmanager
@@ -207,14 +222,14 @@ class SynapseRequest(Request):
self.start_time = time.time()
self.request_metrics = RequestMetrics()
self.request_metrics.start(
- self.start_time, name=servlet_name, method=self.method.decode('ascii'),
+ self.start_time, name=servlet_name, method=self.get_method(),
)
self.site.access_logger.info(
"%s - %s - Received request: %s %s",
self.getClientIP(),
self.site.site_tag,
- self.method.decode('ascii'),
+ self.get_method(),
self.get_redacted_uri()
)
@@ -280,7 +295,7 @@ class SynapseRequest(Request):
int(usage.db_txn_count),
self.sentLength,
code,
- self.method.decode('ascii'),
+ self.get_method(),
self.get_redacted_uri(),
self.clientproto.decode('ascii', errors='replace'),
user_agent,
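
A standalone sketch of the sanitising helper: normalise bytes to str and pass str placeholders through untouched ("(no method)" below is hypothetical, standing in for Twisted's native-str placeholder value).

    # Bytes-vs-str normalisation matching get_method() above.
    def get_method(method):
        if isinstance(method, bytes):
            return method.decode('ascii')
        return method

    assert get_method(b"GET") == "GET"
    assert get_method("(no method)") == "(no method)"
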
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 173908299c..037f1c490e 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -101,9 +101,13 @@ class _Collector(object):
labels=["name"],
)
- # We copy the dict so that it doesn't change from underneath us
+ # We copy the dict so that it doesn't change from underneath us.
+ # We also copy the process lists, as those can also change
with _bg_metrics_lock:
- _background_processes_copy = dict(_background_processes)
+ _background_processes_copy = {
+ k: list(v)
+ for k, v in six.iteritems(_background_processes)
+ }
for desc, processes in six.iteritems(_background_processes_copy):
background_process_in_flight_count.add_metric(
diff --git a/synapse/notifier.py b/synapse/notifier.py
index f1d92c1395..340b16ce25 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -24,9 +24,10 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError
from synapse.handlers.presence import format_user_presence_state
from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import StreamToken
from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
-from synapse.util.logcontext import PreserveLoggingContext, run_in_background
+from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
from synapse.visibility import filter_events_for_client
@@ -248,7 +249,10 @@ class Notifier(object):
def _on_new_room_event(self, event, room_stream_id, extra_users=[]):
"""Notify any user streams that are interested in this room event"""
# poke any interested application service.
- run_in_background(self._notify_app_services, room_stream_id)
+ run_as_background_process(
+ "notify_app_services",
+ self._notify_app_services, room_stream_id,
+ )
if self.federation_sender:
self.federation_sender.notify_new_events(room_stream_id)
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index c779f69fa0..2947f37f1a 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -33,31 +33,35 @@ logger = logging.getLogger(__name__)
# [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies
REQUIREMENTS = {
"jsonschema>=2.5.1": ["jsonschema>=2.5.1"],
- "frozendict>=0.4": ["frozendict"],
+ "frozendict>=1": ["frozendict"],
"unpaddedbase64>=1.1.0": ["unpaddedbase64>=1.1.0"],
"canonicaljson>=1.1.3": ["canonicaljson>=1.1.3"],
"signedjson>=1.0.0": ["signedjson>=1.0.0"],
"pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
- "service_identity>=1.0.0": ["service_identity>=1.0.0"],
+ "service_identity>=16.0.0": ["service_identity>=16.0.0"],
"Twisted>=17.1.0": ["twisted>=17.1.0"],
"treq>=15.1": ["treq>=15.1"],
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"],
- "pyyaml": ["yaml"],
- "pyasn1": ["pyasn1"],
- "daemonize": ["daemonize"],
- "bcrypt": ["bcrypt>=3.1.0"],
- "pillow": ["PIL"],
- "pydenticon": ["pydenticon"],
- "sortedcontainers": ["sortedcontainers"],
- "pysaml2>=3.0.0": ["saml2>=3.0.0"],
- "pymacaroons-pynacl": ["pymacaroons"],
+ "pyyaml>=3.11": ["yaml"],
+ "pyasn1>=0.1.9": ["pyasn1"],
+ "pyasn1-modules>=0.0.7": ["pyasn1_modules"],
+ "daemonize>=2.3.1": ["daemonize"],
+ "bcrypt>=3.1.0": ["bcrypt>=3.1.0"],
+ "pillow>=3.1.2": ["PIL"],
+ "pydenticon>=0.2": ["pydenticon"],
+ "sortedcontainers>=1.4.4": ["sortedcontainers"],
+ "pysaml2>=3.0.0": ["saml2"],
+ "pymacaroons-pynacl>=0.9.3": ["pymacaroons"],
"msgpack-python>=0.3.0": ["msgpack"],
"phonenumbers>=8.2.0": ["phonenumbers"],
- "six": ["six"],
- "prometheus_client": ["prometheus_client"],
+ "six>=1.10": ["six"],
+
+ # prometheus_client 0.4.0 changed the format of counter metrics
+ # (cf https://github.com/matrix-org/synapse/issues/4001)
+ "prometheus_client>=0.0.18,<0.4.0": ["prometheus_client"],
# we use attr.s(slots), which arrived in 16.0.0
"attrs>=16.0.0": ["attr>=16.0.0"],
@@ -78,9 +82,6 @@ CONDITIONAL_REQUIREMENTS = {
"psutil": {
"psutil>=2.0.0": ["psutil>=2.0.0"],
},
- "affinity": {
- "affinity": ["affinity"],
- },
"postgres": {
"psycopg2>=2.6": ["psycopg2"]
}
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index 3f7be74e02..2d81d49e9a 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -15,6 +15,8 @@
import logging
+import six
+
from synapse.storage._base import SQLBaseStore
from synapse.storage.engines import PostgresEngine
@@ -23,6 +25,13 @@ from ._slaved_id_tracker import SlavedIdTracker
logger = logging.getLogger(__name__)
+def __func__(inp):
+ if six.PY3:
+ return inp
+ else:
+ return inp.__func__
+
+
class BaseSlavedStore(SQLBaseStore):
def __init__(self, db_conn, hs):
super(BaseSlavedStore, self).__init__(db_conn, hs)
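
The reasoning behind the shared helper: on Python 2, `SomeClass.method` is an unbound method and re-hosting it on another class requires the underlying plain function via `.__func__`; on Python 3 it already is a plain function. A minimal sketch (demo classes, not Synapse's stores):

    import six

    def __func__(inp):
        if six.PY3:
            return inp
        else:
            return inp.__func__

    class Source(object):
        def greet(self):
            return "hello from %s" % (type(self).__name__,)

    class Mirror(object):
        # re-host Source.greet on Mirror, portably across py2/py3
        greet = __func__(Source.greet)

    print(Mirror().greet())  # -> hello from Mirror
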
diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py
index 87eaa53004..4f19fd35aa 100644
--- a/synapse/replication/slave/storage/deviceinbox.py
+++ b/synapse/replication/slave/storage/deviceinbox.py
@@ -17,7 +17,7 @@ from synapse.storage import DataStore
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
from ._slaved_id_tracker import SlavedIdTracker
@@ -43,11 +43,11 @@ class SlavedDeviceInboxStore(BaseSlavedStore):
expiry_ms=30 * 60 * 1000,
)
- get_to_device_stream_token = DataStore.get_to_device_stream_token.__func__
- get_new_messages_for_device = DataStore.get_new_messages_for_device.__func__
- get_new_device_msgs_for_remote = DataStore.get_new_device_msgs_for_remote.__func__
- delete_messages_for_device = DataStore.delete_messages_for_device.__func__
- delete_device_msgs_for_remote = DataStore.delete_device_msgs_for_remote.__func__
+ get_to_device_stream_token = __func__(DataStore.get_to_device_stream_token)
+ get_new_messages_for_device = __func__(DataStore.get_new_messages_for_device)
+ get_new_device_msgs_for_remote = __func__(DataStore.get_new_device_msgs_for_remote)
+ delete_messages_for_device = __func__(DataStore.delete_messages_for_device)
+ delete_device_msgs_for_remote = __func__(DataStore.delete_device_msgs_for_remote)
def stream_positions(self):
result = super(SlavedDeviceInboxStore, self).stream_positions()
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
index 21b8c468fa..ec2fd561cc 100644
--- a/synapse/replication/slave/storage/devices.py
+++ b/synapse/replication/slave/storage/devices.py
@@ -13,23 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import six
-
from synapse.storage import DataStore
from synapse.storage.end_to_end_keys import EndToEndKeyStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
from ._slaved_id_tracker import SlavedIdTracker
-def __func__(inp):
- if six.PY3:
- return inp
- else:
- return inp.__func__
-
-
class SlavedDeviceStore(BaseSlavedStore):
def __init__(self, db_conn, hs):
super(SlavedDeviceStore, self).__init__(db_conn, hs)
diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py
index 5777f07c8d..e933b170bb 100644
--- a/synapse/replication/slave/storage/groups.py
+++ b/synapse/replication/slave/storage/groups.py
@@ -16,7 +16,7 @@
from synapse.storage import DataStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
from ._slaved_id_tracker import SlavedIdTracker
@@ -33,9 +33,9 @@ class SlavedGroupServerStore(BaseSlavedStore):
"_group_updates_stream_cache", self._group_updates_id_gen.get_current_token(),
)
- get_groups_changes_for_user = DataStore.get_groups_changes_for_user.__func__
- get_group_stream_token = DataStore.get_group_stream_token.__func__
- get_all_groups_for_user = DataStore.get_all_groups_for_user.__func__
+ get_groups_changes_for_user = __func__(DataStore.get_groups_changes_for_user)
+ get_group_stream_token = __func__(DataStore.get_group_stream_token)
+ get_all_groups_for_user = __func__(DataStore.get_all_groups_for_user)
def stream_positions(self):
result = super(SlavedGroupServerStore, self).stream_positions()
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
index 05ed168463..8032f53fec 100644
--- a/synapse/replication/slave/storage/keys.py
+++ b/synapse/replication/slave/storage/keys.py
@@ -16,7 +16,7 @@
from synapse.storage import DataStore
from synapse.storage.keys import KeyStore
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
class SlavedKeyStore(BaseSlavedStore):
@@ -24,11 +24,11 @@ class SlavedKeyStore(BaseSlavedStore):
"_get_server_verify_key"
]
- get_server_verify_keys = DataStore.get_server_verify_keys.__func__
- store_server_verify_key = DataStore.store_server_verify_key.__func__
+ get_server_verify_keys = __func__(DataStore.get_server_verify_keys)
+ store_server_verify_key = __func__(DataStore.store_server_verify_key)
- get_server_certificate = DataStore.get_server_certificate.__func__
- store_server_certificate = DataStore.store_server_certificate.__func__
+ get_server_certificate = __func__(DataStore.get_server_certificate)
+ store_server_certificate = __func__(DataStore.store_server_certificate)
- get_server_keys_json = DataStore.get_server_keys_json.__func__
- store_server_keys_json = DataStore.store_server_keys_json.__func__
+ get_server_keys_json = __func__(DataStore.get_server_keys_json)
+ store_server_keys_json = __func__(DataStore.store_server_keys_json)
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
index 80b744082a..92447b00d4 100644
--- a/synapse/replication/slave/storage/presence.py
+++ b/synapse/replication/slave/storage/presence.py
@@ -17,7 +17,7 @@ from synapse.storage import DataStore
from synapse.storage.presence import PresenceStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore
+from ._base import BaseSlavedStore, __func__
from ._slaved_id_tracker import SlavedIdTracker
@@ -34,8 +34,8 @@ class SlavedPresenceStore(BaseSlavedStore):
"PresenceStreamChangeCache", self._presence_id_gen.get_current_token()
)
- _get_active_presence = DataStore._get_active_presence.__func__
- take_presence_startup_info = DataStore.take_presence_startup_info.__func__
+ _get_active_presence = __func__(DataStore._get_active_presence)
+ take_presence_startup_info = __func__(DataStore.take_presence_startup_info)
_get_presence_for_user = PresenceStore.__dict__["_get_presence_for_user"]
get_presence_for_users = PresenceStore.__dict__["get_presence_for_users"]
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index c95477d318..7a7157b352 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -65,10 +65,15 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory):
for event_ids in itervalues(conflicted_state)
for event_id in event_ids
)
+ needed_event_count = len(needed_events)
if event_map is not None:
needed_events -= set(iterkeys(event_map))
- logger.info("Asking for %d conflicted events", len(needed_events))
+ logger.info(
+ "Asking for %d/%d conflicted events",
+ len(needed_events),
+ needed_event_count,
+ )
# dict[str, FrozenEvent]: a map from state event id to event. Only includes
# the state events which are in conflict (and those in event_map)
@@ -85,11 +90,16 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory):
)
new_needed_events = set(itervalues(auth_events))
+ new_needed_event_count = len(new_needed_events)
new_needed_events -= needed_events
if event_map is not None:
new_needed_events -= set(iterkeys(event_map))
- logger.info("Asking for %d auth events", len(new_needed_events))
+ logger.info(
+ "Asking for %d/%d auth events",
+ len(new_needed_events),
+ new_needed_event_count,
+ )
state_map_new = yield state_map_factory(new_needed_events)
state_map.update(state_map_new)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index e7487311ce..03cedf3a75 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -38,6 +38,7 @@ from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.event_federation import EventFederationStore
from synapse.storage.events_worker import EventsWorkerStore
from synapse.types import RoomStreamToken, get_domain_from_id
+from synapse.util import batch_iter
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.util.frozenutils import frozendict_json_encoder
@@ -386,12 +387,10 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
)
for room_id, ev_ctx_rm in iteritems(events_by_room):
- # Work out new extremities by recursively adding and removing
- # the new events.
latest_event_ids = yield self.get_latest_event_ids_in_room(
room_id
)
- new_latest_event_ids = yield self._calculate_new_extremeties(
+ new_latest_event_ids = yield self._calculate_new_extremities(
room_id, ev_ctx_rm, latest_event_ids
)
@@ -400,6 +399,12 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
# No change in extremities, so no change in state
continue
+ # there should always be at least one forward extremity.
+ # (except during the initial persistence of the send_join
+ # results, in which case there will be no existing
+ # extremities, so we'll `continue` above and skip this bit.)
+ assert new_latest_event_ids, "No forward extremities left!"
+
new_forward_extremeties[room_id] = new_latest_event_ids
len_1 = (
@@ -517,44 +522,79 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
)
@defer.inlineCallbacks
- def _calculate_new_extremeties(self, room_id, event_contexts, latest_event_ids):
- """Calculates the new forward extremeties for a room given events to
+ def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
+ """Calculates the new forward extremities for a room given events to
persist.
Assumes that we are only persisting events for one room at a time.
"""
- new_latest_event_ids = set(latest_event_ids)
- # First, add all the new events to the list
- new_latest_event_ids.update(
- event.event_id for event, ctx in event_contexts
+
+ # we're only interested in new events which aren't outliers and which aren't
+ # being rejected.
+ new_events = [
+ event for event, ctx in event_contexts
if not event.internal_metadata.is_outlier() and not ctx.rejected
+ ]
+
+ # start with the existing forward extremities
+ result = set(latest_event_ids)
+
+ # add all the new events to the list
+ result.update(
+ event.event_id for event in new_events
)
- # Now remove all events that are referenced by the to-be-added events
- new_latest_event_ids.difference_update(
+
+ # Now remove all events which are prev_events of any of the new events
+ result.difference_update(
e_id
- for event, ctx in event_contexts
+ for event in new_events
for e_id, _ in event.prev_events
- if not event.internal_metadata.is_outlier() and not ctx.rejected
)
- # And finally remove any events that are referenced by previously added
- # events.
- rows = yield self._simple_select_many_batch(
- table="event_edges",
- column="prev_event_id",
- iterable=list(new_latest_event_ids),
- retcols=["prev_event_id"],
- keyvalues={
- "is_state": False,
- },
- desc="_calculate_new_extremeties",
- )
+ # Finally, remove any events which are prev_events of any existing events.
+ existing_prevs = yield self._get_events_which_are_prevs(result)
+ result.difference_update(existing_prevs)
- new_latest_event_ids.difference_update(
- row["prev_event_id"] for row in rows
- )
+ defer.returnValue(result)
- defer.returnValue(new_latest_event_ids)
+ @defer.inlineCallbacks
+ def _get_events_which_are_prevs(self, event_ids):
+ """Filter the supplied list of event_ids to get those which are prev_events of
+ existing (non-outlier/rejected) events.
+
+ Args:
+ event_ids (Iterable[str]): event ids to filter
+
+ Returns:
+ Deferred[List[str]]: filtered event ids
+ """
+ results = []
+
+ def _get_events(txn, batch):
+ sql = """
+ SELECT prev_event_id
+ FROM event_edges
+ INNER JOIN events USING (event_id)
+ LEFT JOIN rejections USING (event_id)
+ WHERE
+ prev_event_id IN (%s)
+ AND NOT events.outlier
+ AND rejections.event_id IS NULL
+ """ % (
+ ",".join("?" for _ in batch),
+ )
+
+ txn.execute(sql, batch)
+ results.extend(r[0] for r in txn)
+
+ for chunk in batch_iter(event_ids, 100):
+ yield self.runInteraction(
+ "_get_events_which_are_prevs",
+ _get_events,
+ chunk,
+ )
+
+ defer.returnValue(results)
@defer.inlineCallbacks
def _get_new_state_after_events(self, room_id, events_context, old_latest_event_ids,
@@ -586,10 +626,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore
the new current state is only returned if we've already calculated
it.
"""
-
- if not new_latest_event_ids:
- return
-
# map from state_group to ((type, key) -> event_id) state map
state_groups_map = {}
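
`batch_iter` (defined in synapse/util, shown later in this patch) is what keeps the `IN (...)` list in `_get_events_which_are_prevs` bounded. A standalone copy of the helper and the chunking pattern:

    # islice pulls up to `size` items per call; iter() with a sentinel of ()
    # stops when the source iterator is exhausted.
    from itertools import islice

    def batch_iter(iterable, size):
        sourceiter = iter(iterable)
        return iter(lambda: tuple(islice(sourceiter, size)), ())

    event_ids = ["$ev%d:example.com" % (i,) for i in range(250)]
    for chunk in batch_iter(event_ids, 100):
        print(len(chunk))  # -> 100, 100, 50
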
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 59580949f1..0fe8c8e24c 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -172,6 +172,10 @@ class MonthlyActiveUsersStore(SQLBaseStore):
Deferred[bool]: True if a new entry was created, False if an
existing one was updated.
"""
+ # We consciously decided to lock the table on the basis that it ought
+ # never be a big table, and alternative approaches (batching multiple
+ # upserts into a single txn) introduced a lot of extra complexity.
+ # See https://github.com/matrix-org/synapse/issues/3854 for more.
is_insert = yield self._simple_upsert(
desc="upsert_monthly_active_user",
table="monthly_active_users",
@@ -181,7 +185,6 @@ class MonthlyActiveUsersStore(SQLBaseStore):
values={
"timestamp": int(self._clock.time_msec()),
},
- lock=False,
)
if is_insert:
self.user_last_seen_monthly_active.invalidate((user_id,))
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index 4b971efdba..3f4cbd61c4 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -255,7 +255,17 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
)
@defer.inlineCallbacks
- def get_state_groups_ids(self, room_id, event_ids):
+ def get_state_groups_ids(self, _room_id, event_ids):
+ """Get the event IDs of all the state for the state groups for the given events
+
+ Args:
+ _room_id (str): id of the room for these events
+ event_ids (iterable[str]): ids of the events
+
+ Returns:
+ Deferred[dict[int, dict[tuple[str, str], str]]]:
+ dict of state_group_id -> (dict of (type, state_key) -> event id)
+ """
if not event_ids:
defer.returnValue({})
@@ -270,7 +280,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
@defer.inlineCallbacks
def get_state_ids_for_group(self, state_group):
- """Get the state IDs for the given state group
+ """Get the event IDs of all the state in the given state group
Args:
state_group (int)
@@ -286,7 +296,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
def get_state_groups(self, room_id, event_ids):
""" Get the state groups for the given list of event_ids
- The return value is a dict mapping group names to lists of events.
+ Returns:
+ Deferred[dict[int, list[EventBase]]]:
+ dict of state_group_id -> list of state events.
"""
if not event_ids:
defer.returnValue({})
@@ -324,7 +336,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
member events (if True), or to exclude member events (if False)
Returns:
- dictionary state_group -> (dict of (type, state_key) -> event id)
+ Deferred[dict[int, dict[tuple[str, str], str]]]:
+ dict of state_group_id -> (dict of (type, state_key) -> event id)
"""
results = {}
@@ -732,8 +746,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
If None, `types` filtering is applied to all events.
Returns:
- Deferred[dict[int, dict[(type, state_key), EventBase]]]
- a dictionary mapping from state group to state dictionary.
+ Deferred[dict[int, dict[tuple[str, str], str]]]:
+ dict of state_group_id -> (dict of (type, state_key) -> event id)
"""
if types is not None:
non_member_types = [t for t in types if t[0] != EventTypes.Member]
@@ -788,8 +802,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
If None, `types` filtering is applied to all events.
Returns:
- Deferred[dict[int, dict[(type, state_key), EventBase]]]
- a dictionary mapping from state group to state dictionary.
+ Deferred[dict[int, dict[tuple[str, str], str]]]:
+ dict of state_group_id -> (dict of (type, state_key) -> event id)
"""
if types:
types = frozenset(types)
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 4c296d72c0..d6cfdba519 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -630,7 +630,21 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
@defer.inlineCallbacks
def get_all_new_events_stream(self, from_id, current_id, limit):
- """Get all new events"""
+ """Get all new events
+
+ Returns all events with from_id < stream_ordering <= current_id.
+
+ Args:
+ from_id (int): the stream_ordering of the last event we processed
+ current_id (int): the stream_ordering of the most recently processed event
+ limit (int): the maximum number of events to return
+
+ Returns:
+ Deferred[Tuple[int, list[FrozenEvent]]]: A tuple of (next_id, events), where
+ `next_id` is the next value to pass as `from_id` (it will either be the
+ stream_ordering of the last returned event, or, if fewer than `limit` events
+ were found, `current_id`).
+ """
def get_all_new_events_stream_txn(txn):
sql = (
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index baf0379a68..a3032cdce9 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -23,6 +23,7 @@ from canonicaljson import encode_canonical_json
from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util.caches.expiringcache import ExpiringCache
from ._base import SQLBaseStore, db_to_json
@@ -49,6 +50,8 @@ _UpdateTransactionRow = namedtuple(
)
)
+SENTINEL = object()
+
class TransactionStore(SQLBaseStore):
"""A collection of queries for handling PDUs.
@@ -59,6 +62,12 @@ class TransactionStore(SQLBaseStore):
self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000)
+ self._destination_retry_cache = ExpiringCache(
+ cache_name="get_destination_retry_timings",
+ clock=self._clock,
+ expiry_ms=5 * 60 * 1000,
+ )
+
def get_received_txn_response(self, transaction_id, origin):
"""For an incoming transaction from a given origin, check if we have
already responded to it. If so, return the response code and response
@@ -155,6 +164,7 @@ class TransactionStore(SQLBaseStore):
"""
pass
+ @defer.inlineCallbacks
def get_destination_retry_timings(self, destination):
"""Gets the current retry timings (if any) for a given destination.
@@ -165,10 +175,20 @@ class TransactionStore(SQLBaseStore):
None if not retrying
Otherwise a dict for the retry scheme
"""
- return self.runInteraction(
+
+ result = self._destination_retry_cache.get(destination, SENTINEL)
+ if result is not SENTINEL:
+ defer.returnValue(result)
+
+ result = yield self.runInteraction(
"get_destination_retry_timings",
self._get_destination_retry_timings, destination)
+ # We don't hugely care about race conditions between getting and
+ # invalidating the cache, since we time out fairly quickly anyway.
+ self._destination_retry_cache[destination] = result
+ defer.returnValue(result)
+
def _get_destination_retry_timings(self, txn, destination):
result = self._simple_select_one_txn(
txn,
@@ -196,6 +216,7 @@ class TransactionStore(SQLBaseStore):
retry_interval (int) - how long until next retry in ms
"""
+ self._destination_retry_cache.pop(destination, None)
return self.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings,
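
The read-through pattern above (sentinel-guarded get, populate after a DB hit, pop on write) can be sketched generically; a unique sentinel is needed because None is itself a cacheable result (destinations with no retry timings).

    SENTINEL = object()

    class ReadThroughCache(object):
        def __init__(self, loader):
            self._loader = loader  # stand-in for the runInteraction DB query
            self._cache = {}

        def get(self, key):
            value = self._cache.get(key, SENTINEL)
            if value is not SENTINEL:
                return value
            value = self._loader(key)  # may legitimately return None
            self._cache[key] = value
            return value

        def invalidate(self, key):
            # mirror of set_destination_retry_timings popping before writing
            self._cache.pop(key, None)

    cache = ReadThroughCache(lambda destination: None)
    assert cache.get("remote.example.com") is None  # the None is now cached
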
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 680ea928c7..9a8fae0497 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -68,7 +68,10 @@ class Clock(object):
"""
call = task.LoopingCall(f)
call.clock = self._reactor
- call.start(msec / 1000.0, now=False)
+ d = call.start(msec / 1000.0, now=False)
+ d.addErrback(
+ log_failure, "Looping call died", consumeErrors=False,
+ )
return call
def call_later(self, delay, callback, *args, **kwargs):
@@ -109,3 +112,29 @@ def batch_iter(iterable, size):
sourceiter = iter(iterable)
# call islice until it returns an empty tuple
return iter(lambda: tuple(islice(sourceiter, size)), ())
+
+
+def log_failure(failure, msg, consumeErrors=True):
+ """Creates a function suitable for passing to `Deferred.addErrback` that
+ logs any failures that occur.
+
+ Args:
+ msg (str): Message to log
+ consumeErrors (bool): If true consumes the failure, otherwise passes
+ on down the callback chain
+
+ Returns:
+ func(Failure)
+ """
+
+ logger.error(
+ msg,
+ exc_info=(
+ failure.type,
+ failure.value,
+ failure.getTracebackObject()
+ )
+ )
+
+ if not consumeErrors:
+ return failure
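
A usage sketch for the new errback (restating the helper so this runs standalone; requires Twisted). Extra positional and keyword arguments to addErrback are forwarded to the errback, which is how the message rides along with the Failure.

    import logging

    from twisted.internet import defer

    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger("demo")

    def log_failure(failure, msg, consumeErrors=True):
        logger.error(
            msg,
            exc_info=(failure.type, failure.value, failure.getTracebackObject()),
        )
        if not consumeErrors:
            return failure

    d = defer.Deferred()
    d.addErrback(log_failure, "Looping call died", consumeErrors=False)
    d.addErrback(lambda f: None)  # swallow so the demo exits cleanly
    d.errback(RuntimeError("boom"))
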
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index 9af4ec4aa8..f369780277 100644
--- a/synapse/util/caches/expiringcache.py
+++ b/synapse/util/caches/expiringcache.py
@@ -16,7 +16,7 @@
import logging
from collections import OrderedDict
-from six import itervalues
+from six import iteritems, itervalues
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.caches import register_cache
@@ -24,6 +24,9 @@ from synapse.util.caches import register_cache
logger = logging.getLogger(__name__)
+SENTINEL = object()
+
+
class ExpiringCache(object):
def __init__(self, cache_name, clock, max_len=0, expiry_ms=0,
reset_expiry_on_get=False, iterable=False):
@@ -95,6 +98,21 @@ class ExpiringCache(object):
return entry.value
+ def pop(self, key, default=SENTINEL):
+ """Removes and returns the value with the given key from the cache.
+
+ If the key isn't in the cache then `default` will be returned if
+ specified, otherwise `KeyError` will get raised.
+
+ Identical functionality to `dict.pop(..)`.
+ """
+
+ value = self._cache.pop(key, default)
+ if value is SENTINEL:
+ raise KeyError(key)
+
+ return value
+
def __contains__(self, key):
return key in self._cache
@@ -122,7 +140,7 @@ class ExpiringCache(object):
keys_to_delete = set()
- for key, cache_entry in self._cache.items():
+ for key, cache_entry in iteritems(self._cache):
if now - cache_entry.time > self._expiry_ms:
keys_to_delete.add(key)
@@ -146,6 +164,8 @@ class ExpiringCache(object):
class _CacheEntry(object):
+ __slots__ = ["time", "value"]
+
def __init__(self, time, value):
self.time = time
self.value = value
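
The dict-style pop semantics added above, illustrated on a plain dict-backed stand-in: with no default, a missing key raises KeyError; with a default, it is returned instead.

    SENTINEL = object()

    class MiniCache(object):
        def __init__(self):
            self._cache = {}

        def pop(self, key, default=SENTINEL):
            value = self._cache.pop(key, default)
            if value is SENTINEL:
                raise KeyError(key)
            return value

    cache = MiniCache()
    cache._cache["key"] = 1
    assert cache.pop("key") == 1
    assert cache.pop("key", None) is None
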
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index a0c2d37610..89224b26cc 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -200,7 +200,7 @@ class LoggingContext(object):
sentinel = Sentinel()
- def __init__(self, name=None, parent_context=None):
+ def __init__(self, name=None, parent_context=None, request=None):
self.previous_context = LoggingContext.current_context()
self.name = name
@@ -218,6 +218,13 @@ class LoggingContext(object):
self.parent_context = parent_context
+ if self.parent_context is not None:
+ self.parent_context.copy_to(self)
+
+ if request is not None:
+ # the request param overrides the request from the parent context
+ self.request = request
+
def __str__(self):
return "%s@%x" % (self.name, id(self))
@@ -256,9 +263,6 @@ class LoggingContext(object):
)
self.alive = True
- if self.parent_context is not None:
- self.parent_context.copy_to(self)
-
return self
def __exit__(self, type, value, traceback):
@@ -439,6 +443,35 @@ class PreserveLoggingContext(object):
)
+def nested_logging_context(suffix, parent_context=None):
+ """Creates a new logging context as a child of another.
+
+ The nested logging context will have a 'request' made up of the parent context's
+ request, plus the given suffix.
+
+ CPU/db usage stats will be added to the parent context's on exit.
+
+ Normal usage looks like:
+
+ with nested_logging_context(suffix):
+ # ... do stuff
+
+ Args:
+ suffix (str): suffix to add to the parent context's 'request'.
+ parent_context (LoggingContext|None): parent context. Will use the current context
+ if None.
+
+ Returns:
+ LoggingContext: new logging context.
+ """
+ if parent_context is None:
+ parent_context = LoggingContext.current_context()
+ return LoggingContext(
+ parent_context=parent_context,
+ request=parent_context.request + "-" + suffix,
+ )
+
+
def preserve_fn(f):
"""Function decorator which wraps the function with run_in_background"""
def g(*args, **kwargs):
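
A sketch of the request names nested_logging_context produces (both values below are hypothetical): the child context's `request` is the parent's request plus "-" plus the suffix.

    parent_request = "PUT-123"            # illustrative parent context request
    suffix = "$someevent:example.com"     # e.g. an event id
    print(parent_request + "-" + suffix)  # -> PUT-123-$someevent:example.com
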
diff --git a/synapse/visibility.py b/synapse/visibility.py
index d4680863d3..c64ad2144c 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -324,14 +324,13 @@ def filter_events_for_server(store, server_name, events):
# server's domain.
#
# event_to_state_ids contains lots of duplicates, so it turns out to be
- # cheaper to build a complete set of unique
- # ((type, state_key), event_id) tuples, and then filter out the ones we
- # don't want.
+ # cheaper to build a complete event_id => (type, state_key) dict, and then
+ # filter out the ones we don't want
#
- state_key_to_event_id_set = {
- e
+ event_id_to_state_key = {
+ event_id: key
for key_to_eid in itervalues(event_to_state_ids)
- for e in key_to_eid.items()
+ for key, event_id in iteritems(key_to_eid)
}
def include(typ, state_key):
@@ -346,7 +345,7 @@ def filter_events_for_server(store, server_name, events):
event_map = yield store.get_events([
e_id
- for key, e_id in state_key_to_event_id_set
+ for e_id, key in iteritems(event_id_to_state_key)
if include(key[0], key[1])
])
diff --git a/synctl b/synctl
index 356e5cb6a7..09b64459b1 100755
--- a/synctl
+++ b/synctl
@@ -280,7 +280,7 @@ def main():
if worker.cache_factor:
os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
- for cache_name, factor in worker.cache_factors.iteritems():
+ for cache_name, factor in iteritems(worker.cache_factors):
os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
start_worker(worker.app, configfile, worker.configfile)
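
dict.iteritems() no longer exists on Python 3; six.iteritems works on both interpreters, which is what the synctl fix relies on. A one-liner check (values illustrative):

    from six import iteritems

    cache_factors = {"get_users": 2.0, "get_rooms": 0.5}
    for cache_name, factor in iteritems(cache_factors):
        print("SYNAPSE_CACHE_FACTOR_" + cache_name.upper() + "=" + str(factor))
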
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index db44d33c68..41be5d5a1a 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from canonicaljson import encode_canonical_json
+
from synapse.events import FrozenEvent, _EventInternalMetadata
from synapse.events.snapshot import EventContext
from synapse.replication.slave.storage.events import SlavedEventStore
@@ -26,7 +28,9 @@ ROOM_ID = "!room:blue"
def dict_equals(self, other):
- return self.__dict__ == other.__dict__
+ me = encode_canonical_json(self._event_dict)
+ them = encode_canonical_json(other._event_dict)
+ return me == them
def patch__eq__(cls):
diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py
new file mode 100644
index 0000000000..95badc985e
--- /dev/null
+++ b/tests/server_notices/test_consent.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.rest.client.v1 import admin, login, room
+from synapse.rest.client.v2_alpha import sync
+
+from tests import unittest
+
+
+class ConsentNoticesTests(unittest.HomeserverTestCase):
+
+ servlets = [
+ sync.register_servlets,
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+
+ self.consent_notice_message = "consent %(consent_uri)s"
+ config = self.default_config()
+ config.user_consent_version = "1"
+ config.user_consent_server_notice_content = {
+ "msgtype": "m.text",
+ "body": self.consent_notice_message,
+ }
+ config.public_baseurl = "https://example.com/"
+ config.form_secret = "123abc"
+
+ config.server_notices_mxid = "@notices:test"
+ config.server_notices_mxid_display_name = "test display name"
+ config.server_notices_mxid_avatar_url = None
+ config.server_notices_room_name = "Server Notices"
+
+ hs = self.setup_test_homeserver(config=config)
+
+ return hs
+
+ def prepare(self, reactor, clock, hs):
+ self.user_id = self.register_user("bob", "abc123")
+ self.access_token = self.login("bob", "abc123")
+
+ def test_get_sync_message(self):
+ """
+ When user consent server notices are enabled, a sync will cause a notice
+ to fire (in a room to which the user is invited). The notice contains
+ the consent URL plus an authentication code.
+ """
+ # Initial sync, to get the user consent room invite
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/sync", access_token=self.access_token
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ # Get the Room ID to join
+ room_id = list(channel.json_body["rooms"]["invite"].keys())[0]
+
+ # Join the room
+ request, channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/rooms/" + room_id + "/join",
+ access_token=self.access_token,
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ # Sync again, to get the message in the room
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/sync", access_token=self.access_token
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ # Get the message
+ room = channel.json_body["rooms"]["join"][room_id]
+ messages = [
+ x for x in room["timeline"]["events"] if x["type"] == "m.room.message"
+ ]
+
+ # One message, with the consent URL
+ self.assertEqual(len(messages), 1)
+ self.assertTrue(
+ messages[0]["content"]["body"].startswith(
+ "consent https://example.com/_matrix/consent"
+ )
+ )
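A condensed sketch of the /sync response shapes the new consent test walks through (payloads illustrative, not full /sync bodies):

```python
# First sync: the server-notices invite shows up under rooms.invite.
first_sync = {"rooms": {"invite": {"!notices:test": {}}, "join": {}}}
room_id = list(first_sync["rooms"]["invite"].keys())[0]

# After joining, the consent notice is the only m.room.message in the
# room's timeline on the next sync.
second_sync_room = {
    "timeline": {
        "events": [
            {"type": "m.room.member", "content": {"membership": "join"}},
            {
                "type": "m.room.message",
                "content": {"body": "consent https://example.com/_matrix/consent?u=bob"},
            },
        ]
    }
}
messages = [
    e for e in second_sync_room["timeline"]["events"]
    if e["type"] == "m.room.message"
]
assert len(messages) == 1
assert messages[0]["content"]["body"].startswith(
    "consent https://example.com/_matrix/consent"
)
```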
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 2ffbb9f14f..4577e9422b 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -14,10 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import hashlib
-import hmac
-import json
-
from mock import Mock
from twisted.internet import defer
@@ -145,34 +141,8 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase):
return hs
def prepare(self, hs, reactor, clock):
- self.hs.config.registration_shared_secret = u"shared"
self.store = self.hs.get_datastore()
-
- # Create the user
- request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register")
- self.render(request)
- nonce = channel.json_body["nonce"]
-
- want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
- want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin")
- want_mac = want_mac.hexdigest()
-
- body = json.dumps(
- {
- "nonce": nonce,
- "username": "bob",
- "password": "abc123",
- "admin": True,
- "mac": want_mac,
- }
- )
- request, channel = self.make_request(
- "POST", "/_matrix/client/r0/admin/register", body.encode('utf8')
- )
- self.render(request)
-
- self.assertEqual(channel.code, 200)
- self.user_id = channel.json_body["user_id"]
+ self.user_id = self.register_user("bob", "abc123", True)
def test_request_with_xforwarded(self):
"""
@@ -194,20 +164,7 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase):
def _runtest(self, headers, expected_ip, make_request_args):
device_id = "bleb"
- body = json.dumps(
- {
- "type": "m.login.password",
- "user": "bob",
- "password": "abc123",
- "device_id": device_id,
- }
- )
- request, channel = self.make_request(
- "POST", "/_matrix/client/r0/login", body.encode('utf8'), **make_request_args
- )
- self.render(request)
- self.assertEqual(channel.code, 200)
- access_token = channel.json_body["access_token"].encode('ascii')
+ access_token = self.login("bob", "abc123", device_id=device_id)
# Advance to a known time
self.reactor.advance(123456 - self.reactor.seconds())
@@ -215,7 +172,6 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase):
request, channel = self.make_request(
"GET",
"/_matrix/client/r0/admin/users/" + self.user_id,
- body.encode('utf8'),
access_token=access_token,
**make_request_args
)
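The net effect of this hunk: roughly forty lines of hand-rolled registration and login plumbing collapse into the two HomeserverTestCase helpers introduced later in this diff (tests/unittest.py). A sketch of the resulting test shape, assuming the synapse test harness is importable:

```python
from tests.unittest import HomeserverTestCase

class ExampleAuthedTestCase(HomeserverTestCase):
    def prepare(self, hs, reactor, clock):
        self.store = self.hs.get_datastore()
        # Shared-secret registration, including the admin flag, in one call.
        self.user_id = self.register_user("bob", "abc123", True)

    def test_something(self):
        # Password login with a device_id, returning an access token.
        access_token = self.login("bob", "abc123", device_id="bleb")
        self.assertTrue(access_token)
```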
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index b910965932..b9c5b39d59 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -75,6 +75,45 @@ class StateStoreTestCase(tests.unittest.TestCase):
self.assertEqual(len(s1), len(s2))
@defer.inlineCallbacks
+ def test_get_state_groups_ids(self):
+ e1 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Create, '', {}
+ )
+ e2 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"}
+ )
+
+ state_group_map = yield self.store.get_state_groups_ids(self.room, [e2.event_id])
+ self.assertEqual(len(state_group_map), 1)
+ state_map = list(state_group_map.values())[0]
+ self.assertDictEqual(
+ state_map,
+ {
+ (EventTypes.Create, ''): e1.event_id,
+ (EventTypes.Name, ''): e2.event_id,
+ },
+ )
+
+ @defer.inlineCallbacks
+ def test_get_state_groups(self):
+ e1 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Create, '', {}
+ )
+ e2 = yield self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"}
+ )
+
+ state_group_map = yield self.store.get_state_groups(
+ self.room, [e2.event_id])
+ self.assertEqual(len(state_group_map), 1)
+ state_list = list(state_group_map.values())[0]
+
+ self.assertEqual(
+ {ev.event_id for ev in state_list},
+ {e1.event_id, e2.event_id},
+ )
+
+ @defer.inlineCallbacks
def test_get_state_for_event(self):
# this defaults to a linear DAG as each new injection defaults to whatever
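For reference, the return shape the first new test asserts on, with illustrative ids: get_state_groups_ids maps state group to a state map of event ids, while get_state_groups maps state group to a list of full events.

```python
# get_state_groups_ids: {state_group: {(event_type, state_key): event_id}}
state_group_map = {
    42: {
        ("m.room.create", ""): "$e1:test",
        ("m.room.name", ""): "$e2:test",
    }
}
assert len(state_group_map) == 1
state_map = list(state_group_map.values())[0]
assert state_map[("m.room.name", "")] == "$e2:test"
```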
diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py
new file mode 100644
index 0000000000..14169afa96
--- /dev/null
+++ b/tests/storage/test_transactions.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests.unittest import HomeserverTestCase
+
+
+class TransactionStoreTestCase(HomeserverTestCase):
+ def prepare(self, reactor, clock, homeserver):
+ self.store = homeserver.get_datastore()
+
+ def test_get_set_transactions(self):
+ """Tests that we can successfully get a non-existent entry for
+ destination retries, as well as testing tht we can set and get
+ correctly.
+ """
+ d = self.store.get_destination_retry_timings("example.com")
+ r = self.get_success(d)
+ self.assertIsNone(r)
+
+ d = self.store.set_destination_retry_timings("example.com", 50, 100)
+ self.get_success(d)
+
+ d = self.store.get_destination_retry_timings("example.com")
+ r = self.get_success(d)
+
+ self.assert_dict({"retry_last_ts": 50, "retry_interval": 100}, r)
+
+ def test_initial_set_transactions(self):
+ """Tests that we can successfully set the destination retries (there
+ was a bug around invalidating the cache that broke this).
+ """
+ d = self.store.set_destination_retry_timings("example.com", 50, 100)
+ self.get_success(d)
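assert_dict in the first test reads as a partial-dict comparison: each expected key/value must match in the returned row, which may carry extra columns. A stand-alone equivalent of that check (row contents illustrative):

```python
def assert_subset(expected, actual):
    """Check that every expected key/value pair is present in actual."""
    for key, value in expected.items():
        assert actual.get(key) == value, (key, value, actual.get(key))

# The destination-retry row carries more columns than the test cares about.
row = {"destination": "example.com", "retry_last_ts": 50, "retry_interval": 100}
assert_subset({"retry_last_ts": 50, "retry_interval": 100}, row)
```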
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 2540604fcc..952a0a7b51 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -6,6 +6,7 @@ from twisted.internet.defer import maybeDeferred, succeed
from synapse.events import FrozenEvent
from synapse.types import Requester, UserID
from synapse.util import Clock
+from synapse.util.logcontext import LoggingContext
from tests import unittest
from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver
@@ -117,9 +118,10 @@ class MessageAcceptTests(unittest.TestCase):
}
)
- d = self.handler.on_receive_pdu(
- "test.serv", lying_event, sent_to_us_directly=True
- )
+ with LoggingContext(request="lying_event"):
+ d = self.handler.on_receive_pdu(
+ "test.serv", lying_event, sent_to_us_directly=True
+ )
# Step the reactor, so the database fetches come back
self.reactor.advance(1)
@@ -139,107 +141,3 @@ class MessageAcceptTests(unittest.TestCase):
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
-
- def test_cant_hide_past_history(self):
- """
- If you send a message, you must be able to provide the direct
- prev_events that said event references.
- """
-
- def post_json(destination, path, data, headers=None, timeout=0):
- if path.startswith("/_matrix/federation/v1/get_missing_events/"):
- return {
- "events": [
- {
- "room_id": self.room_id,
- "sender": "@baduser:test.serv",
- "event_id": "three:test.serv",
- "depth": 1000,
- "origin_server_ts": 1,
- "type": "m.room.message",
- "origin": "test.serv",
- "content": "hewwo?",
- "auth_events": [],
- "prev_events": [("four:test.serv", {})],
- }
- ]
- }
-
- self.http_client.post_json = post_json
-
- def get_json(destination, path, args, headers=None):
- if path.startswith("/_matrix/federation/v1/state_ids/"):
- d = self.successResultOf(
- self.homeserver.datastore.get_state_ids_for_event("one:test.serv")
- )
-
- return succeed(
- {
- "pdu_ids": [
- y
- for x, y in d.items()
- if x == ("m.room.member", "@us:test")
- ],
- "auth_chain_ids": list(d.values()),
- }
- )
-
- self.http_client.get_json = get_json
-
- # Figure out what the most recent event is
- most_recent = self.successResultOf(
- maybeDeferred(
- self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
- )
- )[0]
-
- # Make a good event
- good_event = FrozenEvent(
- {
- "room_id": self.room_id,
- "sender": "@baduser:test.serv",
- "event_id": "one:test.serv",
- "depth": 1000,
- "origin_server_ts": 1,
- "type": "m.room.message",
- "origin": "test.serv",
- "content": "hewwo?",
- "auth_events": [],
- "prev_events": [(most_recent, {})],
- }
- )
-
- d = self.handler.on_receive_pdu(
- "test.serv", good_event, sent_to_us_directly=True
- )
- self.reactor.advance(1)
- self.assertEqual(self.successResultOf(d), None)
-
- bad_event = FrozenEvent(
- {
- "room_id": self.room_id,
- "sender": "@baduser:test.serv",
- "event_id": "two:test.serv",
- "depth": 1000,
- "origin_server_ts": 1,
- "type": "m.room.message",
- "origin": "test.serv",
- "content": "hewwo?",
- "auth_events": [],
- "prev_events": [("one:test.serv", {}), ("three:test.serv", {})],
- }
- )
-
- d = self.handler.on_receive_pdu(
- "test.serv", bad_event, sent_to_us_directly=True
- )
- self.reactor.advance(1)
-
- extrem = maybeDeferred(
- self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
- )
- self.assertEqual(self.successResultOf(extrem)[0], "two:test.serv")
-
- state = self.homeserver.get_state_handler().get_current_state_ids(self.room_id)
- self.reactor.advance(1)
- self.assertIn(("m.room.member", "@us:test"), self.successResultOf(state).keys())
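The surviving test now drives on_receive_pdu from inside a named LoggingContext, so log lines emitted while handling the PDU are attributed to a request rather than the sentinel context. The pattern, with the handler stubbed out for illustration:

```python
from synapse.util.logcontext import LoggingContext

def fake_on_receive_pdu(origin, event, sent_to_us_directly=False):
    # Stand-in for FederationHandler.on_receive_pdu.
    return None

with LoggingContext(request="lying_event"):
    fake_on_receive_pdu(
        "test.serv", {"event_id": "one:test.serv"}, sent_to_us_directly=True
    )
```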
diff --git a/tests/unittest.py b/tests/unittest.py
index ef905e6389..a59291cc60 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import hashlib
+import hmac
import logging
from mock import Mock
@@ -32,6 +34,7 @@ from synapse.types import UserID, create_requester
from synapse.util.logcontext import LoggingContextFilter
from tests.server import get_clock, make_request, render, setup_test_homeserver
+from tests.utils import default_config
# Set up putting Synapse's logs into Trial's.
rootLogger = logging.getLogger()
@@ -223,6 +226,15 @@ class HomeserverTestCase(TestCase):
hs = self.setup_test_homeserver()
return hs
+ def default_config(self, name="test"):
+ """
+ Get a default HomeServer config object.
+
+ Args:
+ name (str): The homeserver name/domain.
+ """
+ return default_config(name)
+
def prepare(self, reactor, clock, homeserver):
"""
Prepare for the test. This involves things like mocking out parts of
@@ -297,3 +309,69 @@ class HomeserverTestCase(TestCase):
return d
self.pump()
return self.successResultOf(d)
+
+ def register_user(self, username, password, admin=False):
+ """
+ Register a user. Requires the Admin API to be registered.
+
+ Args:
+ username (bytes/unicode): The user part of the new user.
+ password (bytes/unicode): The password of the new user.
+ admin (bool): Whether the user should be created as an admin
+ or not.
+
+ Returns:
+ The MXID of the new user (unicode).
+ """
+ self.hs.config.registration_shared_secret = u"shared"
+
+ # Create the user
+ request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register")
+ self.render(request)
+ nonce = channel.json_body["nonce"]
+
+ want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+ nonce_str = b"\x00".join([username.encode('utf8'), password.encode('utf8')])
+ if admin:
+ nonce_str += b"\x00admin"
+ else:
+ nonce_str += b"\x00notadmin"
+ want_mac.update(nonce.encode('ascii') + b"\x00" + nonce_str)
+ want_mac = want_mac.hexdigest()
+
+ body = json.dumps(
+ {
+ "nonce": nonce,
+ "username": username,
+ "password": password,
+ "admin": admin,
+ "mac": want_mac,
+ }
+ )
+ request, channel = self.make_request(
+ "POST", "/_matrix/client/r0/admin/register", body.encode('utf8')
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ user_id = channel.json_body["user_id"]
+ return user_id
+
+ def login(self, username, password, device_id=None):
+ """
+ Log in a user and get an access token. Requires the Login API to be
+ registered.
+
+ """
+ body = {"type": "m.login.password", "user": username, "password": password}
+ if device_id:
+ body["device_id"] = device_id
+
+ request, channel = self.make_request(
+ "POST", "/_matrix/client/r0/login", json.dumps(body).encode('utf8')
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 200)
+
+ access_token = channel.json_body["access_token"].encode('ascii')
+ return access_token
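A standalone sketch of the MAC that the new register_user helper computes for /_matrix/client/r0/admin/register: a SHA-1 HMAC over the NUL-joined nonce, username, password, and admin flag, keyed on the registration shared secret (values illustrative):

```python
import hashlib
import hmac

shared_secret = b"shared"
nonce = "abcd1234"  # as returned by GET .../admin/register
username, password, admin = "bob", "abc123", True

mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
mac.update(
    b"\x00".join(
        [
            nonce.encode("ascii"),
            username.encode("utf8"),
            password.encode("utf8"),
            b"admin" if admin else b"notadmin",
        ]
    )
)
print(mac.hexdigest())  # goes into the "mac" field of the POST body
```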
diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py
index 4633db77b3..8adaee3c8d 100644
--- a/tests/util/test_logcontext.py
+++ b/tests/util/test_logcontext.py
@@ -159,6 +159,11 @@ class LoggingContextTestCase(unittest.TestCase):
self.assertEqual(r, "bum")
self._check_test_key("one")
+ def test_nested_logging_context(self):
+ with LoggingContext(request="foo"):
+ nested_context = logcontext.nested_logging_context(suffix="bar")
+ self.assertEqual(nested_context.request, "foo-bar")
+
# a function which returns a deferred which has been "called", but
# which had a function which returned another incomplete deferred on
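The new test pins down nested_logging_context's naming scheme: the child context's request name is the parent's name plus "-&lt;suffix&gt;". Usage sketch, mirroring the test:

```python
from synapse.util import logcontext
from synapse.util.logcontext import LoggingContext

with LoggingContext(request="foo"):
    # e.g. one child context per retry attempt: "foo-0", "foo-1", ...
    child = logcontext.nested_logging_context(suffix="bar")
    assert child.request == "foo-bar"
```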
diff --git a/tests/utils.py b/tests/utils.py
index aaed1149c3..dd347a0c59 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -96,6 +96,64 @@ def setupdb():
atexit.register(_cleanup)
+def default_config(name):
+ """
+ Create a reasonable test config.
+ """
+ config = Mock()
+ config.signing_key = [MockKey()]
+ config.event_cache_size = 1
+ config.enable_registration = True
+ config.macaroon_secret_key = "not even a little secret"
+ config.expire_access_token = False
+ config.server_name = name
+ config.trusted_third_party_id_servers = []
+ config.room_invite_state_types = []
+ config.password_providers = []
+ config.worker_replication_url = ""
+ config.worker_app = None
+ config.email_enable_notifs = False
+ config.block_non_admin_invites = False
+ config.federation_domain_whitelist = None
+ config.federation_rc_reject_limit = 10
+ config.federation_rc_sleep_limit = 10
+ config.federation_rc_sleep_delay = 100
+ config.federation_rc_concurrent = 10
+ config.filter_timeline_limit = 5000
+ config.user_directory_search_all_users = False
+ config.user_consent_server_notice_content = None
+ config.block_events_without_consent_error = None
+ config.media_storage_providers = []
+ config.auto_join_rooms = []
+ config.limit_usage_by_mau = False
+ config.hs_disabled = False
+ config.hs_disabled_message = ""
+ config.hs_disabled_limit_type = ""
+ config.max_mau_value = 50
+ config.mau_trial_days = 0
+ config.mau_limits_reserved_threepids = []
+ config.admin_contact = None
+ config.rc_messages_per_second = 10000
+ config.rc_message_burst_count = 10000
+
+ config.use_frozen_dicts = False
+
+ # we need a sane default_room_version, otherwise attempts to create rooms will
+ # fail.
+ config.default_room_version = "1"
+
+ # disable user directory updates, because they get done in the
+ # background, which upsets the test runner.
+ config.update_user_directory = False
+
+ def is_threepid_reserved(threepid):
+ return ServerConfig.is_threepid_reserved(config, threepid)
+
+ config.is_threepid_reserved.side_effect = is_threepid_reserved
+
+ return config
+
+
class TestHomeServer(HomeServer):
DATASTORE_CLASS = DataStore
@@ -124,56 +182,8 @@ def setup_test_homeserver(
from twisted.internet import reactor
if config is None:
- config = Mock()
- config.signing_key = [MockKey()]
- config.event_cache_size = 1
- config.enable_registration = True
- config.macaroon_secret_key = "not even a little secret"
- config.expire_access_token = False
- config.server_name = name
- config.trusted_third_party_id_servers = []
- config.room_invite_state_types = []
- config.password_providers = []
- config.worker_replication_url = ""
- config.worker_app = None
- config.email_enable_notifs = False
- config.block_non_admin_invites = False
- config.federation_domain_whitelist = None
- config.federation_rc_reject_limit = 10
- config.federation_rc_sleep_limit = 10
- config.federation_rc_sleep_delay = 100
- config.federation_rc_concurrent = 10
- config.filter_timeline_limit = 5000
- config.user_directory_search_all_users = False
- config.user_consent_server_notice_content = None
- config.block_events_without_consent_error = None
- config.media_storage_providers = []
- config.auto_join_rooms = []
- config.limit_usage_by_mau = False
- config.hs_disabled = False
- config.hs_disabled_message = ""
- config.hs_disabled_limit_type = ""
- config.max_mau_value = 50
- config.mau_trial_days = 0
- config.mau_limits_reserved_threepids = []
- config.admin_contact = None
- config.rc_messages_per_second = 10000
- config.rc_message_burst_count = 10000
-
- # we need a sane default_room_version, otherwise attempts to create rooms will
- # fail.
- config.default_room_version = "1"
-
- # disable user directory updates, because they get done in the
- # background, which upsets the test runner.
- config.update_user_directory = False
-
- def is_threepid_reserved(threepid):
- return ServerConfig.is_threepid_reserved(config, threepid)
-
- config.is_threepid_reserved.side_effect = is_threepid_reserved
-
- config.use_frozen_dicts = True
+ config = default_config(name)
+
config.ldap_enabled = False
if "clock" not in kargs:
diff --git a/tox.ini b/tox.ini
index e4db563b4b..87b5e4782d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -64,6 +64,26 @@ setenv =
{[base]setenv}
SYNAPSE_POSTGRES = 1
+# A test suite for the oldest supported versions of Python libraries, to catch
+# any uses of APIs not available in them.
+[testenv:py27-old]
+skip_install=True
+deps =
+ # Old automat version for Twisted
+ Automat == 0.3.0
+
+ mock
+ lxml
+commands =
+ /usr/bin/find "{toxinidir}" -name '*.pyc' -delete
+ # Replace all greater-than pins with equals so we test the oldest versions
+ # of our direct dependencies, but pin pyopenssl to 17.0.0, which works with
+ # an OpenSSL 1.1-compiled cryptography (older versions don't build on Travis).
+ /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs pip install'
+ # Install Synapse itself. This won't update any libraries.
+ pip install -e .
+ {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
+
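What the sed pipeline in the py27-old commands does, sketched in Python (dependency specs illustrative): pin every ">=" to "==", drop the psycopg2 pin, and bump pyopenssl from 16.0.0 to 17.0.0.

```python
deps = ["jsonschema>=2.5.1", "psycopg2>=2.6", "pyopenssl>=16.0.0"]  # illustrative

pinned = []
for dep in deps:
    dep = dep.replace(">=", "==")
    if dep == "psycopg2==2.6":
        continue  # sed deletes this spec outright
    if dep == "pyopenssl==16.0.0":
        dep = "pyopenssl==17.0.0"
    pinned.append(dep)

assert pinned == ["jsonschema==2.5.1", "pyopenssl==17.0.0"]
```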
[testenv:py35]
usedevelop=true
|