From 5b68816de9e9861f5113e99cc4c8f0779829db6b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 21 Jul 2021 13:48:06 -0400 Subject: Fix the hierarchy of OpenID providers in the docs. (#10445) --- docs/openid.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'docs') diff --git a/docs/openid.md b/docs/openid.md index cfaafc5015..f685fd551a 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -410,7 +410,7 @@ oidc_providers: display_name_template: "{{ user.name }}" ``` -## Apple +### Apple Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account. -- cgit 1.5.1 From d518b05a8667943bd0aa9ab1edc91eec0a8283fe Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 22 Jul 2021 05:58:24 -0500 Subject: Move dev/ docs to development/ (#10453) --- CONTRIBUTING.md | 2 +- changelog.d/10453.doc | 1 + docs/SUMMARY.md | 6 +- docs/dev/cas.md | 64 --------------- docs/dev/git.md | 148 ---------------------------------- docs/dev/git/branches.jpg | Bin 72228 -> 0 bytes docs/dev/git/clean.png | Bin 110840 -> 0 bytes docs/dev/git/squash.png | Bin 29667 -> 0 bytes docs/dev/saml.md | 41 ---------- docs/development/cas.md | 64 +++++++++++++++ docs/development/git.md | 148 ++++++++++++++++++++++++++++++++++ docs/development/img/git/branches.jpg | Bin 0 -> 72228 bytes docs/development/img/git/clean.png | Bin 0 -> 110840 bytes docs/development/img/git/squash.png | Bin 0 -> 29667 bytes docs/development/saml.md | 41 ++++++++++ 15 files changed, 258 insertions(+), 257 deletions(-) create mode 100644 changelog.d/10453.doc delete mode 100644 docs/dev/cas.md delete mode 100644 docs/dev/git.md delete mode 100644 docs/dev/git/branches.jpg delete mode 100644 docs/dev/git/clean.png delete mode 100644 docs/dev/git/squash.png delete mode 100644 docs/dev/saml.md create mode 100644 docs/development/cas.md create mode 100644 docs/development/git.md create mode 100644 docs/development/img/git/branches.jpg create mode 100644 docs/development/img/git/clean.png create mode 100644 docs/development/img/git/squash.png create mode 100644 docs/development/saml.md (limited to 'docs') diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a4e6688042..80ef6aa235 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -392,7 +392,7 @@ By now, you know the drill! # Notes for maintainers on merging PRs etc There are some notes for those with commit access to the project on how we -manage git [here](docs/dev/git.md). +manage git [here](docs/development/git.md). # Conclusion diff --git a/changelog.d/10453.doc b/changelog.d/10453.doc new file mode 100644 index 0000000000..5d4db9bca2 --- /dev/null +++ b/changelog.d/10453.doc @@ -0,0 +1 @@ +Consolidate development documentation to `docs/development/`. 
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index db4ef1a44e..f1bde91420 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -67,7 +67,7 @@ # Development - [Contributing Guide](development/contributing_guide.md) - [Code Style](code_style.md) - - [Git Usage](dev/git.md) + - [Git Usage](development/git.md) - [Testing]() - [OpenTracing](opentracing.md) - [Database Schemas](development/database_schema.md) @@ -77,8 +77,8 @@ - [TCP Replication](tcp_replication.md) - [Internal Documentation](development/internal_documentation/README.md) - [Single Sign-On]() - - [SAML](dev/saml.md) - - [CAS](dev/cas.md) + - [SAML](development/saml.md) + - [CAS](development/cas.md) - [State Resolution]() - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md) - [Media Repository](media_repository.md) diff --git a/docs/dev/cas.md b/docs/dev/cas.md deleted file mode 100644 index 592b2d8d4f..0000000000 --- a/docs/dev/cas.md +++ /dev/null @@ -1,64 +0,0 @@ -# How to test CAS as a developer without a server - -The [django-mama-cas](https://github.com/jbittel/django-mama-cas) project is an -easy to run CAS implementation built on top of Django. - -## Prerequisites - -1. Create a new virtualenv: `python3 -m venv ` -2. Activate your virtualenv: `source /path/to/your/virtualenv/bin/activate` -3. Install Django and django-mama-cas: - ``` - python -m pip install "django<3" "django-mama-cas==2.4.0" - ``` -4. Create a Django project in the current directory: - ``` - django-admin startproject cas_test . - ``` -5. Follow the [install directions](https://django-mama-cas.readthedocs.io/en/latest/installation.html#configuring) for django-mama-cas -6. Setup the SQLite database: `python manage.py migrate` -7. Create a user: - ``` - python manage.py createsuperuser - ``` - 1. Use whatever you want as the username and password. - 2. Leave the other fields blank. -8. Use the built-in Django test server to serve the CAS endpoints on port 8000: - ``` - python manage.py runserver - ``` - -You should now have a Django project configured to serve CAS authentication with -a single user created. - -## Configure Synapse (and Element) to use CAS - -1. Modify your `homeserver.yaml` to enable CAS and point it to your locally - running Django test server: - ```yaml - cas_config: - enabled: true - server_url: "http://localhost:8000" - service_url: "http://localhost:8081" - #displayname_attribute: name - #required_attributes: - # name: value - ``` -2. Restart Synapse. - -Note that the above configuration assumes the homeserver is running on port 8081 -and that the CAS server is on port 8000, both on localhost. - -## Testing the configuration - -Then in Element: - -1. Visit the login page with a Element pointing at your homeserver. -2. Click the Single Sign-On button. -3. Login using the credentials created with `createsuperuser`. -4. You should be logged in. - -If you want to repeat this process you'll need to manually logout first: - -1. http://localhost:8000/admin/ -2. Click "logout" in the top right. diff --git a/docs/dev/git.md b/docs/dev/git.md deleted file mode 100644 index 87950f07b2..0000000000 --- a/docs/dev/git.md +++ /dev/null @@ -1,148 +0,0 @@ -Some notes on how we use git -============================ - -On keeping the commit history clean ------------------------------------ - -In an ideal world, our git commit history would be a linear progression of -commits each of which contains a single change building on what came -before. 
Here, by way of an arbitrary example, is the top of `git log --graph -b2dba0607`: - -clean git graph - -Note how the commit comment explains clearly what is changing and why. Also -note the *absence* of merge commits, as well as the absence of commits called -things like (to pick a few culprits): -[“pep8”](https://github.com/matrix-org/synapse/commit/84691da6c), [“fix broken -test”](https://github.com/matrix-org/synapse/commit/474810d9d), -[“oops”](https://github.com/matrix-org/synapse/commit/c9d72e457), -[“typo”](https://github.com/matrix-org/synapse/commit/836358823), or [“Who's -the president?”](https://github.com/matrix-org/synapse/commit/707374d5d). - -There are a number of reasons why keeping a clean commit history is a good -thing: - - * From time to time, after a change lands, it turns out to be necessary to - revert it, or to backport it to a release branch. Those operations are - *much* easier when the change is contained in a single commit. - - * Similarly, it's much easier to answer questions like “is the fix for - `/publicRooms` on the release branch?” if that change consists of a single - commit. - - * Likewise: “what has changed on this branch in the last week?” is much - clearer without merges and “pep8” commits everywhere. - - * Sometimes we need to figure out where a bug got introduced, or some - behaviour changed. One way of doing that is with `git bisect`: pick an - arbitrary commit between the known good point and the known bad point, and - see how the code behaves. However, that strategy fails if the commit you - chose is the middle of someone's epic branch in which they broke the world - before putting it back together again. - -One counterargument is that it is sometimes useful to see how a PR evolved as -it went through review cycles. This is true, but that information is always -available via the GitHub UI (or via the little-known [refs/pull -namespace](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/checking-out-pull-requests-locally)). - - -Of course, in reality, things are more complicated than that. We have release -branches as well as `develop` and `master`, and we deliberately merge changes -between them. Bugs often slip through and have to be fixed later. That's all -fine: this not a cast-iron rule which must be obeyed, but an ideal to aim -towards. - -Merges, squashes, rebases: wtf? -------------------------------- - -Ok, so that's what we'd like to achieve. How do we achieve it? - -The TL;DR is: when you come to merge a pull request, you *probably* want to -“squash and merge”: - -![squash and merge](git/squash.png). - -(This applies whether you are merging your own PR, or that of another -contributor.) - -“Squash and merge”[1](#f1) takes all of the changes in the -PR, and bundles them into a single commit. GitHub gives you the opportunity to -edit the commit message before you confirm, and normally you should do so, -because the default will be useless (again: `* woops typo` is not a useful -thing to keep in the historical record). - -The main problem with this approach comes when you have a series of pull -requests which build on top of one another: as soon as you squash-merge the -first PR, you'll end up with a stack of conflicts to resolve in all of the -others. In general, it's best to avoid this situation in the first place by -trying not to have multiple related PRs in flight at the same time. Still, -sometimes that's not possible and doing a regular merge is the lesser evil. 
- -Another occasion in which a regular merge makes more sense is a PR where you've -deliberately created a series of commits each of which makes sense in its own -right. For example: [a PR which gradually propagates a refactoring operation -through the codebase](https://github.com/matrix-org/synapse/pull/6837), or [a -PR which is the culmination of several other -PRs](https://github.com/matrix-org/synapse/pull/5987). In this case the ability -to figure out when a particular change/bug was introduced could be very useful. - -Ultimately: **this is not a hard-and-fast-rule**. If in doubt, ask yourself “do -each of the commits I am about to merge make sense in their own right”, but -remember that we're just doing our best to balance “keeping the commit history -clean” with other factors. - -Git branching model -------------------- - -A [lot](https://nvie.com/posts/a-successful-git-branching-model/) -[of](http://scottchacon.com/2011/08/31/github-flow.html) -[words](https://www.endoflineblog.com/gitflow-considered-harmful) have been -written in the past about git branching models (no really, [a -lot](https://martinfowler.com/articles/branching-patterns.html)). I tend to -think the whole thing is overblown. Fundamentally, it's not that -complicated. Here's how we do it. - -Let's start with a picture: - -![branching model](git/branches.jpg) - -It looks complicated, but it's really not. There's one basic rule: *anyone* is -free to merge from *any* more-stable branch to *any* less-stable branch at -*any* time[2](#f2). (The principle behind this is that if a -change is good enough for the more-stable branch, then it's also good enough go -put in a less-stable branch.) - -Meanwhile, merging (or squashing, as per the above) from a less-stable to a -more-stable branch is a deliberate action in which you want to publish a change -or a set of changes to (some subset of) the world: for example, this happens -when a PR is landed, or as part of our release process. - -So, what counts as a more- or less-stable branch? A little reflection will show -that our active branches are ordered thus, from more-stable to less-stable: - - * `master` (tracks our last release). - * `release-vX.Y` (the branch where we prepare the next release)[3](#f3). - * PR branches which are targeting the release. - * `develop` (our "mainline" branch containing our bleeding-edge). - * regular PR branches. - -The corollary is: if you have a bugfix that needs to land in both -`release-vX.Y` *and* `develop`, then you should base your PR on -`release-vX.Y`, get it merged there, and then merge from `release-vX.Y` to -`develop`. (If a fix lands in `develop` and we later need it in a -release-branch, we can of course cherry-pick it, but landing it in the release -branch first helps reduce the chance of annoying conflicts.) - ---- - -[1]: “Squash and merge” is GitHub's term for this -operation. Given that there is no merge involved, I'm not convinced it's the -most intuitive name. [^](#a1) - -[2]: Well, anyone with commit access.[^](#a2) - -[3]: Very, very occasionally (I think this has happened once in -the history of Synapse), we've had two releases in flight at once. Obviously, -`release-v1.2` is more-stable than `release-v1.3`. 
[^](#a3) diff --git a/docs/dev/git/branches.jpg b/docs/dev/git/branches.jpg deleted file mode 100644 index 715ecc8cd0..0000000000 Binary files a/docs/dev/git/branches.jpg and /dev/null differ diff --git a/docs/dev/git/clean.png b/docs/dev/git/clean.png deleted file mode 100644 index 3accd7ccef..0000000000 Binary files a/docs/dev/git/clean.png and /dev/null differ diff --git a/docs/dev/git/squash.png b/docs/dev/git/squash.png deleted file mode 100644 index 234caca3e4..0000000000 Binary files a/docs/dev/git/squash.png and /dev/null differ diff --git a/docs/dev/saml.md b/docs/dev/saml.md deleted file mode 100644 index a9bfd2dc05..0000000000 --- a/docs/dev/saml.md +++ /dev/null @@ -1,41 +0,0 @@ -# How to test SAML as a developer without a server - -https://capriza.github.io/samling/samling.html (https://github.com/capriza/samling) is a great -resource for being able to tinker with the SAML options within Synapse without needing to -deploy and configure a complicated software stack. - -To make Synapse (and therefore Riot) use it: - -1. Use the samling.html URL above or deploy your own and visit the IdP Metadata tab. -2. Copy the XML to your clipboard. -3. On your Synapse server, create a new file `samling.xml` next to your `homeserver.yaml` with - the XML from step 2 as the contents. -4. Edit your `homeserver.yaml` to include: - ```yaml - saml2_config: - sp_config: - allow_unknown_attributes: true # Works around a bug with AVA Hashes: https://github.com/IdentityPython/pysaml2/issues/388 - metadata: - local: ["samling.xml"] - ``` -5. Ensure that your `homeserver.yaml` has a setting for `public_baseurl`: - ```yaml - public_baseurl: http://localhost:8080/ - ``` -6. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure - the dependencies are installed and ready to go. -7. Restart Synapse. - -Then in Riot: - -1. Visit the login page with a Riot pointing at your homeserver. -2. Click the Single Sign-On button. -3. On the samling page, enter a Name Identifier and add a SAML Attribute for `uid=your_localpart`. - The response must also be signed. -4. Click "Next". -5. Click "Post Response" (change nothing). -6. You should be logged in. - -If you try and repeat this process, you may be automatically logged in using the information you -gave previously. To fix this, open your developer console (`F12` or `Ctrl+Shift+I`) while on the -samling page and clear the site data. In Chrome, this will be a button on the Application tab. diff --git a/docs/development/cas.md b/docs/development/cas.md new file mode 100644 index 0000000000..592b2d8d4f --- /dev/null +++ b/docs/development/cas.md @@ -0,0 +1,64 @@ +# How to test CAS as a developer without a server + +The [django-mama-cas](https://github.com/jbittel/django-mama-cas) project is an +easy to run CAS implementation built on top of Django. + +## Prerequisites + +1. Create a new virtualenv: `python3 -m venv ` +2. Activate your virtualenv: `source /path/to/your/virtualenv/bin/activate` +3. Install Django and django-mama-cas: + ``` + python -m pip install "django<3" "django-mama-cas==2.4.0" + ``` +4. Create a Django project in the current directory: + ``` + django-admin startproject cas_test . + ``` +5. Follow the [install directions](https://django-mama-cas.readthedocs.io/en/latest/installation.html#configuring) for django-mama-cas +6. Setup the SQLite database: `python manage.py migrate` +7. Create a user: + ``` + python manage.py createsuperuser + ``` + 1. Use whatever you want as the username and password. + 2. 
Leave the other fields blank. +8. Use the built-in Django test server to serve the CAS endpoints on port 8000: + ``` + python manage.py runserver + ``` + +You should now have a Django project configured to serve CAS authentication with +a single user created. + +## Configure Synapse (and Element) to use CAS + +1. Modify your `homeserver.yaml` to enable CAS and point it to your locally + running Django test server: + ```yaml + cas_config: + enabled: true + server_url: "http://localhost:8000" + service_url: "http://localhost:8081" + #displayname_attribute: name + #required_attributes: + # name: value + ``` +2. Restart Synapse. + +Note that the above configuration assumes the homeserver is running on port 8081 +and that the CAS server is on port 8000, both on localhost. + +## Testing the configuration + +Then in Element: + +1. Visit the login page with a Element pointing at your homeserver. +2. Click the Single Sign-On button. +3. Login using the credentials created with `createsuperuser`. +4. You should be logged in. + +If you want to repeat this process you'll need to manually logout first: + +1. http://localhost:8000/admin/ +2. Click "logout" in the top right. diff --git a/docs/development/git.md b/docs/development/git.md new file mode 100644 index 0000000000..9b1ed54b65 --- /dev/null +++ b/docs/development/git.md @@ -0,0 +1,148 @@ +Some notes on how we use git +============================ + +On keeping the commit history clean +----------------------------------- + +In an ideal world, our git commit history would be a linear progression of +commits each of which contains a single change building on what came +before. Here, by way of an arbitrary example, is the top of `git log --graph +b2dba0607`: + +clean git graph + +Note how the commit comment explains clearly what is changing and why. Also +note the *absence* of merge commits, as well as the absence of commits called +things like (to pick a few culprits): +[“pep8”](https://github.com/matrix-org/synapse/commit/84691da6c), [“fix broken +test”](https://github.com/matrix-org/synapse/commit/474810d9d), +[“oops”](https://github.com/matrix-org/synapse/commit/c9d72e457), +[“typo”](https://github.com/matrix-org/synapse/commit/836358823), or [“Who's +the president?”](https://github.com/matrix-org/synapse/commit/707374d5d). + +There are a number of reasons why keeping a clean commit history is a good +thing: + + * From time to time, after a change lands, it turns out to be necessary to + revert it, or to backport it to a release branch. Those operations are + *much* easier when the change is contained in a single commit. + + * Similarly, it's much easier to answer questions like “is the fix for + `/publicRooms` on the release branch?” if that change consists of a single + commit. + + * Likewise: “what has changed on this branch in the last week?” is much + clearer without merges and “pep8” commits everywhere. + + * Sometimes we need to figure out where a bug got introduced, or some + behaviour changed. One way of doing that is with `git bisect`: pick an + arbitrary commit between the known good point and the known bad point, and + see how the code behaves. However, that strategy fails if the commit you + chose is the middle of someone's epic branch in which they broke the world + before putting it back together again. + +One counterargument is that it is sometimes useful to see how a PR evolved as +it went through review cycles. 
This is true, but that information is always +available via the GitHub UI (or via the little-known [refs/pull +namespace](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/checking-out-pull-requests-locally)). + + +Of course, in reality, things are more complicated than that. We have release +branches as well as `develop` and `master`, and we deliberately merge changes +between them. Bugs often slip through and have to be fixed later. That's all +fine: this not a cast-iron rule which must be obeyed, but an ideal to aim +towards. + +Merges, squashes, rebases: wtf? +------------------------------- + +Ok, so that's what we'd like to achieve. How do we achieve it? + +The TL;DR is: when you come to merge a pull request, you *probably* want to +“squash and merge”: + +![squash and merge](img/git/squash.png). + +(This applies whether you are merging your own PR, or that of another +contributor.) + +“Squash and merge”[1](#f1) takes all of the changes in the +PR, and bundles them into a single commit. GitHub gives you the opportunity to +edit the commit message before you confirm, and normally you should do so, +because the default will be useless (again: `* woops typo` is not a useful +thing to keep in the historical record). + +The main problem with this approach comes when you have a series of pull +requests which build on top of one another: as soon as you squash-merge the +first PR, you'll end up with a stack of conflicts to resolve in all of the +others. In general, it's best to avoid this situation in the first place by +trying not to have multiple related PRs in flight at the same time. Still, +sometimes that's not possible and doing a regular merge is the lesser evil. + +Another occasion in which a regular merge makes more sense is a PR where you've +deliberately created a series of commits each of which makes sense in its own +right. For example: [a PR which gradually propagates a refactoring operation +through the codebase](https://github.com/matrix-org/synapse/pull/6837), or [a +PR which is the culmination of several other +PRs](https://github.com/matrix-org/synapse/pull/5987). In this case the ability +to figure out when a particular change/bug was introduced could be very useful. + +Ultimately: **this is not a hard-and-fast-rule**. If in doubt, ask yourself “do +each of the commits I am about to merge make sense in their own right”, but +remember that we're just doing our best to balance “keeping the commit history +clean” with other factors. + +Git branching model +------------------- + +A [lot](https://nvie.com/posts/a-successful-git-branching-model/) +[of](http://scottchacon.com/2011/08/31/github-flow.html) +[words](https://www.endoflineblog.com/gitflow-considered-harmful) have been +written in the past about git branching models (no really, [a +lot](https://martinfowler.com/articles/branching-patterns.html)). I tend to +think the whole thing is overblown. Fundamentally, it's not that +complicated. Here's how we do it. + +Let's start with a picture: + +![branching model](img/git/branches.jpg) + +It looks complicated, but it's really not. There's one basic rule: *anyone* is +free to merge from *any* more-stable branch to *any* less-stable branch at +*any* time[2](#f2). (The principle behind this is that if a +change is good enough for the more-stable branch, then it's also good enough go +put in a less-stable branch.) 
+ +Meanwhile, merging (or squashing, as per the above) from a less-stable to a +more-stable branch is a deliberate action in which you want to publish a change +or a set of changes to (some subset of) the world: for example, this happens +when a PR is landed, or as part of our release process. + +So, what counts as a more- or less-stable branch? A little reflection will show +that our active branches are ordered thus, from more-stable to less-stable: + + * `master` (tracks our last release). + * `release-vX.Y` (the branch where we prepare the next release)[3](#f3). + * PR branches which are targeting the release. + * `develop` (our "mainline" branch containing our bleeding-edge). + * regular PR branches. + +The corollary is: if you have a bugfix that needs to land in both +`release-vX.Y` *and* `develop`, then you should base your PR on +`release-vX.Y`, get it merged there, and then merge from `release-vX.Y` to +`develop`. (If a fix lands in `develop` and we later need it in a +release-branch, we can of course cherry-pick it, but landing it in the release +branch first helps reduce the chance of annoying conflicts.) + +--- + +[1]: “Squash and merge” is GitHub's term for this +operation. Given that there is no merge involved, I'm not convinced it's the +most intuitive name. [^](#a1) + +[2]: Well, anyone with commit access.[^](#a2) + +[3]: Very, very occasionally (I think this has happened once in +the history of Synapse), we've had two releases in flight at once. Obviously, +`release-v1.2` is more-stable than `release-v1.3`. [^](#a3) diff --git a/docs/development/img/git/branches.jpg b/docs/development/img/git/branches.jpg new file mode 100644 index 0000000000..715ecc8cd0 Binary files /dev/null and b/docs/development/img/git/branches.jpg differ diff --git a/docs/development/img/git/clean.png b/docs/development/img/git/clean.png new file mode 100644 index 0000000000..3accd7ccef Binary files /dev/null and b/docs/development/img/git/clean.png differ diff --git a/docs/development/img/git/squash.png b/docs/development/img/git/squash.png new file mode 100644 index 0000000000..234caca3e4 Binary files /dev/null and b/docs/development/img/git/squash.png differ diff --git a/docs/development/saml.md b/docs/development/saml.md new file mode 100644 index 0000000000..a9bfd2dc05 --- /dev/null +++ b/docs/development/saml.md @@ -0,0 +1,41 @@ +# How to test SAML as a developer without a server + +https://capriza.github.io/samling/samling.html (https://github.com/capriza/samling) is a great +resource for being able to tinker with the SAML options within Synapse without needing to +deploy and configure a complicated software stack. + +To make Synapse (and therefore Riot) use it: + +1. Use the samling.html URL above or deploy your own and visit the IdP Metadata tab. +2. Copy the XML to your clipboard. +3. On your Synapse server, create a new file `samling.xml` next to your `homeserver.yaml` with + the XML from step 2 as the contents. +4. Edit your `homeserver.yaml` to include: + ```yaml + saml2_config: + sp_config: + allow_unknown_attributes: true # Works around a bug with AVA Hashes: https://github.com/IdentityPython/pysaml2/issues/388 + metadata: + local: ["samling.xml"] + ``` +5. Ensure that your `homeserver.yaml` has a setting for `public_baseurl`: + ```yaml + public_baseurl: http://localhost:8080/ + ``` +6. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure + the dependencies are installed and ready to go. +7. Restart Synapse. + +Then in Riot: + +1. 
Visit the login page with a Riot pointing at your homeserver. +2. Click the Single Sign-On button. +3. On the samling page, enter a Name Identifier and add a SAML Attribute for `uid=your_localpart`. + The response must also be signed. +4. Click "Next". +5. Click "Post Response" (change nothing). +6. You should be logged in. + +If you try and repeat this process, you may be automatically logged in using the information you +gave previously. To fix this, open your developer console (`F12` or `Ctrl+Shift+I`) while on the +samling page and clear the site data. In Chrome, this will be a button on the Application tab. -- cgit 1.5.1 From 89c4ca81bb597159e456449c548ba3f166843ddc Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 22 Jul 2021 16:05:16 +0200 Subject: Add `creation_ts` to list users admin API (#10448) Signed-off-by: Dirk Klimpel dirk@klimpel.org --- changelog.d/10448.feature | 1 + docs/admin_api/user_admin_api.md | 10 +++++-- synapse/rest/admin/users.py | 2 ++ synapse/storage/databases/main/__init__.py | 19 +++++-------- synapse/storage/databases/main/stats.py | 2 ++ tests/rest/admin/test_user.py | 45 ++++++++++++++++++------------ 6 files changed, 46 insertions(+), 33 deletions(-) create mode 100644 changelog.d/10448.feature (limited to 'docs') diff --git a/changelog.d/10448.feature b/changelog.d/10448.feature new file mode 100644 index 0000000000..f6579e0ca8 --- /dev/null +++ b/changelog.d/10448.feature @@ -0,0 +1 @@ +Add `creation_ts` to list users admin API. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 4a65d0c3bc..160899754e 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -144,7 +144,8 @@ A response body like the following is returned: "deactivated": 0, "shadow_banned": 0, "displayname": "", - "avatar_url": null + "avatar_url": null, + "creation_ts": 1560432668000 }, { "name": "", "is_guest": 0, @@ -153,7 +154,8 @@ A response body like the following is returned: "deactivated": 0, "shadow_banned": 0, "displayname": "", - "avatar_url": "" + "avatar_url": "", + "creation_ts": 1561550621000 } ], "next_token": "100", @@ -197,11 +199,12 @@ The following parameters should be set in the URL: - `shadow_banned` - Users are ordered by `shadow_banned` status. - `displayname` - Users are ordered alphabetically by `displayname`. - `avatar_url` - Users are ordered alphabetically by avatar URL. + - `creation_ts` - Users are ordered by when the users was created in ms. - `dir` - Direction of media order. Either `f` for forwards or `b` for backwards. Setting this value to `b` will reverse the above sort order. Defaults to `f`. -Caution. The database only has indexes on the columns `name` and `created_ts`. +Caution. The database only has indexes on the columns `name` and `creation_ts`. This means that if a different sort order is used (`is_guest`, `admin`, `user_type`, `deactivated`, `shadow_banned`, `avatar_url` or `displayname`), this can cause a large load on the database, especially for large environments. @@ -222,6 +225,7 @@ The following fields are returned in the JSON response body: - `shadow_banned` - bool - Status if that user has been marked as shadow banned. - `displayname` - string - The user's display name if they have set one. - `avatar_url` - string - The user's avatar URL if they have set one. + - `creation_ts` - integer - The user's creation timestamp in ms. 
- `next_token`: string representing a positive integer - Indication for pagination. See above. - `total` - integer - Total number of media. diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 6736536172..eef76ab18a 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -62,6 +62,7 @@ class UsersRestServletV2(RestServlet): The parameter `name` can be used to filter by user id or display name. The parameter `guests` can be used to exclude guest users. The parameter `deactivated` can be used to include deactivated users. + The parameter `order_by` can be used to order the result. """ def __init__(self, hs: "HomeServer"): @@ -108,6 +109,7 @@ class UsersRestServletV2(RestServlet): UserSortOrder.USER_TYPE.value, UserSortOrder.AVATAR_URL.value, UserSortOrder.SHADOW_BANNED.value, + UserSortOrder.CREATION_TS.value, ), ) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index bacfbce4af..8d9f07111d 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -297,27 +297,22 @@ class DataStore( where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" - sql_base = """ + sql_base = f""" FROM users as u LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ? - {} - """.format( - where_clause - ) + {where_clause} + """ sql = "SELECT COUNT(*) as total_users " + sql_base txn.execute(sql, args) count = txn.fetchone()[0] - sql = """ - SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, displayname, avatar_url + sql = f""" + SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, + displayname, avatar_url, creation_ts * 1000 as creation_ts {sql_base} ORDER BY {order_by_column} {order}, u.name ASC LIMIT ? OFFSET ? 
- """.format( - sql_base=sql_base, - order_by_column=order_by_column, - order=order, - ) + """ args += [limit, start] txn.execute(sql, args) users = self.db_pool.cursor_to_dict(txn) diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 889e0d3625..42edbcc057 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -75,6 +75,7 @@ class UserSortOrder(Enum): USER_TYPE = ordered alphabetically by `user_type` AVATAR_URL = ordered alphabetically by `avatar_url` SHADOW_BANNED = ordered by `shadow_banned` + CREATION_TS = ordered by `creation_ts` """ MEDIA_LENGTH = "media_length" @@ -88,6 +89,7 @@ class UserSortOrder(Enum): USER_TYPE = "user_type" AVATAR_URL = "avatar_url" SHADOW_BANNED = "shadow_banned" + CREATION_TS = "creation_ts" class StatsStore(StateDeltasStore): diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 4fccce34fd..42f50c0921 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -473,7 +473,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): """ channel = self.make_request("GET", self.url, b"{}") - self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self): @@ -485,7 +485,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", self.url, access_token=other_user_token) - self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_all_users(self): @@ -497,11 +497,11 @@ class UsersListTestCase(unittest.HomeserverTestCase): channel = self.make_request( "GET", self.url + "?deactivated=true", - b"{}", + {}, access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(3, len(channel.json_body["users"])) self.assertEqual(3, channel.json_body["total"]) @@ -532,7 +532,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): ) channel = self.make_request( "GET", - url.encode("ascii"), + url, access_token=self.admin_user_tok, ) self.assertEqual(expected_http_code, channel.code, msg=channel.json_body) @@ -598,7 +598,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative from @@ -608,7 +608,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid guests @@ -618,7 +618,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) # invalid deactivated @@ -628,7 +628,7 @@ class 
UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) # unkown order_by @@ -648,7 +648,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) def test_limit(self): @@ -666,7 +666,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), 5) self.assertEqual(channel.json_body["next_token"], "5") @@ -687,7 +687,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), 15) self.assertNotIn("next_token", channel.json_body) @@ -708,7 +708,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(channel.json_body["next_token"], "15") self.assertEqual(len(channel.json_body["users"]), 10) @@ -731,7 +731,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), number_users) self.assertNotIn("next_token", channel.json_body) @@ -744,7 +744,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), number_users) self.assertNotIn("next_token", channel.json_body) @@ -757,7 +757,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), 19) self.assertEqual(channel.json_body["next_token"], "19") @@ -771,7 +771,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), 1) 
self.assertNotIn("next_token", channel.json_body) @@ -781,7 +781,10 @@ class UsersListTestCase(unittest.HomeserverTestCase): Testing order list with parameter `order_by` """ + # make sure that the users do not have the same timestamps + self.reactor.advance(10) user1 = self.register_user("user1", "pass1", admin=False, displayname="Name Z") + self.reactor.advance(10) user2 = self.register_user("user2", "pass2", admin=False, displayname="Name Y") # Modify user @@ -841,6 +844,11 @@ class UsersListTestCase(unittest.HomeserverTestCase): self._order_test([self.admin_user, user2, user1], "avatar_url", "f") self._order_test([user1, user2, self.admin_user], "avatar_url", "b") + # order by creation_ts + self._order_test([self.admin_user, user1, user2], "creation_ts") + self._order_test([self.admin_user, user1, user2], "creation_ts", "f") + self._order_test([user2, user1, self.admin_user], "creation_ts", "b") + def _order_test( self, expected_user_list: List[str], @@ -863,7 +871,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): url += "dir=%s" % (dir,) channel = self.make_request( "GET", - url.encode("ascii"), + url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) @@ -887,6 +895,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): self.assertIn("shadow_banned", u) self.assertIn("displayname", u) self.assertIn("avatar_url", u) + self.assertIn("creation_ts", u) def _create_users(self, number_users: int): """ -- cgit 1.5.1 From e16eab29d671504144f4185d4738e5bfd7a3a2c6 Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Tue, 27 Jul 2021 14:32:05 +0100 Subject: Add a PeriodicallyFlushingMemoryHandler to prevent logging silence (#10407) Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/10407.feature | 1 + docs/sample_log_config.yaml | 5 ++- synapse/config/logger.py | 5 ++- synapse/logging/handlers.py | 88 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 97 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10407.feature create mode 100644 synapse/logging/handlers.py (limited to 'docs') diff --git a/changelog.d/10407.feature b/changelog.d/10407.feature new file mode 100644 index 0000000000..db277d9ecd --- /dev/null +++ b/changelog.d/10407.feature @@ -0,0 +1 @@ +Add a buffered logging handler which periodically flushes itself. diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml index 669e600081..b088c83405 100644 --- a/docs/sample_log_config.yaml +++ b/docs/sample_log_config.yaml @@ -28,7 +28,7 @@ handlers: # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR # logs will still be flushed immediately. buffer: - class: logging.handlers.MemoryHandler + class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler target: file # The capacity is the number of log lines that are buffered before # being written to disk. Increasing this will lead to better @@ -36,6 +36,9 @@ handlers: # be written to disk. capacity: 10 flushLevel: 30 # Flush for WARNING logs as well + # The period of time, in seconds, between forced flushes. + # Messages will not be delayed for longer than this time. + period: 5 # A handler that writes logs to stderr. Unused by default, but can be used # instead of "buffer" and "file" in the logger handlers. 
diff --git a/synapse/config/logger.py b/synapse/config/logger.py index ad4e6e61c3..dcd3ed1dac 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -71,7 +71,7 @@ handlers: # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR # logs will still be flushed immediately. buffer: - class: logging.handlers.MemoryHandler + class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler target: file # The capacity is the number of log lines that are buffered before # being written to disk. Increasing this will lead to better @@ -79,6 +79,9 @@ handlers: # be written to disk. capacity: 10 flushLevel: 30 # Flush for WARNING logs as well + # The period of time, in seconds, between forced flushes. + # Messages will not be delayed for longer than this time. + period: 5 # A handler that writes logs to stderr. Unused by default, but can be used # instead of "buffer" and "file" in the logger handlers. diff --git a/synapse/logging/handlers.py b/synapse/logging/handlers.py new file mode 100644 index 0000000000..a6c212f300 --- /dev/null +++ b/synapse/logging/handlers.py @@ -0,0 +1,88 @@ +import logging +import time +from logging import Handler, LogRecord +from logging.handlers import MemoryHandler +from threading import Thread +from typing import Optional + +from twisted.internet.interfaces import IReactorCore + + +class PeriodicallyFlushingMemoryHandler(MemoryHandler): + """ + This is a subclass of MemoryHandler that additionally spawns a background + thread to periodically flush the buffer. + + This prevents messages from being buffered for too long. + + Additionally, all messages will be immediately flushed if the reactor has + not yet been started. + """ + + def __init__( + self, + capacity: int, + flushLevel: int = logging.ERROR, + target: Optional[Handler] = None, + flushOnClose: bool = True, + period: float = 5.0, + reactor: Optional[IReactorCore] = None, + ) -> None: + """ + period: the period between automatic flushes + + reactor: if specified, a custom reactor to use. If not specifies, + defaults to the globally-installed reactor. + Log entries will be flushed immediately until this reactor has + started. + """ + super().__init__(capacity, flushLevel, target, flushOnClose) + + self._flush_period: float = period + self._active: bool = True + self._reactor_started = False + + self._flushing_thread: Thread = Thread( + name="PeriodicallyFlushingMemoryHandler flushing thread", + target=self._flush_periodically, + ) + self._flushing_thread.start() + + def on_reactor_running(): + self._reactor_started = True + + reactor_to_use: IReactorCore + if reactor is None: + from twisted.internet import reactor as global_reactor + + reactor_to_use = global_reactor # type: ignore[assignment] + else: + reactor_to_use = reactor + + # call our hook when the reactor start up + reactor_to_use.callWhenRunning(on_reactor_running) + + def shouldFlush(self, record: LogRecord) -> bool: + """ + Before reactor start-up, log everything immediately. + Otherwise, fall back to original behaviour of waiting for the buffer to fill. + """ + + if self._reactor_started: + return super().shouldFlush(record) + else: + return True + + def _flush_periodically(self): + """ + Whilst this handler is active, flush the handler periodically. 
+ """ + + while self._active: + # flush is thread-safe; it acquires and releases the lock internally + self.flush() + time.sleep(self._flush_period) + + def close(self) -> None: + self._active = False + super().close() -- cgit 1.5.1 From ba5287f5e8be150551824493b3ad685dde00a543 Mon Sep 17 00:00:00 2001 From: Toni Spets Date: Mon, 2 Aug 2021 16:24:43 +0300 Subject: Allow setting transaction limit for db connections (#10440) Setting the value will help PostgreSQL free up memory by recycling the connections in the connection pool. Signed-off-by: Toni Spets --- changelog.d/10440.feature | 1 + docs/sample_config.yaml | 4 ++++ synapse/config/database.py | 4 ++++ synapse/storage/database.py | 21 +++++++++++++++++++++ tests/storage/test_txn_limit.py | 36 ++++++++++++++++++++++++++++++++++++ tests/utils.py | 3 +++ 6 files changed, 69 insertions(+) create mode 100644 changelog.d/10440.feature create mode 100644 tests/storage/test_txn_limit.py (limited to 'docs') diff --git a/changelog.d/10440.feature b/changelog.d/10440.feature new file mode 100644 index 0000000000..f1833b0bd7 --- /dev/null +++ b/changelog.d/10440.feature @@ -0,0 +1 @@ +Allow setting transaction limit for database connections. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 853c2f6899..1a217f35db 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -720,6 +720,9 @@ caches: # 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or # 'psycopg2' (for PostgreSQL). # +# 'txn_limit' gives the maximum number of transactions to run per connection +# before reconnecting. Defaults to 0, which means no limit. +# # 'args' gives options which are passed through to the database engine, # except for options starting 'cp_', which are used to configure the Twisted # connection pool. For a reference to valid arguments, see: @@ -740,6 +743,7 @@ caches: # #database: # name: psycopg2 +# txn_limit: 10000 # args: # user: synapse_user # password: secretpassword diff --git a/synapse/config/database.py b/synapse/config/database.py index 3d7d92f615..651e31b576 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -33,6 +33,9 @@ DEFAULT_CONFIG = """\ # 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or # 'psycopg2' (for PostgreSQL). # +# 'txn_limit' gives the maximum number of transactions to run per connection +# before reconnecting. Defaults to 0, which means no limit. +# # 'args' gives options which are passed through to the database engine, # except for options starting 'cp_', which are used to configure the Twisted # connection pool. For a reference to valid arguments, see: @@ -53,6 +56,7 @@ DEFAULT_CONFIG = """\ # #database: # name: psycopg2 +# txn_limit: 10000 # args: # user: synapse_user # password: secretpassword diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 4d4643619f..c8015a3848 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -15,6 +15,7 @@ # limitations under the License. 
import logging import time +from collections import defaultdict from sys import intern from time import monotonic as monotonic_time from typing import ( @@ -397,6 +398,7 @@ class DatabasePool: ): self.hs = hs self._clock = hs.get_clock() + self._txn_limit = database_config.config.get("txn_limit", 0) self._database_config = database_config self._db_pool = make_pool(hs.get_reactor(), database_config, engine) @@ -406,6 +408,9 @@ class DatabasePool: self._current_txn_total_time = 0.0 self._previous_loop_ts = 0.0 + # Transaction counter: key is the twisted thread id, value is the current count + self._txn_counters: Dict[int, int] = defaultdict(int) + # TODO(paul): These can eventually be removed once the metrics code # is running in mainline, and we have some nice monitoring frontends # to watch it @@ -750,10 +755,26 @@ class DatabasePool: sql_scheduling_timer.observe(sched_duration_sec) context.add_database_scheduled(sched_duration_sec) + if self._txn_limit > 0: + tid = self._db_pool.threadID() + self._txn_counters[tid] += 1 + + if self._txn_counters[tid] > self._txn_limit: + logger.debug( + "Reconnecting database connection over transaction limit" + ) + conn.reconnect() + opentracing.log_kv( + {"message": "reconnected due to txn limit"} + ) + self._txn_counters[tid] = 1 + if self.engine.is_connection_closed(conn): logger.debug("Reconnecting closed database connection") conn.reconnect() opentracing.log_kv({"message": "reconnected"}) + if self._txn_limit > 0: + self._txn_counters[tid] = 1 try: if db_autocommit: diff --git a/tests/storage/test_txn_limit.py b/tests/storage/test_txn_limit.py new file mode 100644 index 0000000000..9be51f9ebd --- /dev/null +++ b/tests/storage/test_txn_limit.py @@ -0,0 +1,36 @@ +# Copyright 2014-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from tests import unittest + + +class SQLTransactionLimitTestCase(unittest.HomeserverTestCase): + """Test SQL transaction limit doesn't break transactions.""" + + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver(db_txn_limit=1000) + + def test_config(self): + db_config = self.hs.config.get_single_database() + self.assertEqual(db_config.config["txn_limit"], 1000) + + def test_select(self): + def do_select(txn): + txn.execute("SELECT 1") + + db_pool = self.hs.get_datastores().databases[0] + + # force txn limit to roll over at least once + for i in range(0, 1001): + self.get_success_or_raise(db_pool.runInteraction("test_select", do_select)) diff --git a/tests/utils.py b/tests/utils.py index 6bd008dcfe..f3458ca88d 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -239,6 +239,9 @@ def setup_test_homeserver( "args": {"database": ":memory:", "cp_min": 1, "cp_max": 1}, } + if "db_txn_limit" in kwargs: + database_config["txn_limit"] = kwargs["db_txn_limit"] + database = DatabaseConnectionConfig("master", database_config) config.database.databases = [database] -- cgit 1.5.1
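To make the intent of the transaction limit in #10440 a little more concrete, here is a small, self-contained sketch of the same idea outside Synapse: count the transactions run on a connection and recycle the connection once the count passes the limit. It is illustrative only — the `RecyclingConnection` class and its names are hypothetical, not part of Synapse, and it uses `sqlite3` from the standard library purely to keep the sketch runnable, whereas the memory-recycling benefit described in the commit message applies to PostgreSQL connections.

```python
import sqlite3


class RecyclingConnection:
    """Run transactions on a DB connection, reconnecting after `txn_limit` of them."""

    def __init__(self, database: str, txn_limit: int = 1000) -> None:
        self._database = database
        self._txn_limit = txn_limit
        self._txn_count = 0
        self._conn = sqlite3.connect(database)

    def run_txn(self, sql: str, args: tuple = ()) -> list:
        self._txn_count += 1
        if self._txn_count > self._txn_limit:
            # Over the limit: drop the old connection and open a fresh one,
            # mirroring the reconnect that DatabasePool performs above, then
            # count the transaction we are about to run as the first one.
            self._conn.close()
            self._conn = sqlite3.connect(self._database)
            self._txn_count = 1
        with self._conn:  # commits on success, rolls back on error
            return self._conn.execute(sql, args).fetchall()


conn = RecyclingConnection(":memory:", txn_limit=5)
for _ in range(12):  # enough iterations to force a couple of reconnects
    conn.run_txn("SELECT 1")
```

In Synapse itself the counter is kept per connection-pool thread (see `_txn_counters` in the `synapse/storage/database.py` hunk above), since each pooled connection is bound to a Twisted threadpool thread and must be recycled independently.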