@andrewshadura
Last active May 23, 2019 07:25
Diff between matrix-synapse 0.99.2-5 and 0.99.5.1-1, patches applied
This file has been truncated.
diff --git a/.buildkite/.env b/.buildkite/.env
new file mode 100644
index 0000000..85b102d
--- /dev/null
+++ b/.buildkite/.env
@@ -0,0 +1,13 @@
+CI
+BUILDKITE
+BUILDKITE_BUILD_NUMBER
+BUILDKITE_BRANCH
+BUILDKITE_BUILD_NUMBER
+BUILDKITE_JOB_ID
+BUILDKITE_BUILD_URL
+BUILDKITE_PROJECT_SLUG
+BUILDKITE_COMMIT
+BUILDKITE_PULL_REQUEST
+BUILDKITE_TAG
+CODECOV_TOKEN
+TRIAL_FLAGS
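The variables above are listed by name only. With Docker's env-file handling, a name-only line forwards that variable's current value from the host environment into the container; we assume the compose files below rely on the same pass-through behaviour via `env_file: .env`. A minimal sketch of the `docker run` semantics (the branch value is illustrative):

```
# A name-only line such as "BUILDKITE_BRANCH" copies the variable from the
# host environment, if set; nothing in the file hard-codes a value.
export BUILDKITE_BRANCH=develop
docker run --rm --env-file .buildkite/.env python:3.6 \
    sh -c 'echo "branch is $BUILDKITE_BRANCH"'
# prints: branch is develop
```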
diff --git a/.buildkite/docker-compose.py27.pg94.yaml b/.buildkite/docker-compose.py27.pg94.yaml
new file mode 100644
index 0000000..2d4b9ea
--- /dev/null
+++ b/.buildkite/docker-compose.py27.pg94.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+ postgres:
+ image: postgres:9.4
+ environment:
+ POSTGRES_PASSWORD: postgres
+
+ testenv:
+ image: python:2.7
+ depends_on:
+ - postgres
+ env_file: .env
+ environment:
+ SYNAPSE_POSTGRES_HOST: postgres
+ SYNAPSE_POSTGRES_USER: postgres
+ SYNAPSE_POSTGRES_PASSWORD: postgres
+ working_dir: /app
+ volumes:
+ - ..:/app
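Each of these compose files pairs a Postgres service with a `testenv` Python container that mounts the repository at `/app`. A hedged sketch of reproducing one of these CI environments locally (the inner command mirrors the pipeline steps later in this diff; assumes docker-compose is installed and you are at the repository root):

```
# Run the py27/pg94 test environment by hand.
docker-compose -f .buildkite/docker-compose.py27.pg94.yaml run testenv \
    bash -c "python -m pip install tox && python -m tox -e py27-postgres"
# Tear down the postgres service afterwards.
docker-compose -f .buildkite/docker-compose.py27.pg94.yaml down
```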
diff --git a/.buildkite/docker-compose.py27.pg95.yaml b/.buildkite/docker-compose.py27.pg95.yaml
new file mode 100644
index 0000000..c6a41f1
--- /dev/null
+++ b/.buildkite/docker-compose.py27.pg95.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+ postgres:
+ image: postgres:9.5
+ environment:
+ POSTGRES_PASSWORD: postgres
+
+ testenv:
+ image: python:2.7
+ depends_on:
+ - postgres
+ env_file: .env
+ environment:
+ SYNAPSE_POSTGRES_HOST: postgres
+ SYNAPSE_POSTGRES_USER: postgres
+ SYNAPSE_POSTGRES_PASSWORD: postgres
+ working_dir: /app
+ volumes:
+ - ..:/app
diff --git a/.buildkite/docker-compose.py35.pg94.yaml b/.buildkite/docker-compose.py35.pg94.yaml
new file mode 100644
index 0000000..978aedd
--- /dev/null
+++ b/.buildkite/docker-compose.py35.pg94.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+ postgres:
+ image: postgres:9.4
+ environment:
+ POSTGRES_PASSWORD: postgres
+
+ testenv:
+ image: python:3.5
+ depends_on:
+ - postgres
+ env_file: .env
+ environment:
+ SYNAPSE_POSTGRES_HOST: postgres
+ SYNAPSE_POSTGRES_USER: postgres
+ SYNAPSE_POSTGRES_PASSWORD: postgres
+ working_dir: /app
+ volumes:
+ - ..:/app
diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml
new file mode 100644
index 0000000..2f14387
--- /dev/null
+++ b/.buildkite/docker-compose.py35.pg95.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+ postgres:
+ image: postgres:9.5
+ environment:
+ POSTGRES_PASSWORD: postgres
+
+ testenv:
+ image: python:3.5
+ depends_on:
+ - postgres
+ env_file: .env
+ environment:
+ SYNAPSE_POSTGRES_HOST: postgres
+ SYNAPSE_POSTGRES_USER: postgres
+ SYNAPSE_POSTGRES_PASSWORD: postgres
+ working_dir: /app
+ volumes:
+ - ..:/app
diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml
new file mode 100644
index 0000000..f3eec05
--- /dev/null
+++ b/.buildkite/docker-compose.py37.pg11.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+ postgres:
+ image: postgres:11
+ environment:
+ POSTGRES_PASSWORD: postgres
+
+ testenv:
+ image: python:3.7
+ depends_on:
+ - postgres
+ env_file: .env
+ environment:
+ SYNAPSE_POSTGRES_HOST: postgres
+ SYNAPSE_POSTGRES_USER: postgres
+ SYNAPSE_POSTGRES_PASSWORD: postgres
+ working_dir: /app
+ volumes:
+ - ..:/app
diff --git a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml
new file mode 100644
index 0000000..2a41db8
--- /dev/null
+++ b/.buildkite/docker-compose.py37.pg95.yaml
@@ -0,0 +1,21 @@
+version: '3.1'
+
+services:
+
+ postgres:
+ image: postgres:9.5
+ environment:
+ POSTGRES_PASSWORD: postgres
+
+ testenv:
+ image: python:3.7
+ depends_on:
+ - postgres
+ env_file: .env
+ environment:
+ SYNAPSE_POSTGRES_HOST: postgres
+ SYNAPSE_POSTGRES_USER: postgres
+ SYNAPSE_POSTGRES_PASSWORD: postgres
+ working_dir: /app
+ volumes:
+ - ..:/app
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
new file mode 100644
index 0000000..44b258d
--- /dev/null
+++ b/.buildkite/pipeline.yml
@@ -0,0 +1,168 @@
+env:
+ CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
+
+steps:
+ - command:
+ - "python -m pip install tox"
+ - "tox -e pep8"
+ label: "\U0001F9F9 PEP-8"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e packaging"
+ label: "\U0001F9F9 packaging"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e check_isort"
+ label: "\U0001F9F9 isort"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+
+ - command:
+ - "python -m pip install tox"
+ - "scripts-dev/check-newsfragment"
+ label: ":newspaper: Newsfile"
+ branches: "!master !develop !release-*"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+ propagate-environment: true
+
+ - wait
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e check-sampleconfig"
+ label: "\U0001F9F9 check-sample-config"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e py27,codecov"
+ label: ":python: 2.7 / SQLite"
+ env:
+ TRIAL_FLAGS: "-j 2"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:2.7"
+ propagate-environment: true
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e py35,codecov"
+ label: ":python: 3.5 / SQLite"
+ env:
+ TRIAL_FLAGS: "-j 2"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.5"
+ propagate-environment: true
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e py36,codecov"
+ label: ":python: 3.6 / SQLite"
+ env:
+ TRIAL_FLAGS: "-j 2"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+ propagate-environment: true
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e py37,codecov"
+ label: ":python: 3.7 / SQLite"
+ env:
+ TRIAL_FLAGS: "-j 2"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.7"
+ propagate-environment: true
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e py27-old,codecov"
+ label: ":python: 2.7 / SQLite / Old Deps"
+ env:
+ TRIAL_FLAGS: "-j 2"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:2.7"
+ propagate-environment: true
+
+ - label: ":python: 2.7 / :postgres: 9.4"
+ env:
+ TRIAL_FLAGS: "-j 4"
+ command:
+ - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
+ plugins:
+ - docker-compose#v2.1.0:
+ run: testenv
+ config:
+ - .buildkite/docker-compose.py27.pg94.yaml
+
+ - label: ":python: 2.7 / :postgres: 9.5"
+ env:
+ TRIAL_FLAGS: "-j 4"
+ command:
+ - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
+ plugins:
+ - docker-compose#v2.1.0:
+ run: testenv
+ config:
+ - .buildkite/docker-compose.py27.pg95.yaml
+
+ - label: ":python: 3.5 / :postgres: 9.4"
+ env:
+ TRIAL_FLAGS: "-j 4"
+ command:
+ - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
+ plugins:
+ - docker-compose#v2.1.0:
+ run: testenv
+ config:
+ - .buildkite/docker-compose.py35.pg94.yaml
+
+ - label: ":python: 3.5 / :postgres: 9.5"
+ env:
+ TRIAL_FLAGS: "-j 4"
+ command:
+ - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
+ plugins:
+ - docker-compose#v2.1.0:
+ run: testenv
+ config:
+ - .buildkite/docker-compose.py35.pg95.yaml
+
+ - label: ":python: 3.7 / :postgres: 9.5"
+ env:
+ TRIAL_FLAGS: "-j 4"
+ command:
+ - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
+ plugins:
+ - docker-compose#v2.1.0:
+ run: testenv
+ config:
+ - .buildkite/docker-compose.py37.pg95.yaml
+
+ - label: ":python: 3.7 / :postgres: 11"
+ env:
+ TRIAL_FLAGS: "-j 4"
+ command:
+ - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
+ plugins:
+ - docker-compose#v2.1.0:
+ run: testenv
+ config:
+ - .buildkite/docker-compose.py37.pg11.yaml
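A reading aid for the pipeline above: the SQLite jobs set `TRIAL_FLAGS="-j 2"` and the PostgreSQL jobs `-j 4`, matching the CPU-bound versus I/O-bound split described in the deleted `.travis.yml` below. A sketch of what the flag amounts to, assuming (the tox.ini is not shown in this diff) that tox substitutes `TRIAL_FLAGS` into the trial command line:

```
# -j sets the number of parallel trial worker processes.
TRIAL_FLAGS="-j 4" tox -e py37-postgres
# ...which we assume expands to roughly:
#   trial -j 4 tests
```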
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.md b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
index 756759c..5cf844b 100644
--- a/.github/ISSUE_TEMPLATE/BUG_REPORT.md
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
@@ -4,9 +4,9 @@ about: Create a report to help us improve
---
-<!--
+<!--
-**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
+**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)
@@ -17,7 +17,7 @@ the necessary data to fix your issue.
You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.
-Text between <!-- and --> marks will be invisible in the report.
+Text between <!-- and --​> marks will be invisible in the report.
-->
@@ -31,7 +31,7 @@ Text between <!-- and --> marks will be invisible in the report.
- that reproduce the bug
- using hyphens as bullet points
-<!--
+<!--
Describe how what happens differs from what you expected.
If you can identify any relevant log snippets from _homeserver.log_, please include
@@ -48,8 +48,8 @@ those (please be careful to remove any personal or private data). Please surroun
If not matrix.org:
-<!--
-What version of Synapse is running?
+<!--
+What version of Synapse is running?
You can find the Synapse version by inspecting the server headers (replace matrix.org with
your own homeserver domain):
$ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
diff --git a/.gitignore b/.gitignore
index a20f3e6..a84c41b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,11 +12,15 @@ _trial_temp/
_trial_temp*/
# stuff that is likely to exist when you run a server locally
+/*.db
+/*.log
+/*.log.config
+/*.pid
/*.signing.key
-/*.tls.crt
-/*.tls.key
-/uploads
+/env/
+/homeserver*.yaml
/media_store/
+/uploads
# IDEs
/.idea/
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 0d0fa70..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-dist: xenial
-language: python
-
-cache:
- directories:
- # we only bother to cache the wheels; parts of the http cache get
- # invalidated every build (because they get served with a max-age of 600
- # seconds), which means that we end up re-uploading the whole cache for
- # every build, which is time-consuming In any case, it's not obvious that
- # downloading the cache from S3 would be much faster than downloading the
- # originals from pypi.
- #
- - $HOME/.cache/pip/wheels
-
-# don't clone the whole repo history, one commit will do
-git:
- depth: 1
-
-# only build branches we care about (PRs are built seperately)
-branches:
- only:
- - master
- - develop
- - /^release-v/
- - rav/pg95
-
-# When running the tox environments that call Twisted Trial, we can pass the -j
-# flag to run the tests concurrently. We set this to 2 for CPU bound tests
-# (SQLite) and 4 for I/O bound tests (PostgreSQL).
-matrix:
- fast_finish: true
- include:
- - name: "pep8"
- python: 3.6
- env: TOX_ENV="pep8,check_isort,packaging"
-
- - name: "py2.7 / sqlite"
- python: 2.7
- env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"
-
- - name: "py2.7 / sqlite / olddeps"
- python: 2.7
- env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
-
- - name: "py2.7 / postgres9.5"
- python: 2.7
- addons:
- postgresql: "9.5"
- env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
- services:
- - postgresql
-
- - name: "py3.5 / sqlite"
- python: 3.5
- env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"
-
- - name: "py3.7 / sqlite"
- python: 3.7
- env: TOX_ENV=py37,codecov TRIAL_FLAGS="-j 2"
-
- - name: "py3.7 / postgres9.4"
- python: 3.7
- addons:
- postgresql: "9.4"
- env: TOX_ENV=py37-postgres TRIAL_FLAGS="-j 4"
- services:
- - postgresql
-
- - name: "py3.7 / postgres9.5"
- python: 3.7
- addons:
- postgresql: "9.5"
- env: TOX_ENV=py37-postgres,codecov TRIAL_FLAGS="-j 4"
- services:
- - postgresql
-
- - # we only need to check for the newsfragment if it's a PR build
- if: type = pull_request
- name: "check-newsfragment"
- python: 3.6
- script: scripts-dev/check-newsfragment
-
-install:
- # this just logs the postgres version we will be testing against (if any)
- - psql -At -U postgres -c 'select version();' || true
-
- - pip install tox
-
- # if we don't have python3.6 in this environment, travis unhelpfully gives us
- # a `python3.6` on our path which does nothing but spit out a warning. Tox
- # tries to run it (even if we're not running a py36 env), so the build logs
- # then have warnings which look like errors. To reduce the noise, remove the
- # non-functional python3.6.
- - ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)
-
-script:
- - tox -e $TOX_ENV
diff --git a/AUTHORS.rst b/AUTHORS.rst
index d599aec..3ea18ee 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -69,3 +69,6 @@ Serban Constantin <serban.constantin at gmail dot com>
Jason Robinson <jasonr at matrix.org>
* Minor fixes
+
+Joseph Weston <joseph at weston.cloud>
+ + Add admin API for querying HS version
diff --git a/CHANGES.md b/CHANGES.md
index b25775d..6bdfdd6 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,260 @@
+Synapse 0.99.5.1 (2019-05-22)
+=============================
+
+No significant changes.
+
+
+Synapse 0.99.5 (2019-05-22)
+===========================
+
+No significant changes.
+
+
+Synapse 0.99.5rc1 (2019-05-21)
+==============================
+
+Features
+--------
+
+- Add ability to blacklist IP ranges for the federation client. ([\#5043](https://github.com/matrix-org/synapse/issues/5043))
+- Ratelimiting configuration for clients sending messages and the federation server has been altered to match login ratelimiting. The old configuration names will continue working. Check the sample config for details of the new names. ([\#5181](https://github.com/matrix-org/synapse/issues/5181))
+- Drop support for the undocumented /_matrix/client/v2_alpha API prefix. ([\#5190](https://github.com/matrix-org/synapse/issues/5190))
+- Add an option to disable per-room profiles. ([\#5196](https://github.com/matrix-org/synapse/issues/5196))
+- Stick an expiration date to any registered user missing one at startup if account validity is enabled. ([\#5204](https://github.com/matrix-org/synapse/issues/5204))
+- Add experimental support for relations (aka reactions and edits). ([\#5209](https://github.com/matrix-org/synapse/issues/5209), [\#5211](https://github.com/matrix-org/synapse/issues/5211), [\#5203](https://github.com/matrix-org/synapse/issues/5203), [\#5212](https://github.com/matrix-org/synapse/issues/5212))
+- Add a room version 4 which uses a new event ID format, as per [MSC2002](https://github.com/matrix-org/matrix-doc/pull/2002). ([\#5210](https://github.com/matrix-org/synapse/issues/5210), [\#5217](https://github.com/matrix-org/synapse/issues/5217))
+
+
+Bugfixes
+--------
+
+- Fix image orientation when generating thumbnails (needs pillow>=4.3.0). Contributed by Pau Rodriguez-Estivill. ([\#5039](https://github.com/matrix-org/synapse/issues/5039))
+- Exclude soft-failed events from forward-extremity candidates: fixes "No forward extremities left!" error. ([\#5146](https://github.com/matrix-org/synapse/issues/5146))
+- Re-order stages in registration flows such that msisdn and email verification are done last. ([\#5174](https://github.com/matrix-org/synapse/issues/5174))
+- Fix 3pid guest invites. ([\#5177](https://github.com/matrix-org/synapse/issues/5177))
+- Fix a bug where the register endpoint would fail with M_THREEPID_IN_USE instead of returning an account previously registered in the same session. ([\#5187](https://github.com/matrix-org/synapse/issues/5187))
+- Prevent registration for user ids that are too long to fit into a state key. Contributed by Reid Anderson. ([\#5198](https://github.com/matrix-org/synapse/issues/5198))
+- Fix incompatibility between ACME support and Python 3.5.2. ([\#5218](https://github.com/matrix-org/synapse/issues/5218))
+- Fix error handling for rooms whose versions are unknown. ([\#5219](https://github.com/matrix-org/synapse/issues/5219))
+
+
+Internal Changes
+----------------
+
+- Make /sync attempt to return device updates for both joined and invited users. Note that this doesn't currently work correctly due to other bugs. ([\#3484](https://github.com/matrix-org/synapse/issues/3484))
+- Update tests to consistently be configured via the same code that is used when loading from configuration files. ([\#5171](https://github.com/matrix-org/synapse/issues/5171), [\#5185](https://github.com/matrix-org/synapse/issues/5185))
+- Allow client event serialization to be async. ([\#5183](https://github.com/matrix-org/synapse/issues/5183))
+- Expose DataStore._get_events as get_events_as_list. ([\#5184](https://github.com/matrix-org/synapse/issues/5184))
+- Make generating SQL bounds for pagination generic. ([\#5191](https://github.com/matrix-org/synapse/issues/5191))
+- Stop telling people to install the optional dependencies by default. ([\#5197](https://github.com/matrix-org/synapse/issues/5197))
+
+
+Synapse 0.99.4 (2019-05-15)
+===========================
+
+No significant changes.
+
+
+Synapse 0.99.4rc1 (2019-05-13)
+==============================
+
+Features
+--------
+
+- Add systemd-python to the optional dependencies to enable logging to the systemd journal. Install with `pip install matrix-synapse[systemd]`. ([\#4339](https://github.com/matrix-org/synapse/issues/4339))
+- Add a default .m.rule.tombstone push rule. ([\#4867](https://github.com/matrix-org/synapse/issues/4867))
+- Add ability for password provider modules to bind email addresses to users upon registration. ([\#4947](https://github.com/matrix-org/synapse/issues/4947))
+- Implementation of [MSC1711](https://github.com/matrix-org/matrix-doc/pull/1711) including config options for requiring valid TLS certificates for federation traffic, the ability to disable TLS validation for specific domains, and the ability to specify your own list of CA certificates. ([\#4967](https://github.com/matrix-org/synapse/issues/4967))
+- Remove presence list support as per MSC 1819. ([\#4989](https://github.com/matrix-org/synapse/issues/4989))
+- Reduce CPU usage starting pushers during start up. ([\#4991](https://github.com/matrix-org/synapse/issues/4991))
+- Add a delete group admin API. ([\#5002](https://github.com/matrix-org/synapse/issues/5002))
+- Add config option to block users from looking up 3PIDs. ([\#5010](https://github.com/matrix-org/synapse/issues/5010))
+- Add context to phonehome stats. ([\#5020](https://github.com/matrix-org/synapse/issues/5020))
+- Configure the example systemd units to have a log identifier of `matrix-synapse`
+ instead of the executable name, `python`.
+ Contributed by Christoph Müller. ([\#5023](https://github.com/matrix-org/synapse/issues/5023))
+- Add time-based account expiration. ([\#5027](https://github.com/matrix-org/synapse/issues/5027), [\#5047](https://github.com/matrix-org/synapse/issues/5047), [\#5073](https://github.com/matrix-org/synapse/issues/5073), [\#5116](https://github.com/matrix-org/synapse/issues/5116))
+- Add support for handling `/versions`, `/voip` and `/push_rules` client endpoints to client_reader worker. ([\#5063](https://github.com/matrix-org/synapse/issues/5063), [\#5065](https://github.com/matrix-org/synapse/issues/5065), [\#5070](https://github.com/matrix-org/synapse/issues/5070))
+- Add a configuration option to require authentication on /publicRooms and /profile endpoints. ([\#5083](https://github.com/matrix-org/synapse/issues/5083))
+- Move admin APIs to `/_synapse/admin/v1`. (The old paths are retained for backwards-compatibility, for now). ([\#5119](https://github.com/matrix-org/synapse/issues/5119))
+- Implement an admin API for sending server notices. Many thanks to @krombel who provided a foundation for this work. ([\#5121](https://github.com/matrix-org/synapse/issues/5121), [\#5142](https://github.com/matrix-org/synapse/issues/5142))
+
+
+Bugfixes
+--------
+
+- Avoid redundant URL encoding of redirect URL for SSO login in the fallback login page. Fixes a regression introduced in [#4220](https://github.com/matrix-org/synapse/pull/4220). Contributed by Marcel Fabian Krüger ("[zaugin](https://github.com/zauguin)"). ([\#4555](https://github.com/matrix-org/synapse/issues/4555))
+- Fix bug where presence updates were sent to all servers in a room when a new server joined, rather than to just the new server. ([\#4942](https://github.com/matrix-org/synapse/issues/4942), [\#5103](https://github.com/matrix-org/synapse/issues/5103))
+- Fix sync bug which made accepting invites unreliable in worker-mode synapses. ([\#4955](https://github.com/matrix-org/synapse/issues/4955), [\#4956](https://github.com/matrix-org/synapse/issues/4956))
+- start.sh: Fix the --no-rate-limit option for messages and make it bypass rate limit on registration and login too. ([\#4981](https://github.com/matrix-org/synapse/issues/4981))
+- Transfer related groups on room upgrade. ([\#4990](https://github.com/matrix-org/synapse/issues/4990))
+- Prevent the ability to kick users from a room they aren't in. ([\#4999](https://github.com/matrix-org/synapse/issues/4999))
+- Fix issue #4596 so synapse_port_db script works with --curses option on Python 3. Contributed by Anders Jensen-Waud <anders@jensenwaud.com>. ([\#5003](https://github.com/matrix-org/synapse/issues/5003))
+- Clients timing out/disappearing while downloading from the media repository will now no longer log a spurious "Producer was not unregistered" message. ([\#5009](https://github.com/matrix-org/synapse/issues/5009))
+- Fix "cannot import name execute_batch" error with postgres. ([\#5032](https://github.com/matrix-org/synapse/issues/5032))
+- Fix disappearing exceptions in manhole. ([\#5035](https://github.com/matrix-org/synapse/issues/5035))
+- Workaround bug in twisted where attempting too many concurrent DNS requests could cause it to hang due to running out of file descriptors. ([\#5037](https://github.com/matrix-org/synapse/issues/5037))
+- Make sure we're not registering the same 3pid twice on registration. ([\#5071](https://github.com/matrix-org/synapse/issues/5071))
+- Don't crash on lack of expiry templates. ([\#5077](https://github.com/matrix-org/synapse/issues/5077))
+- Fix the ratelimiting on third party invites. ([\#5104](https://github.com/matrix-org/synapse/issues/5104))
+- Add some missing limitations to room alias creation. ([\#5124](https://github.com/matrix-org/synapse/issues/5124), [\#5128](https://github.com/matrix-org/synapse/issues/5128))
+- Limit the number of EDUs in transactions to 100 as expected by synapse. Thanks to @superboum for this work! ([\#5138](https://github.com/matrix-org/synapse/issues/5138))
+
+Internal Changes
+----------------
+
+- Add test to verify threepid auth check added in #4435. ([\#4474](https://github.com/matrix-org/synapse/issues/4474))
+- Fix/improve some docstrings in the replication code. ([\#4949](https://github.com/matrix-org/synapse/issues/4949))
+- Split synapse.replication.tcp.streams into smaller files. ([\#4953](https://github.com/matrix-org/synapse/issues/4953))
+- Refactor replication row generation/parsing. ([\#4954](https://github.com/matrix-org/synapse/issues/4954))
+- Run `black` to clean up formatting on `synapse/storage/roommember.py` and `synapse/storage/events.py`. ([\#4959](https://github.com/matrix-org/synapse/issues/4959))
+- Remove log line for password via the admin API. ([\#4965](https://github.com/matrix-org/synapse/issues/4965))
+- Fix typo in TLS filenames in docker/README.md. Also add the '-p' commandline option to the 'docker run' example. Contributed by Jurrie Overgoor. ([\#4968](https://github.com/matrix-org/synapse/issues/4968))
+- Refactor room version definitions. ([\#4969](https://github.com/matrix-org/synapse/issues/4969))
+- Reduce log level of .well-known/matrix/client responses. ([\#4972](https://github.com/matrix-org/synapse/issues/4972))
+- Add `config.signing_key_path` that can be read by `synapse.config` utility. ([\#4974](https://github.com/matrix-org/synapse/issues/4974))
+- Track which identity server is used when binding a threepid and use that for unbinding, as per MSC1915. ([\#4982](https://github.com/matrix-org/synapse/issues/4982))
+- Rewrite KeyringTestCase as a HomeserverTestCase. ([\#4985](https://github.com/matrix-org/synapse/issues/4985))
+- README updates: Corrected the default POSTGRES_USER. Added port forwarding hint in TLS section. ([\#4987](https://github.com/matrix-org/synapse/issues/4987))
+- Remove a number of unused tables from the database schema. ([\#4992](https://github.com/matrix-org/synapse/issues/4992), [\#5028](https://github.com/matrix-org/synapse/issues/5028), [\#5033](https://github.com/matrix-org/synapse/issues/5033))
+- Run `black` on the remainder of `synapse/storage/`. ([\#4996](https://github.com/matrix-org/synapse/issues/4996))
+- Fix grammar in get_current_users_in_room and give it a docstring. ([\#4998](https://github.com/matrix-org/synapse/issues/4998))
+- Clean up some code in the server-key Keyring. ([\#5001](https://github.com/matrix-org/synapse/issues/5001))
+- Convert SYNAPSE_NO_TLS Docker variable to boolean for user friendliness. Contributed by Gabriel Eckerson. ([\#5005](https://github.com/matrix-org/synapse/issues/5005))
+- Refactor synapse.storage._base._simple_select_list_paginate. ([\#5007](https://github.com/matrix-org/synapse/issues/5007))
+- Store the notary server name correctly in server_keys_json. ([\#5024](https://github.com/matrix-org/synapse/issues/5024))
+- Rewrite Datastore.get_server_verify_keys to reduce the number of database transactions. ([\#5030](https://github.com/matrix-org/synapse/issues/5030))
+- Remove extraneous period from copyright headers. ([\#5046](https://github.com/matrix-org/synapse/issues/5046))
+- Update documentation for where to get Synapse packages. ([\#5067](https://github.com/matrix-org/synapse/issues/5067))
+- Add workarounds for pep-517 install errors. ([\#5098](https://github.com/matrix-org/synapse/issues/5098))
+- Improve logging when event-signature checks fail. ([\#5100](https://github.com/matrix-org/synapse/issues/5100))
+- Factor out an "assert_requester_is_admin" function. ([\#5120](https://github.com/matrix-org/synapse/issues/5120))
+- Remove the requirement to authenticate for /admin/server_version. ([\#5122](https://github.com/matrix-org/synapse/issues/5122))
+- Prevent an exception from being raised in a IResolutionReceiver and use a more generic error message for blacklisted URL previews. ([\#5155](https://github.com/matrix-org/synapse/issues/5155))
+- Run `black` on the tests directory. ([\#5170](https://github.com/matrix-org/synapse/issues/5170))
+- Fix CI after new release of isort. ([\#5179](https://github.com/matrix-org/synapse/issues/5179))
+- Fix bogus imports in unit tests. ([\#5154](https://github.com/matrix-org/synapse/issues/5154))
+
+
+Synapse 0.99.3.2 (2019-05-03)
+=============================
+
+Internal Changes
+----------------
+
+- Ensure that we have `urllib3` <1.25, to resolve incompatibility with `requests`. ([\#5135](https://github.com/matrix-org/synapse/issues/5135))
+
+
+Synapse 0.99.3.1 (2019-05-03)
+=============================
+
+Security update
+---------------
+
+This release includes two security fixes:
+
+- Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for identifying and responsibly disclosing this issue! ([\#5133](https://github.com/matrix-org/synapse/issues/5133))
+- Blacklist 0.0.0.0 and :: by default for URL previews. Thanks to @opnsec for identifying and responsibly disclosing this issue too! ([\#5134](https://github.com/matrix-org/synapse/issues/5134))
+
+Synapse 0.99.3 (2019-04-01)
+===========================
+
+No significant changes.
+
+
+Synapse 0.99.3rc1 (2019-03-27)
+==============================
+
+Features
+--------
+
+- The user directory has been rewritten to make it faster, with less chance of falling behind on a large server. ([\#4537](https://github.com/matrix-org/synapse/issues/4537), [\#4846](https://github.com/matrix-org/synapse/issues/4846), [\#4864](https://github.com/matrix-org/synapse/issues/4864), [\#4887](https://github.com/matrix-org/synapse/issues/4887), [\#4900](https://github.com/matrix-org/synapse/issues/4900), [\#4944](https://github.com/matrix-org/synapse/issues/4944))
+- Add configurable rate limiting to the /register endpoint. ([\#4735](https://github.com/matrix-org/synapse/issues/4735), [\#4804](https://github.com/matrix-org/synapse/issues/4804))
+- Move server key queries to federation reader. ([\#4757](https://github.com/matrix-org/synapse/issues/4757))
+- Add support for /account/3pid REST endpoint to client_reader worker. ([\#4759](https://github.com/matrix-org/synapse/issues/4759))
+- Add an endpoint to the admin API for querying the server version. Contributed by Joseph Weston. ([\#4772](https://github.com/matrix-org/synapse/issues/4772))
+- Include a default configuration file in the 'docs' directory. ([\#4791](https://github.com/matrix-org/synapse/issues/4791), [\#4801](https://github.com/matrix-org/synapse/issues/4801))
+- Synapse is now permissive about trailing slashes on some of its federation endpoints, allowing zero or more to be present. ([\#4793](https://github.com/matrix-org/synapse/issues/4793))
+- Add support for /keys/query and /keys/changes REST endpoints to client_reader worker. ([\#4796](https://github.com/matrix-org/synapse/issues/4796))
+- Add checks to incoming events over federation for events evading auth (aka "soft fail"). ([\#4814](https://github.com/matrix-org/synapse/issues/4814))
+- Add configurable rate limiting to the /login endpoint. ([\#4821](https://github.com/matrix-org/synapse/issues/4821), [\#4865](https://github.com/matrix-org/synapse/issues/4865))
+- Remove trailing slashes from certain outbound federation requests. Retry if receiving a 404. Context: #3622. ([\#4840](https://github.com/matrix-org/synapse/issues/4840))
+- Allow passing --daemonize flags to workers in the same way as with master. ([\#4853](https://github.com/matrix-org/synapse/issues/4853))
+- Batch up outgoing read-receipts to reduce federation traffic. ([\#4890](https://github.com/matrix-org/synapse/issues/4890), [\#4927](https://github.com/matrix-org/synapse/issues/4927))
+- Add option to disable searching the user directory. ([\#4895](https://github.com/matrix-org/synapse/issues/4895))
+- Add option to disable searching of local and remote public room lists. ([\#4896](https://github.com/matrix-org/synapse/issues/4896))
+- Add ability for password providers to login/register a user via 3PID (email, phone). ([\#4931](https://github.com/matrix-org/synapse/issues/4931))
+
+
+Bugfixes
+--------
+
+- Fix a bug where media with spaces in the name would get a corrupted name. ([\#2090](https://github.com/matrix-org/synapse/issues/2090))
+- Fix attempting to paginate in rooms where server cannot see any events, to avoid unnecessarily pulling in lots of redacted events. ([\#4699](https://github.com/matrix-org/synapse/issues/4699))
+- 'event_id' is now a required parameter in federated state requests, as per the matrix spec. ([\#4740](https://github.com/matrix-org/synapse/issues/4740))
+- Fix tightloop over connecting to replication server. ([\#4749](https://github.com/matrix-org/synapse/issues/4749))
+- Fix parsing of Content-Disposition headers on remote media requests and URL previews. ([\#4763](https://github.com/matrix-org/synapse/issues/4763))
+- Fix incorrect log about not persisting duplicate state event. ([\#4776](https://github.com/matrix-org/synapse/issues/4776))
+- Fix v4v6 option in HAProxy example config. Contributed by Flakebi. ([\#4790](https://github.com/matrix-org/synapse/issues/4790))
+- Handle batch updates in worker replication protocol. ([\#4792](https://github.com/matrix-org/synapse/issues/4792))
+- Fix bug where we didn't correctly throttle sending of USER_IP commands over replication. ([\#4818](https://github.com/matrix-org/synapse/issues/4818))
+- Fix potential race in handling missing updates in device list updates. ([\#4829](https://github.com/matrix-org/synapse/issues/4829))
+- Fix bug where synapse expected an un-specced `prev_state` field on state events. ([\#4837](https://github.com/matrix-org/synapse/issues/4837))
+- Transfer a user's notification settings (push rules) on room upgrade. ([\#4838](https://github.com/matrix-org/synapse/issues/4838))
+- fix test_auto_create_auto_join_where_no_consent. ([\#4886](https://github.com/matrix-org/synapse/issues/4886))
+- Fix a bug where hs_disabled_message was sometimes not correctly enforced. ([\#4888](https://github.com/matrix-org/synapse/issues/4888))
+- Fix bug in shutdown room admin API where it would fail if a user in the room hadn't consented to the privacy policy. ([\#4904](https://github.com/matrix-org/synapse/issues/4904))
+- Fix bug where blocked world-readable rooms were still peekable. ([\#4908](https://github.com/matrix-org/synapse/issues/4908))
+
+
+Internal Changes
+----------------
+
+- Add a systemd setup that supports synapse workers. Contributed by Luca Corbatto. ([\#4662](https://github.com/matrix-org/synapse/issues/4662))
+- Change from TravisCI to Buildkite for CI. ([\#4752](https://github.com/matrix-org/synapse/issues/4752))
+- When presence is disabled don't send over replication. ([\#4757](https://github.com/matrix-org/synapse/issues/4757))
+- Minor docstring fixes for MatrixFederationAgent. ([\#4765](https://github.com/matrix-org/synapse/issues/4765))
+- Optimise EDU transmission for the federation_sender worker. ([\#4770](https://github.com/matrix-org/synapse/issues/4770))
+- Update test_typing to use HomeserverTestCase. ([\#4771](https://github.com/matrix-org/synapse/issues/4771))
+- Update URLs for riot.im icons and logos in the default notification templates. ([\#4779](https://github.com/matrix-org/synapse/issues/4779))
+- Removed unnecessary $ from some federation endpoint path regexes. ([\#4794](https://github.com/matrix-org/synapse/issues/4794))
+- Remove link to deleted title in README. ([\#4795](https://github.com/matrix-org/synapse/issues/4795))
+- Clean up read-receipt handling. ([\#4797](https://github.com/matrix-org/synapse/issues/4797))
+- Add some debug about processing read receipts. ([\#4798](https://github.com/matrix-org/synapse/issues/4798))
+- Clean up some replication code. ([\#4799](https://github.com/matrix-org/synapse/issues/4799))
+- Add some docstrings. ([\#4815](https://github.com/matrix-org/synapse/issues/4815))
+- Add debug logger to try and track down #4422. ([\#4816](https://github.com/matrix-org/synapse/issues/4816))
+- Make shutdown API send explanation message to room after users have been forced joined. ([\#4817](https://github.com/matrix-org/synapse/issues/4817))
+- Update example_log_config.yaml. ([\#4820](https://github.com/matrix-org/synapse/issues/4820))
+- Document the `generate` option for the docker image. ([\#4824](https://github.com/matrix-org/synapse/issues/4824))
+- Fix check-newsfragment for debian-only changes. ([\#4825](https://github.com/matrix-org/synapse/issues/4825))
+- Add some debug logging for device list updates to help with #4828. ([\#4828](https://github.com/matrix-org/synapse/issues/4828))
+- Improve federation documentation, specifically .well-known support. Many thanks to @vaab. ([\#4832](https://github.com/matrix-org/synapse/issues/4832))
+- Disable captcha registration by default in unit tests. ([\#4839](https://github.com/matrix-org/synapse/issues/4839))
+- Add stuff back to the .gitignore. ([\#4843](https://github.com/matrix-org/synapse/issues/4843))
+- Clarify what registration_shared_secret allows for. ([\#4844](https://github.com/matrix-org/synapse/issues/4844))
+- Correctly log expected errors when fetching server keys. ([\#4847](https://github.com/matrix-org/synapse/issues/4847))
+- Update install docs to explicitly state a full-chain (not just the top-level) TLS certificate must be provided to Synapse. This caused some people's Synapse ports to appear correct in a browser but still (rightfully so) upset the federation tester. ([\#4849](https://github.com/matrix-org/synapse/issues/4849))
+- Move client read-receipt processing to federation sender worker. ([\#4852](https://github.com/matrix-org/synapse/issues/4852))
+- Refactor federation TransactionQueue. ([\#4855](https://github.com/matrix-org/synapse/issues/4855))
+- Comment out most options in the generated config. ([\#4863](https://github.com/matrix-org/synapse/issues/4863))
+- Fix yaml library warnings by using safe_load. ([\#4869](https://github.com/matrix-org/synapse/issues/4869))
+- Update Apache setup to remove location syntax. Thanks to @cwmke! ([\#4870](https://github.com/matrix-org/synapse/issues/4870))
+- Reinstate test case that runs unit tests against oldest supported dependencies. ([\#4879](https://github.com/matrix-org/synapse/issues/4879))
+- Update link to federation docs. ([\#4881](https://github.com/matrix-org/synapse/issues/4881))
+- fix test_auto_create_auto_join_where_no_consent. ([\#4886](https://github.com/matrix-org/synapse/issues/4886))
+- Use a regular HomeServerConfig object for unit tests rather than a Mock. ([\#4889](https://github.com/matrix-org/synapse/issues/4889))
+- Add some notes about tuning postgres for larger deployments. ([\#4895](https://github.com/matrix-org/synapse/issues/4895))
+- Add a config option for torture-testing worker replication. ([\#4902](https://github.com/matrix-org/synapse/issues/4902))
+- Log requests which are simulated by the unit tests. ([\#4905](https://github.com/matrix-org/synapse/issues/4905))
+- Allow newsfragments to end with exclamation marks. Exciting! ([\#4912](https://github.com/matrix-org/synapse/issues/4912))
+- Refactor some more tests to use HomeserverTestCase. ([\#4913](https://github.com/matrix-org/synapse/issues/4913))
+- Refactor out the state deltas portion of the user directory store and handler. ([\#4917](https://github.com/matrix-org/synapse/issues/4917))
+- Fix nginx example in ACME doc. ([\#4923](https://github.com/matrix-org/synapse/issues/4923))
+- Use an explicit dbname for postgres connections in the tests. ([\#4928](https://github.com/matrix-org/synapse/issues/4928))
+- Fix `ClientReplicationStreamProtocol.__str__()`. ([\#4929](https://github.com/matrix-org/synapse/issues/4929))
+
+
Synapse 0.99.2 (2019-03-01)
===========================
diff --git a/INSTALL.md b/INSTALL.md
index 2993f3a..1934593 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -35,7 +35,7 @@ virtualenv -p python3 ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
-pip install matrix-synapse[all]
+pip install matrix-synapse
```
This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
@@ -48,7 +48,7 @@ update flag:
```
source ~/synapse/env/bin/activate
-pip install -U matrix-synapse[all]
+pip install -U matrix-synapse
```
Before you can start Synapse, you will need to generate a configuration
@@ -71,7 +71,8 @@ set this to the hostname of your server. For a more production-ready setup, you
will probably want to specify your domain (`example.com`) rather than a
matrix-specific hostname here (in the same way that your email address is
probably `user@example.com` rather than `user@email.example.com`) - but
-doing so may require more advanced setup. - see [Setting up Federation](README.rst#setting-up-federation). Beware that the server name cannot be changed later.
+doing so may require more advanced setup: see [Setting up Federation](docs/federate.md).
+Beware that the server name cannot be changed later.
This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your Home Server to
@@ -256,18 +257,29 @@ https://github.com/spantaleev/matrix-docker-ansible-deploy
#### Matrix.org packages
Matrix.org provides Debian/Ubuntu packages of the latest stable version of
-Synapse via https://matrix.org/packages/debian/. To use them:
+Synapse via https://packages.matrix.org/debian/. They are available for Debian
+9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:
```
-sudo apt install -y lsb-release curl apt-transport-https
-echo "deb https://matrix.org/packages/debian `lsb_release -cs` main" |
+sudo apt install -y lsb-release wget apt-transport-https
+sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
+echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
sudo tee /etc/apt/sources.list.d/matrix-org.list
-curl "https://matrix.org/packages/debian/repo-key.asc" |
- sudo apt-key add -
sudo apt update
sudo apt install matrix-synapse-py3
```
+**Note**: if you followed a previous version of these instructions which
+recommended using `apt-key add` to add an old key from
+`https://matrix.org/packages/debian/`, you should note that this key has been
+revoked. You should remove the old key with `sudo apt-key remove
+C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to
+update your configuration.
+
+The fingerprint of the repository signing key (as shown by `gpg
+/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
+`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
+
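A quick way to check the keyring against the fingerprint quoted above (a sketch; `--show-keys` needs a reasonably recent GnuPG, and older versions print the same data when handed the file directly, as shown in the text):

```
gpg --show-keys /usr/share/keyrings/matrix-org-archive-keyring.gpg
# the fingerprint should read:
#   AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058
```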
#### Downstream Debian/Ubuntu packages
For `buster` and `sid`, Synapse is available in the Debian repositories and
@@ -374,9 +386,16 @@ To configure Synapse to expose an HTTPS port, you will need to edit
* You will also need to uncomment the `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You can either
point these settings at an existing certificate and key, or you can
- enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
- for having Synapse automatically provision and renew federation
- certificates through ACME can be found at [ACME.md](docs/ACME.md).
+ enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
+ for having Synapse automatically provision and renew federation
+ certificates through ACME can be found at [ACME.md](docs/ACME.md). If you
+ are using your own certificate, be sure to use a `.pem` file that includes
+ the full certificate chain including any intermediate certificates (for
+ instance, if using certbot, use `fullchain.pem` as your certificate, not
+ `cert.pem`).
+
+For those of you upgrading your TLS certificate in readiness for Synapse 1.0,
+please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
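A hedged sanity check for the full-chain advice above (the certbot path is illustrative): a full-chain `.pem` holds more than one certificate block, while a leaf-only `cert.pem` holds exactly one.

```
grep -c "BEGIN CERTIFICATE" /etc/letsencrypt/live/example.com/fullchain.pem
# fullchain.pem should print 2 or more; cert.pem prints 1.
```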
## Registering a user
@@ -402,8 +421,8 @@ This process uses a setting `registration_shared_secret` in
`homeserver.yaml`, which is shared between Synapse itself and the
`register_new_matrix_user` script. It doesn't matter what it is (a random
value is generated by `--generate-config`), but it should be kept secret, as
-anyone with knowledge of it can register users on your server even if
-`enable_registration` is `false`.
+anyone with knowledge of it can register users, including admin accounts,
+on your server even if `enable_registration` is `false`.
## Setting up a TURN server
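For reference, a hedged example of the flow described in the registration paragraph above (the port and flag are the script's conventional defaults and may differ for your listener configuration):

```
# Reads registration_shared_secret from the config and prompts for a
# username, password, and whether the new user should be an admin.
register_new_matrix_user -c homeserver.yaml http://localhost:8008
```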
diff --git a/MANIFEST.in b/MANIFEST.in
index eb2de60..0500dd6 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -39,6 +39,7 @@ prune .circleci
prune .coveragerc
prune debian
prune .codecov.yml
+prune .buildkite
exclude jenkins*
recursive-exclude jenkins *.sh
diff --git a/README.rst b/README.rst
index 8e22109..5409f0c 100644
--- a/README.rst
+++ b/README.rst
@@ -80,7 +80,10 @@ Thanks for using Matrix!
Synapse Installation
====================
-For details on how to install synapse, see `<INSTALL.md>`_.
+.. _federation:
+
+* For details on how to install synapse, see `<INSTALL.md>`_.
+* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
Connecting to Synapse from a client
@@ -93,13 +96,13 @@ Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see `<INSTALL.md#tls-certificates>`_.
-An easy way to get started is to login or register via Riot at
-https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
+An easy way to get started is to login or register via Riot at
+https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
You will need to change the server you are logging into from ``matrix.org``
-and instead specify a Homeserver URL of ``https://<server_name>:8448``
-(or just ``https://<server_name>`` if you are using a reverse proxy).
-(Leave the identity server as the default - see `Identity servers`_.)
-If you prefer to use another client, refer to our
+and instead specify a Homeserver URL of ``https://<server_name>:8448``
+(or just ``https://<server_name>`` if you are using a reverse proxy).
+(Leave the identity server as the default - see `Identity servers`_.)
+If you prefer to use another client, refer to our
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
If all goes well you should at least be able to log in, create a room, and
@@ -117,9 +120,9 @@ recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.rst>`_.)
Once ``enable_registration`` is set to ``true``, it is possible to register a
user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
-Your new user name will be formed partly from the ``server_name`` (see
-`Configuring synapse`_), and partly from a localpart you specify when you
-create the account. Your name will take the form of::
+Your new user name will be formed partly from the ``server_name``, and partly
+from a localpart you specify when you create the account. Your name will take
+the form of::
@localpart:my.domain.name
@@ -151,56 +154,6 @@ server on the same domain.
See https://github.com/vector-im/riot-web/issues/1977 and
https://developer.github.com/changes/2014-04-25-user-content-security for more details.
-Troubleshooting
-===============
-
-Running out of File Handles
----------------------------
-
-If synapse runs out of filehandles, it typically fails badly - live-locking
-at 100% CPU, and/or failing to accept new TCP connections (blocking the
-connecting client). Matrix currently can legitimately use a lot of file handles,
-thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
-servers. The first time a server talks in a room it will try to connect
-simultaneously to all participating servers, which could exhaust the available
-file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
-to respond. (We need to improve the routing algorithm used to be better than
-full mesh, but as of June 2017 this hasn't happened yet).
-
-If you hit this failure mode, we recommend increasing the maximum number of
-open file handles to be at least 4096 (assuming a default of 1024 or 256).
-This is typically done by editing ``/etc/security/limits.conf``
-
-Separately, Synapse may leak file handles if inbound HTTP requests get stuck
-during processing - e.g. blocked behind a lock or talking to a remote server etc.
-This is best diagnosed by matching up the 'Received request' and 'Processed request'
-log lines and looking for any 'Processed request' lines which take more than
-a few seconds to execute. Please let us know at #synapse:matrix.org if
-you see this failure mode so we can help debug it, however.
-
-Help!! Synapse eats all my RAM!
--------------------------------
-
-Synapse's architecture is quite RAM hungry currently - we deliberately
-cache a lot of recent room data and metadata in RAM in order to speed up
-common requests. We'll improve this in future, but for now the easiest
-way to either reduce the RAM usage (at the risk of slowing things down)
-is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
-variable. The default is 0.5, which can be decreased to reduce RAM usage
-in memory constrained enviroments, or increased if performance starts to
-degrade.
-
-Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
-improvement in overall amount, and especially in terms of giving back RAM
-to the OS. To use it, the library must simply be put in the LD_PRELOAD
-environment variable when launching Synapse. On Debian, this can be done
-by installing the ``libjemalloc1`` package and adding this line to
-``/etc/default/matrix-synapse``::
-
- LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
-
-This can make a significant difference on Python 2.7 - it's unclear how
-much of an improvement it provides on Python 3.x.
Upgrading an existing Synapse
=============================
@@ -211,100 +164,19 @@ versions of synapse.
.. _UPGRADE.rst: UPGRADE.rst
-.. _federation:
-
-Setting up Federation
-=====================
-
-Federation is the process by which users on different servers can participate
-in the same room. For this to work, those other servers must be able to contact
-yours to send messages.
-
-The ``server_name`` in your ``homeserver.yaml`` file determines the way that
-other servers will reach yours. By default, they will treat it as a hostname
-and try to connect to port 8448. This is easy to set up and will work with the
-default configuration, provided you set the ``server_name`` to match your
-machine's public DNS hostname, and give Synapse a TLS certificate which is
-valid for your ``server_name``.
-
-For a more flexible configuration, you can set up a DNS SRV record. This allows
-you to run your server on a machine that might not have the same name as your
-domain name. For example, you might want to run your server at
-``synapse.example.com``, but have your Matrix user-ids look like
-``@user:example.com``. (A SRV record also allows you to change the port from
-the default 8448).
-
-To use a SRV record, first create your SRV record and publish it in DNS. This
-should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
-<synapse.server.name>``. The DNS record should then look something like::
-
- $ dig -t srv _matrix._tcp.example.com
- _matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
-
-Note that the server hostname cannot be an alias (CNAME record): it has to point
-directly to the server hosting the synapse instance.
-
-You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
-its user-ids, by setting ``server_name``::
-
- python -m synapse.app.homeserver \
- --server-name <yourdomain.com> \
- --config-path homeserver.yaml \
- --generate-config
- python -m synapse.app.homeserver --config-path homeserver.yaml
-
-If you've already generated the config file, you need to edit the ``server_name``
-in your ``homeserver.yaml`` file. If you've already started Synapse and a
-database has been created, you will have to recreate the database.
-
-If all goes well, you should be able to `connect to your server with a client`__,
-and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
-step. "Matrix HQ"'s sheer size and activity level tends to make even the
-largest boxes pause for thought.)
-
-.. __: `Connecting to Synapse from a client`_
-
-Troubleshooting
----------------
-
-You can use the `federation tester <https://matrix.org/federationtester>`_ to
-check if your homeserver is all set.
-
-The typical failure mode with federation is that when you try to join a room,
-it is rejected with "401: Unauthorized". Generally this means that other
-servers in the room couldn't access yours. (Joining a room over federation is a
-complicated dance which requires connections in both directions).
-
-So, things to check are:
-
-* If you are not using a SRV record, check that your ``server_name`` (the part
- of your user-id after the ``:``) matches your hostname, and that port 8448 on
- that hostname is reachable from outside your network.
-* If you *are* using a SRV record, check that it matches your ``server_name``
- (it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
- it specifies are reachable from outside your network.
-
-Another common problem is that people on other servers can't join rooms that
-you invite them to. This can be caused by an incorrectly-configured reverse
-proxy: see `<docs/reverse_proxy.rst>`_ for instructions on how to correctly
-configure a reverse proxy.
-
-Running a Demo Federation of Synapses
--------------------------------------
-
-If you want to get up and running quickly with a trio of homeservers in a
-private federation, there is a script in the ``demo`` directory. This is mainly
-useful just for development purposes. See `<demo/README>`_.
-
Using PostgreSQL
================
-As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
-traditionally used for convenience and simplicity.
+Synapse offers two database engines:
+ * `SQLite <https://sqlite.org/>`_
+ * `PostgreSQL <https://www.postgresql.org>`_
+
+By default Synapse uses SQLite and in doing so trades performance for convenience.
+SQLite is only recommended in Synapse for testing purposes or for servers with
+light workloads.
-The advantages of Postgres include:
+Almost all installations should opt to use PostgreSQL. Advantages include:
* significant performance improvements due to the superior threading and
caching model, smarter query optimiser
@@ -400,7 +272,7 @@ to install using pip and a virtualenv::
virtualenv -p python3 env
source env/bin/activate
- python -m pip install -e .[all]
+ python -m pip install --no-pep-517 -e .[all]
This will run a process of downloading and installing all the needed
dependencies into a virtual env.
@@ -440,3 +312,54 @@ sphinxcontrib-napoleon::
Building internal API documentation::
python setup.py build_sphinx
+
+Troubleshooting
+===============
+
+Running out of File Handles
+---------------------------
+
+If synapse runs out of file handles, it typically fails badly - live-locking
+at 100% CPU, and/or failing to accept new TCP connections (blocking the
+connecting client). Matrix currently can legitimately use a lot of file handles,
+thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
+servers. The first time a server talks in a room it will try to connect
+simultaneously to all participating servers, which could exhaust the available
+file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
+to respond. (We need to improve the routing algorithm used to be better than
+full mesh, but as of March 2019 this hasn't happened yet).
+
+If you hit this failure mode, we recommend increasing the maximum number of
+open file handles to be at least 4096 (assuming a default of 1024 or 256).
+This is typically done by editing ``/etc/security/limits.conf``
+
+Separately, Synapse may leak file handles if inbound HTTP requests get stuck
+during processing - e.g. blocked behind a lock or talking to a remote server etc.
+This is best diagnosed by matching up the 'Received request' and 'Processed request'
+log lines and looking for any 'Processed request' lines which take more than
+a few seconds to execute. Please let us know at #synapse:matrix.org if
+you see this failure mode so we can help debug it, however.
+
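A hedged sketch of the limits.conf change suggested above (the user name is illustrative; use whichever account runs Synapse):

```
# Lines to append to /etc/security/limits.conf (illustrative values):
#   matrix-synapse    soft    nofile    4096
#   matrix-synapse    hard    nofile    4096
# Verify the limit seen by the current shell:
ulimit -n
```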
+Help!! Synapse eats all my RAM!
+-------------------------------
+
+Synapse's architecture is quite RAM hungry currently - we deliberately
+cache a lot of recent room data and metadata in RAM in order to speed up
+common requests. We'll improve this in the future, but for now the easiest
+way to reduce the RAM usage (at the risk of slowing things down)
+is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
+variable. The default is 0.5, which can be decreased to reduce RAM usage
+in memory-constrained environments, or increased if performance starts to
+degrade.
+
+Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
+improvement in overall memory use, and especially in terms of giving back RAM
+to the OS. To use it, the library must simply be put in the LD_PRELOAD
+environment variable when launching Synapse. On Debian, this can be done
+by installing the ``libjemalloc1`` package and adding this line to
+``/etc/default/matrix-synapse``::
+
+ LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
+
+This can make a significant difference on Python 2.7 - it's unclear how
+much of an improvement it provides on Python 3.x.
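+
+Where Synapse runs as a systemd service rather than via
+``/etc/default/matrix-synapse``, a drop-in override is one way to achieve the
+same effect (a sketch; the unit name ``matrix-synapse.service`` is an
+assumption)::
+
+    # /etc/systemd/system/matrix-synapse.service.d/jemalloc.conf
+    [Service]
+    Environment="LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1"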
diff --git a/changelog.d/5133.bugfix b/changelog.d/5133.bugfix
deleted file mode 100644
index 12a32a9..0000000
--- a/changelog.d/5133.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for identifying and responsibly disclosing this issue!
diff --git a/changelog.d/5134.bugfix b/changelog.d/5134.bugfix
deleted file mode 100644
index 684d48c..0000000
--- a/changelog.d/5134.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Blacklist 0.0.0.0 and :: by default for URL previews. Thanks to @opnsec for identifying and responsibly disclosing this issue too!
diff --git a/contrib/example_log_config.yaml b/contrib/example_log_config.yaml
index c7aa68a..0659296 100644
--- a/contrib/example_log_config.yaml
+++ b/contrib/example_log_config.yaml
@@ -19,6 +19,7 @@ handlers:
# example output to console
console:
class: logging.StreamHandler
+ formatter: fmt
filters: [context]
# example output to file - to enable, edit 'root' config below.
@@ -29,7 +30,7 @@ handlers:
maxBytes: 100000000
backupCount: 3
filters: [context]
-
+ encoding: utf8
root:
level: INFO
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index dc3f4a1..4abeb7d 100644
--- a/contrib/grafana/synapse.json
+++ b/contrib/grafana/synapse.json
@@ -1,5211 +1 @@
-{
- "__inputs": [
- {
- "name": "DS_PROMETHEUS",
- "label": "Prometheus",
- "description": "",
- "type": "datasource",
- "pluginId": "prometheus",
- "pluginName": "Prometheus"
- }
- ],
- "__requires": [
- {
- "type": "grafana",
- "id": "grafana",
- "name": "Grafana",
- "version": "5.2.4"
- },
- {
- "type": "panel",
- "id": "graph",
- "name": "Graph",
- "version": "5.0.0"
- },
- {
- "type": "panel",
- "id": "heatmap",
- "name": "Heatmap",
- "version": "5.0.0"
- },
- {
- "type": "datasource",
- "id": "prometheus",
- "name": "Prometheus",
- "version": "5.0.0"
- }
- ],
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "$datasource",
- "enable": false,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "limit": 100,
- "name": "Annotations & Alerts",
- "showIn": 0,
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": null,
- "graphTooltip": 0,
- "id": null,
- "iteration": 1537878047048,
- "links": [
- {
- "asDropdown": true,
- "icon": "external link",
- "keepTime": true,
- "tags": [
- "matrix"
- ],
- "title": "Dashboards",
- "type": "dashboards"
- }
- ],
- "panels": [
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 73,
- "panels": [],
- "title": "Overview",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 75,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} ",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "CPU usage",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": "1",
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "cards": {
- "cardPadding": 0,
- "cardRound": null
- },
- "color": {
- "cardColor": "#b4ff00",
- "colorScale": "sqrt",
- "colorScheme": "interpolateSpectral",
- "exponent": 0.5,
- "mode": "spectrum"
- },
- "dataFormat": "tsbuckets",
- "datasource": "$datasource",
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "heatmap": {},
- "highlightCards": true,
- "id": 85,
- "legend": {
- "show": false
- },
- "links": [],
- "targets": [
- {
- "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)",
- "format": "heatmap",
- "intervalFactor": 1,
- "legendFormat": "{{le}}",
- "refId": "A"
- }
- ],
- "title": "Event Send Time",
- "tooltip": {
- "show": true,
- "showHistogram": false
- },
- "type": "heatmap",
- "xAxis": {
- "show": true
- },
- "xBucketNumber": null,
- "xBucketSize": null,
- "yAxis": {
- "decimals": null,
- "format": "s",
- "logBase": 2,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
- },
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 10
- },
- "id": 33,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "",
- "refId": "A",
- "step": 20,
- "target": ""
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Events Persisted",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 17
- },
- "id": 54,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 0,
- "grid": {},
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 18
- },
- "id": 34,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": true,
- "targets": [
- {
- "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{job}} {{index}}",
- "refId": "A",
- "step": 20,
- "target": ""
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Memory",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "bytes",
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 18
- },
- "id": 37,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "/max$/",
- "color": "#890F02",
- "fill": 0,
- "legend": false
- }
- ],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
- "refId": "A",
- "step": 20
- },
- {
- "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
- "format": "time_series",
- "hide": true,
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} max",
- "refId": "B",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Open FDs",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 25
- },
- "id": 50,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Avg reactor tick time",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 25
- },
- "id": 105,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} 99%",
- "refId": "A",
- "step": 20
- },
- {
- "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} 95%",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} 90%",
- "refId": "C"
- },
- {
- "expr": "",
- "format": "time_series",
- "intervalFactor": 1,
- "refId": "D"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Reactor tick quantiles",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 0,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 32
- },
- "id": 53,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Up",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 32
- },
- "id": 49,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "/^up/",
- "legend": false,
- "yaxis": 2
- }
- ],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Prometheus scrape time",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "decimals": 0,
- "format": "none",
- "label": "",
- "logBase": 1,
- "max": "0",
- "min": "-1",
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 39
- },
- "id": 5,
- "legend": {
- "alignAsTable": false,
- "avg": false,
- "current": false,
- "hideEmpty": false,
- "hideZero": false,
- "max": false,
- "min": false,
- "rightSide": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "/user/"
- },
- {
- "alias": "/system/"
- }
- ],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} system ",
- "metric": "",
- "refId": "B",
- "step": 20
- },
- {
- "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} user",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [
- {
- "colorMode": "custom",
- "line": true,
- "lineColor": "rgba(216, 200, 27, 0.27)",
- "op": "gt",
- "value": 0.5
- },
- {
- "colorMode": "custom",
- "line": true,
- "lineColor": "rgba(234, 112, 112, 0.22)",
- "op": "gt",
- "value": 0.8
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "CPU",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "percentunit",
- "label": "",
- "logBase": 1,
- "max": "1.2",
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "repeat": null,
- "title": "Process info",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 18
- },
- "id": 56,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": 1,
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 47
- },
- "id": 40,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{type}}",
- "refId": "D"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Events/s Local vs Remote",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": "",
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": 1,
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 47
- },
- "id": 46,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
- "format": "time_series",
- "instant": false,
- "intervalFactor": 2,
- "legendFormat": "{{type}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Events/s by Type",
- "tooltip": {
- "shared": false,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {
- "irc-freenode (local)": "#EAB839"
- },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": 1,
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 54
- },
- "id": 44,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{origin_entity}} ({{origin_type}})",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Events/s by Origin",
- "tooltip": {
- "shared": false,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": 1,
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 54
- },
- "id": 45,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(synapse_storage_events_persisted_events_sep{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\"}[$bucket_size])) by (origin_type, origin_entity)",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{origin_entity}} ({{origin_type}})",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Memberships/s by Origin",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "repeat": null,
- "title": "Event persist rates",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 19
- },
- "id": 57,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": null,
- "editable": true,
- "error": false,
- "fill": 2,
- "grid": {},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 62
- },
- "id": 4,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideEmpty": false,
- "hideZero": true,
- "max": false,
- "min": false,
- "rightSide": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [
- {
- "colorMode": "custom",
- "fill": true,
- "fillColor": "rgba(216, 200, 27, 0.27)",
- "op": "gt",
- "value": 100
- },
- {
- "colorMode": "custom",
- "fill": true,
- "fillColor": "rgba(234, 112, 112, 0.22)",
- "op": "gt",
- "value": 250
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Request Count by arrival time",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "transparent": false,
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 62
- },
- "id": 32,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{method}} {{servlet}} {{job}}-{{index}}",
- "refId": "A",
- "step": 20,
- "target": ""
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Top 10 Request Counts",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": null,
- "editable": true,
- "error": false,
- "fill": 2,
- "grid": {},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 70
- },
- "id": 23,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideEmpty": false,
- "hideZero": true,
- "max": false,
- "min": false,
- "rightSide": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [
- {
- "colorMode": "custom",
- "fill": true,
- "fillColor": "rgba(216, 200, 27, 0.27)",
- "op": "gt",
- "value": 100,
- "yaxis": "left"
- },
- {
- "colorMode": "custom",
- "fill": true,
- "fillColor": "rgba(234, 112, 112, 0.22)",
- "op": "gt",
- "value": 250,
- "yaxis": "left"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total CPU Usage by Endpoint",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "transparent": false,
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "percentunit",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": null,
- "editable": true,
- "error": false,
- "fill": 2,
- "grid": {},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 70
- },
- "id": 52,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideEmpty": false,
- "hideZero": true,
- "max": false,
- "min": false,
- "rightSide": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "(rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_response_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [
- {
- "colorMode": "custom",
- "fill": true,
- "fillColor": "rgba(216, 200, 27, 0.27)",
- "op": "gt",
- "value": 100
- },
- {
- "colorMode": "custom",
- "fill": true,
- "fillColor": "rgba(234, 112, 112, 0.22)",
- "op": "gt",
- "value": 250
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Average CPU Usage by Endpoint",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "transparent": false,
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 78
- },
- "id": 7,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_http_server_response_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "DB Usage by endpoint",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "percentunit",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": null,
- "editable": true,
- "error": false,
- "fill": 2,
- "grid": {},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 78
- },
- "id": 47,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": false,
- "hideEmpty": false,
- "hideZero": true,
- "max": true,
- "min": false,
- "rightSide": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])/rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Non-sync avg response time",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "transparent": false,
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 86
- },
- "id": 103,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Requests in flight",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "repeat": null,
- "title": "Requests",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 20
- },
- "id": 97,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 49
- },
- "id": 99,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{name}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "CPU usage by background jobs",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 49
- },
- "id": 101,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{name}}",
- "refId": "A"
- },
- {
- "expr": "",
- "format": "time_series",
- "intervalFactor": 1,
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "DB usage by background jobs (including scheduling time)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "title": "Background jobs",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 21
- },
- "id": 81,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 64
- },
- "id": 79,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_federation_client_sent_transactions{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "txn rate",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Outgoing federation transaction rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 64
- },
- "id": 83,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_federation_server_received_pdus{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "pdus",
- "refId": "A"
- },
- {
- "expr": "rate(synapse_federation_server_received_edus{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "edus",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Incoming PDU/EDU rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "title": "Federation",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 22
- },
- "id": 60,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 65
- },
- "id": 51,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_push_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "processed {{job}}",
- "refId": "A",
- "step": 20
- },
- {
- "expr": "rate(synapse_push_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "failed {{job}}",
- "refId": "B",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP Push rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "repeat": null,
- "title": "Pushes",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 23
- },
- "id": 58,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 24
- },
- "id": 48,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Avg time waiting for db conn",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "s",
- "label": "",
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 24
- },
- "id": 104,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{job}} {{index}} 99%",
- "refId": "A",
- "step": 20
- },
- {
- "expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}} {{index}} 95%",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}} {{index}} 90%",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Db scheduling time quantiles",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "s",
- "label": "",
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 0,
- "grid": {},
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 31
- },
- "id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{desc}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Top DB transactions by txn rate",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 31
- },
- "id": 11,
- "legend": {
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": true,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": true,
- "steppedLine": true,
- "targets": [
- {
- "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "instant": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{desc}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Top DB transactions by total txn time",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "percentunit",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "repeat": null,
- "title": "Database",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 24
- },
- "id": 59,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 13,
- "w": 12,
- "x": 0,
- "y": 60
- },
- "id": 12,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{block_name}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total CPU Usage by Block",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "percentunit",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 13,
- "w": 12,
- "x": 12,
- "y": 60
- },
- "id": 26,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{block_name}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Average CPU Time per Block",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ms",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 13,
- "w": 12,
- "x": 0,
- "y": 73
- },
- "id": 13,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}} {{block_name}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total DB Usage by Block",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "percentunit",
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 13,
- "w": 12,
- "x": 12,
- "y": 73
- },
- "id": 27,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{block_name}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Average Database Time per Block",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ms",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 13,
- "w": 12,
- "x": 0,
- "y": 86
- },
- "id": 28,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{block_name}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Average Transactions per Block",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 13,
- "w": 12,
- "x": 12,
- "y": 86
- },
- "id": 25,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{block_name}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Average Wallclock Time per Block",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ms",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "repeat": null,
- "title": "Per-block metrics",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 25
- },
- "id": 61,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": 2,
- "editable": true,
- "error": false,
- "fill": 0,
- "grid": {},
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 0,
- "y": 68
- },
- "id": 1,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{name}} {{job}}-{{index}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Cache Hit Ratio",
- "tooltip": {
- "msResolution": true,
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "percentunit",
- "label": "",
- "logBase": 1,
- "max": "1",
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 12,
- "y": 68
- },
- "id": 8,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideZero": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "connected",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
- "format": "time_series",
- "hide": false,
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{name}} {{job}}-{{index}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Cache Size",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 0,
- "y": 78
- },
- "id": 38,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideZero": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "connected",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{name}} {{job}}-{{index}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Cache request rate",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "rps",
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 12,
- "y": 78
- },
- "id": 39,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "topk(10, rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=\"$job\",instance=\"$instance\"}[$bucket_size]))",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{name}} {{job}}-{{index}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Top 10 cache misses",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "rps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 88
- },
- "id": 65,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{name}} {{job}}-{{index}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Cache eviction rate",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "transparent": false,
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "hertz",
- "label": "entries / second",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "repeat": null,
- "title": "Caches",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 26
- },
- "id": 62,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 27
- },
- "id": 91,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": true,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])",
- "format": "time_series",
- "instant": false,
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} gen {{gen}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total GC time by bucket (10m smoothing)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "decimals": 3,
- "editable": true,
- "error": false,
- "fill": 1,
- "grid": {},
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 27
- },
- "id": 21,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{job}} {{index}} gen {{gen}} ",
- "refId": "A",
- "step": 20,
- "target": ""
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Average GC Time Per Collection",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "cumulative"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 36
- },
- "id": 89,
- "legend": {
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} gen {{gen}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Currently allocated objects",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 36
- },
- "id": 93,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "connected",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} gen {{gen}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Object counts per collection",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 45
- },
- "id": 95,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} gen {{gen}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "GC frequency",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "cards": {
- "cardPadding": 0,
- "cardRound": null
- },
- "color": {
- "cardColor": "#b4ff00",
- "colorScale": "sqrt",
- "colorScheme": "interpolateSpectral",
- "exponent": 0.5,
- "max": null,
- "min": 0,
- "mode": "spectrum"
- },
- "dataFormat": "tsbuckets",
- "datasource": "${DS_PROMETHEUS}",
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 45
- },
- "heatmap": {},
- "highlightCards": true,
- "id": 87,
- "legend": {
- "show": true
- },
- "links": [],
- "targets": [
- {
- "expr": "sum(rate(python_gc_time_bucket[$bucket_size])) by (le)",
- "format": "heatmap",
- "intervalFactor": 1,
- "legendFormat": "{{le}}",
- "refId": "A"
- }
- ],
- "title": "GC durations",
- "tooltip": {
- "show": true,
- "showHistogram": false
- },
- "type": "heatmap",
- "xAxis": {
- "show": true
- },
- "xBucketNumber": null,
- "xBucketSize": null,
- "yAxis": {
- "decimals": null,
- "format": "s",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
- },
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
- }
- ],
- "repeat": null,
- "title": "GC",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 27
- },
- "id": 63,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 97
- },
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_replication_tcp_resource_user_sync{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "user started/stopped syncing",
- "refId": "A",
- "step": 20
- },
- {
- "expr": "rate(synapse_replication_tcp_resource_federation_ack{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "federation ack",
- "refId": "B",
- "step": 20
- },
- {
- "expr": "rate(synapse_replication_tcp_resource_remove_pusher{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "remove pusher",
- "refId": "C",
- "step": 20
- },
- {
- "expr": "rate(synapse_replication_tcp_resource_invalidate_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "invalidate cache",
- "refId": "D",
- "step": 20
- },
- {
- "expr": "rate(synapse_replication_tcp_resource_user_ip_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "user ip cache",
- "refId": "E",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Rate of events on replication master",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 97
- },
- "id": 41,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{stream_name}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Outgoing stream updates",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 104
- },
- "id": 42,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{command}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Rate of incoming commands",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 104
- },
- "id": 43,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{command}}",
- "refId": "A",
- "step": 20
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Rate of outgoing commands",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "repeat": null,
- "title": "Replication",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 28
- },
- "id": 69,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 29
- },
- "id": 67,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "connected",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": " synapse_event_persisted_position{instance=\"$instance\",job=\"synapse\"} - ignoring(index, job, name) group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} ",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Event processing lag",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": "events",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 29
- },
- "id": 71,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "connected",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
- "format": "time_series",
- "hide": false,
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{name}}",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Age of last processed event",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "title": "Event processing loop positions",
- "type": "row"
- }
- ],
- "refresh": "1m",
- "schemaVersion": 16,
- "style": "dark",
- "tags": [
- "matrix"
- ],
- "templating": {
- "list": [
- {
- "current": {
- "text": "Prometheus",
- "value": "Prometheus"
- },
- "hide": 0,
- "label": null,
- "name": "datasource",
- "options": [],
- "query": "prometheus",
- "refresh": 1,
- "regex": "",
- "type": "datasource"
- },
- {
- "allFormat": "glob",
- "auto": true,
- "auto_count": 100,
- "auto_min": "30s",
- "current": {
- "text": "auto",
- "value": "$__auto_interval_bucket_size"
- },
- "datasource": null,
- "hide": 0,
- "includeAll": false,
- "label": "Bucket Size",
- "multi": false,
- "multiFormat": "glob",
- "name": "bucket_size",
- "options": [
- {
- "selected": true,
- "text": "auto",
- "value": "$__auto_interval_bucket_size"
- },
- {
- "selected": false,
- "text": "30s",
- "value": "30s"
- },
- {
- "selected": false,
- "text": "1m",
- "value": "1m"
- },
- {
- "selected": false,
- "text": "2m",
- "value": "2m"
- },
- {
- "selected": false,
- "text": "5m",
- "value": "5m"
- },
- {
- "selected": false,
- "text": "10m",
- "value": "10m"
- },
- {
- "selected": false,
- "text": "15m",
- "value": "15m"
- }
- ],
- "query": "30s,1m,2m,5m,10m,15m",
- "refresh": 2,
- "type": "interval"
- },
- {
- "allValue": null,
- "current": {},
- "datasource": "$datasource",
- "hide": 0,
- "includeAll": false,
- "label": null,
- "multi": false,
- "name": "instance",
- "options": [],
- "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)",
- "refresh": 2,
- "regex": "",
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- },
- {
- "allFormat": "regex wildcard",
- "allValue": "",
- "current": {},
- "datasource": "$datasource",
- "hide": 0,
- "hideLabel": false,
- "includeAll": true,
- "label": "Job",
- "multi": true,
- "multiFormat": "regex values",
- "name": "job",
- "options": [],
- "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)",
- "refresh": 2,
- "refresh_on_load": false,
- "regex": "",
- "sort": 1,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- },
- {
- "allFormat": "regex wildcard",
- "allValue": ".*",
- "current": {},
- "datasource": "$datasource",
- "hide": 0,
- "hideLabel": false,
- "includeAll": true,
- "label": "",
- "multi": true,
- "multiFormat": "regex values",
- "name": "index",
- "options": [],
- "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)",
- "refresh": 2,
- "refresh_on_load": false,
- "regex": "",
- "sort": 3,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-1h",
- "to": "now"
- },
- "timepicker": {
- "now": true,
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "Synapse",
- "uid": "000000012",
- "version": 3
-}
\ No newline at end of file
+{"annotations":{"list":[{"builtIn":1,"datasource":"$datasource","enable":false,"hide":true,"iconColor":"rgba(0, 211, 255, 1)","limit":100,"name":"Annotations \u0026 Alerts","showIn":0,"type":"dashboard"}]},"editable":true,"gnetId":null,"graphTooltip":0,"id":4,"iteration":1554391266679,"links":[{"asDropdown":true,"icon":"external link","keepTime":true,"tags":["matrix"],"title":"Dashboards","type":"dashboards"}],"panels":[{"collapsed":false,"gridPos":{"h":1,"w":24,"x":0,"y":0},"id":73,"panels":[],"title":"Overview","type":"row"},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":1},"id":75,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} ","refId":"A"}],"thresholds":[{"colorMode":"critical","fill":true,"line":true,"op":"gt","value":1,"yaxis":"left"}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"CPU usage","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"percentunit","label":null,"logBase":1,"max":"1.5","min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":9,"w":12,"x":12,"y":1},"id":33,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":false,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)","format":"time_series","intervalFactor":2,"legendFormat":"","refId":"A","step":20,"target":""}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Events Persisted","tooltip":{"shared":true,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"cards":{"cardPadding":0,"cardRound":null},"color":{"cardColor":"#b4ff00","colorScale":"sqrt","colorScheme":"interpolateSpectral","exponent":0.5,"mode":"spectrum"},"dataFormat":"tsbuckets","datasource":"$datasource","gridPos":{"h":9,"w":12,"x":0,"y":10},"heatmap":{},"hideZeroBuckets":true,"highlightCards":true,"id":85,"legend":{"show":false},"links":[],"reverseYBuckets":false,"targets":[{"expr":"sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by 
(le)","format":"heatmap","intervalFactor":1,"legendFormat":"{{le}}","refId":"A"}],"title":"Event Send Time","tooltip":{"show":true,"showHistogram":false},"type":"heatmap","xAxis":{"show":true},"xBucketNumber":null,"xBucketSize":null,"yAxis":{"decimals":null,"format":"s","logBase":2,"max":null,"min":null,"show":true,"splitFactor":null},"yBucketBound":"auto","yBucketNumber":null,"yBucketSize":null},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":0,"gridPos":{"h":9,"w":12,"x":12,"y":10},"id":107,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","repeat":null,"repeatDirection":"h","seriesOverrides":[{"alias":"mean","linewidth":2}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"99%","refId":"A"},{"expr":"histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))","format":"time_series","intervalFactor":1,"legendFormat":"95%","refId":"B"},{"expr":"histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))","format":"time_series","intervalFactor":1,"legendFormat":"90%","refId":"C"},{"expr":"histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))","format":"time_series","intervalFactor":1,"legendFormat":"50%","refId":"D"},{"expr":"sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method)","format":"time_series","intervalFactor":1,"legendFormat":"mean","refId":"E"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Event send time 
quantiles","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":0,"gridPos":{"h":9,"w":12,"x":0,"y":19},"id":118,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","repeatDirection":"h","seriesOverrides":[{"alias":"mean","linewidth":2}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 99%","refId":"A"},{"expr":"histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 95%","refId":"B"},{"expr":"histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 90%","refId":"C"},{"expr":"histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 50%","refId":"D"},{"expr":"sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method)","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} mean","refId":"E"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Event send time quantiles by 
worker","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":28},"id":54,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":7,"w":12,"x":0,"y":29},"id":5,"legend":{"alignAsTable":false,"avg":false,"current":false,"hideEmpty":false,"hideZero":false,"max":false,"min":false,"rightSide":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[{"alias":"/user/"},{"alias":"/system/"}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} system ","metric":"","refId":"B","step":20},{"expr":"rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","hide":false,"interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} user","refId":"A","step":20}],"thresholds":[{"colorMode":"custom","fillColor":"rgba(255, 255, 255, 1)","line":true,"lineColor":"rgba(216, 200, 27, 0.27)","op":"gt","value":0.5},{"colorMode":"custom","fillColor":"rgba(255, 255, 255, 1)","line":true,"lineColor":"rgba(234, 112, 112, 0.22)","op":"gt","value":0.8}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"CPU","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"percentunit","label":"","logBase":1,"max":"1.2","min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":29},"id":37,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[{"alias":"/max$/","color":"#890F02","fill":0,"legend":false}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","hide":false,"intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A","step":20},{"expr":"process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","hide":true,"intervalFactor":2,"legendFormat":"{{job}}-{{index}} max","refId":"B","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Open 
FDs","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"none","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":0,"grid":{},"gridPos":{"h":7,"w":12,"x":0,"y":36},"id":34,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","intervalFactor":2,"legendFormat":"{{job}} {{index}}","refId":"A","step":20,"target":""}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Memory","tooltip":{"shared":true,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"bytes","logBase":1,"max":null,"min":"0","show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","description":"Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":36},"id":105,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} 99%","refId":"A","step":20},{"expr":"histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 95%","refId":"B"},{"expr":"histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 90%","refId":"C"},{"expr":"","format":"time_series","intervalFactor":1,"refId":"D"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Reactor tick 
quantiles","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":43},"id":50,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Avg reactor tick time","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":43},"id":49,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[{"alias":"/^up/","legend":false,"yaxis":2}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Prometheus scrape 
time","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":"0","show":true},{"decimals":0,"format":"none","label":"","logBase":1,"max":"0","min":"-1","show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":0,"gridPos":{"h":7,"w":12,"x":0,"y":50},"id":53,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Up","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":50},"id":120,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":true,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","instant":false,"intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} {{tag}}","refId":"A"},{"expr":"rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","instant":false,"interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{name}}","refId":"B"}],"thresholds":[{"colorMode":"critical","fill":true,"line":true,"op":"gt","value":1,"yaxis":"left"}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Stacked CPU usage","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Process 
info","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":29},"id":56,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":1,"fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":58},"id":40,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{type}}","refId":"D"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Events/s Local vs Remote","tooltip":{"shared":true,"sort":2,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":"","logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":1,"fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":58},"id":46,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","instant":false,"intervalFactor":2,"legendFormat":"{{type}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Events/s by Type","tooltip":{"shared":false,"sort":2,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{"irc-freenode (local)":"#EAB839"},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":1,"fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":65},"id":44,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{origin_entity}} ({{origin_type}})","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Events/s by 
Origin","tooltip":{"shared":false,"sort":2,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":1,"fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":65},"id":45,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_storage_events_persisted_events_sep{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\", origin_type=\"local\"}[$bucket_size])) by (origin_type, origin_entity)","format":"time_series","intervalFactor":2,"legendFormat":"{{origin_entity}} ({{origin_type}})","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Memberships/s by Origin","tooltip":{"shared":true,"sort":2,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Event persist rates","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":30},"id":57,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":null,"editable":true,"error":false,"fill":2,"grid":{},"gridPos":{"h":8,"w":12,"x":0,"y":67},"id":4,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":false,"hideZero":true,"max":false,"min":false,"rightSide":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} {{tag}}","refId":"A","step":20}],"thresholds":[{"colorMode":"custom","fill":true,"fillColor":"rgba(216, 200, 27, 0.27)","op":"gt","value":100},{"colorMode":"custom","fill":true,"fillColor":"rgba(234, 112, 112, 0.22)","op":"gt","value":250}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Request Count by arrival 
time","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":8,"w":12,"x":12,"y":67},"id":32,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})","format":"time_series","intervalFactor":2,"legendFormat":"{{method}} {{servlet}} {{job}}-{{index}}","refId":"A","step":20,"target":""}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Top 10 Request Counts","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":null,"editable":true,"error":false,"fill":2,"grid":{},"gridPos":{"h":8,"w":12,"x":0,"y":75},"id":23,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":false,"hideZero":true,"max":false,"min":false,"rightSide":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} {{tag}}","refId":"A","step":20}],"thresholds":[{"colorMode":"custom","fill":true,"fillColor":"rgba(216, 200, 27, 0.27)","op":"gt","value":100,"yaxis":"left"},{"colorMode":"custom","fill":true,"fillColor":"rgba(234, 112, 112, 0.22)","op":"gt","value":250,"yaxis":"left"}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Total CPU Usage by 
Endpoint","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":null,"editable":true,"error":false,"fill":2,"grid":{},"gridPos":{"h":8,"w":12,"x":12,"y":75},"id":52,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":false,"hideZero":true,"max":false,"min":false,"rightSide":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"(rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_response_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} {{tag}}","refId":"A","step":20}],"thresholds":[{"colorMode":"custom","fill":true,"fillColor":"rgba(216, 200, 27, 0.27)","op":"gt","value":100},{"colorMode":"custom","fill":true,"fillColor":"rgba(234, 112, 112, 0.22)","op":"gt","value":250}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average CPU Usage by Endpoint","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":8,"w":12,"x":0,"y":83},"id":7,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_response_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} {{tag}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"DB Usage by 
endpoint","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":null,"editable":true,"error":false,"fill":2,"grid":{},"gridPos":{"h":8,"w":12,"x":12,"y":83},"id":47,"legend":{"alignAsTable":true,"avg":true,"current":false,"hideEmpty":false,"hideZero":true,"max":true,"min":false,"rightSide":false,"show":true,"total":false,"values":true},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"(sum(rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))/(sum(rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))","format":"time_series","hide":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Non-sync avg response time","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":91},"id":103,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Requests in 
flight","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Requests","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":31},"id":97,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":32},"id":99,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{name}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"CPU usage by background jobs","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":32},"id":101,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","hide":false,"intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{name}}","refId":"A"},{"expr":"","format":"time_series","intervalFactor":1,"refId":"B"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"DB usage by background jobs (including scheduling time)","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"title":"Background 
jobs","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":32},"id":81,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":61},"id":79,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"txn rate","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Outgoing federation transaction rate","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":61},"id":83,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_federation_server_received_pdus{instance=~\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"pdus","refId":"A"},{"expr":"sum(rate(synapse_federation_server_received_edus{instance=~\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"edus","refId":"B"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Incoming PDU/EDU rate","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":70},"id":109,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_federation_client_sent_pdu_destinations:total{instance=\"$instance\"}[$bucket_size]))","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"pdus","refId":"A"},{"expr":"sum(rate(synapse_federation_client_sent_edus{instance=\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"edus","refId":"B"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Outgoing PDU/EDU 
rate","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":70},"id":111,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_federation_client_sent_edus_by_type{instance=\"$instance\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{type}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Outgoing EDUs by type","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"title":"Federation","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":33},"id":60,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":62},"id":51,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) \u003e 0","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"processed {{job}}","refId":"A","step":20},{"expr":"rate(synapse_http_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) \u003e 0","format":"time_series","intervalFactor":2,"legendFormat":"failed {{job}}","refId":"B","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"HTTP Push 
rate","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Pushes","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":34},"id":58,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":63},"id":48,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Avg time waiting for db conn","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"s","label":"","logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","description":"Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":63},"id":104,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))","format":"time_series","hide":false,"intervalFactor":1,"legendFormat":"{{job}} {{index}} 99%","refId":"A","step":20},{"expr":"histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}} {{index}} 95%","refId":"B"},{"expr":"histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}} {{index}} 90%","refId":"C"},{"expr":"rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}} {{index}} mean","refId":"D"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Db scheduling time 
quantiles","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"s","label":"","logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":0,"grid":{},"gridPos":{"h":7,"w":12,"x":0,"y":70},"id":10,"legend":{"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{desc}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Top DB transactions by txn rate","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","logBase":1,"max":null,"min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":7,"w":12,"x":12,"y":70},"id":11,"legend":{"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":true,"steppedLine":true,"targets":[{"expr":"rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","instant":false,"interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{desc}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Top DB transactions by total txn 
time","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Database","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":35},"id":59,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":0,"y":36},"id":12,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Total CPU Usage by Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":12,"y":36},"id":26,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average CPU Time per 
Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"ms","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":0,"y":49},"id":13,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Total DB Usage by Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":12,"y":49},"id":27,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average Database Time per 
Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"ms","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":0,"y":62},"id":28,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":false,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average Transactions per Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"none","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":12,"y":62},"id":25,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":false,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average Wallclock Time per Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"ms","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Per-block 
metrics","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":36},"id":61,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":2,"editable":true,"error":false,"fill":0,"grid":{},"gridPos":{"h":10,"w":12,"x":0,"y":37},"id":1,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":true,"hideZero":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Cache Hit Ratio","tooltip":{"msResolution":true,"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"percentunit","label":"","logBase":1,"max":"1","min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":10,"w":12,"x":12,"y":37},"id":8,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideZero":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"connected","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","hide":false,"interval":"","intervalFactor":2,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Cache Size","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","logBase":1,"max":null,"min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":10,"w":12,"x":0,"y":47},"id":38,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideZero":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"connected","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Cache request 
rate","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"rps","logBase":1,"max":null,"min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":10,"w":12,"x":12,"y":47},"id":39,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"topk(10, rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=\"$job\",instance=\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":2,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Top 10 cache misses","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"rps","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":57},"id":65,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Cache eviction rate","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"hertz","label":"entries / 
second","logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Caches","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":37},"id":62,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":66},"id":91,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":true,"steppedLine":false,"targets":[{"expr":"rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])","format":"time_series","instant":false,"intervalFactor":1,"legendFormat":"{{job}}-{{index}} gen {{gen}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Total GC time by bucket (10m smoothing)","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"percentunit","label":null,"logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":3,"editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":9,"w":12,"x":12,"y":66},"id":21,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null as zero","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{job}} {{index}} gen {{gen}} ","refId":"A","step":20,"target":""}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average GC Time Per Collection","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","description":"'gen 0' shows the number of objects allocated since the last gen0 GC.\n'gen 1' / 'gen 2' show the number of gen0/gen1 GCs since the last gen1/gen2 GC.","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":75},"id":89,"legend":{"avg":false,"current":false,"hideEmpty":true,"hideZero":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[{"alias":"/gen 
0$/","yaxis":2}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} gen {{gen}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Allocation counts","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":"Gen N-1 GCs since last Gen N GC","logBase":1,"max":null,"min":null,"show":true},{"decimals":null,"format":"short","label":"Objects since last Gen 0 GC","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":75},"id":93,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"connected","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} gen {{gen}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Object counts per collection","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":84},"id":95,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} gen {{gen}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"GC 
frequency","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"cards":{"cardPadding":0,"cardRound":null},"color":{"cardColor":"#b4ff00","colorScale":"sqrt","colorScheme":"interpolateSpectral","exponent":0.5,"max":null,"min":0,"mode":"spectrum"},"dataFormat":"tsbuckets","datasource":"Prometheus","gridPos":{"h":9,"w":12,"x":12,"y":84},"heatmap":{},"hideZeroBuckets":true,"highlightCards":true,"id":87,"legend":{"show":true},"links":[],"reverseYBuckets":false,"targets":[{"expr":"sum(rate(python_gc_time_bucket[$bucket_size])) by (le)","format":"heatmap","intervalFactor":1,"legendFormat":"{{le}}","refId":"A"}],"title":"GC durations","tooltip":{"show":true,"showHistogram":false},"type":"heatmap","xAxis":{"show":true},"xBucketNumber":null,"xBucketSize":null,"yAxis":{"decimals":null,"format":"s","logBase":1,"max":null,"min":null,"show":true,"splitFactor":null},"yBucketBound":"auto","yBucketNumber":null,"yBucketSize":null}],"repeat":null,"title":"GC","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":38},"id":63,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":67},"id":2,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_replication_tcp_resource_user_sync{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"user started/stopped syncing","refId":"A","step":20},{"expr":"rate(synapse_replication_tcp_resource_federation_ack{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"federation ack","refId":"B","step":20},{"expr":"rate(synapse_replication_tcp_resource_remove_pusher{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"remove pusher","refId":"C","step":20},{"expr":"rate(synapse_replication_tcp_resource_invalidate_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"invalidate cache","refId":"D","step":20},{"expr":"rate(synapse_replication_tcp_resource_user_ip_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"user ip cache","refId":"E","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Rate of events on replication 
master","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":67},"id":41,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{stream_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Outgoing stream updates","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":74},"id":42,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)","format":"time_series","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{command}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Rate of incoming commands","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":74},"id":43,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)","format":"time_series","intervalFactor":2,"legendFormat":"{{job}}-{{index}} 
{{command}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Rate of outgoing commands","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":81},"id":113,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{stream_name}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Replication connections","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":81},"id":115,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_replication_tcp_protocol_close_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{reason_type}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Replication connection close reasons","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Replication","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":39},"id":69,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":68},"id":67,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"connected","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":" synapse_event_persisted_position{instance=\"$instance\",job=\"synapse\"} - 
ignoring(index, job, name) group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} ","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Event processing lag","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":"events","logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":68},"id":71,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"connected","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","hide":false,"intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{name}}","refId":"B"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Age of last processed event","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"ms","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"title":"Event processing loop positions","type":"row"}],"refresh":"1m","schemaVersion":18,"style":"dark","tags":["matrix"],"templating":{"list":[{"current":{"selected":true,"text":"Prometheus","value":"Prometheus"},"hide":0,"includeAll":false,"label":null,"multi":false,"name":"datasource","options":[],"query":"prometheus","refresh":1,"regex":"","skipUrlSync":false,"type":"datasource"},{"allFormat":"glob","auto":true,"auto_count":100,"auto_min":"30s","current":{"text":"auto","value":"$__auto_interval_bucket_size"},"datasource":null,"hide":0,"includeAll":false,"label":"Bucket Size","multi":false,"multiFormat":"glob","name":"bucket_size","options":[{"selected":true,"text":"auto","value":"$__auto_interval_bucket_size"},{"selected":false,"text":"30s","value":"30s"},{"selected":false,"text":"1m","value":"1m"},{"selected":false,"text":"2m","value":"2m"},{"selected":false,"text":"5m","value":"5m"},{"selected":false,"text":"10m","value":"10m"},{"selected":false,"text":"15m","value":"15m"}],"query":"30s,1m,2m,5m,10m,15m","refresh":2,"skipUrlSync":false,"type":"interval"},{"allValue":null,"current":{"text":"matrix.org","value":"matrix.org"},"datasource":"$datasource","definition":"","hide":0,"includeAll":false,"label":null,"multi":false,"name":"instance","options":[],"query":"label_values(synapse_util_metrics_block_ru_utime_seconds, instance)","refresh":2,"regex":"","skipUrlSync":false,"sort":0,"tagValuesQuery":"","tags":[],"tagsQuery":"","type":"query","useTags":false},{"allFormat":"regex 
wildcard","allValue":"","current":{"text":"synapse","value":["synapse"]},"datasource":"$datasource","definition":"","hide":0,"hideLabel":false,"includeAll":true,"label":"Job","multi":true,"multiFormat":"regex values","name":"job","options":[],"query":"label_values(synapse_util_metrics_block_ru_utime_seconds, job)","refresh":2,"refresh_on_load":false,"regex":"","skipUrlSync":false,"sort":1,"tagValuesQuery":"","tags":[],"tagsQuery":"","type":"query","useTags":false},{"allFormat":"regex wildcard","allValue":".*","current":{"text":"All","value":"$__all"},"datasource":"$datasource","definition":"","hide":0,"hideLabel":false,"includeAll":true,"label":"","multi":true,"multiFormat":"regex values","name":"index","options":[],"query":"label_values(synapse_util_metrics_block_ru_utime_seconds, index)","refresh":2,"refresh_on_load":false,"regex":"","skipUrlSync":false,"sort":3,"tagValuesQuery":"","tags":[],"tagsQuery":"","type":"query","useTags":false}]},"time":{"from":"now-1h","to":"now"},"timepicker":{"now":true,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"","title":"Synapse","uid":"000000012","version":25}
diff --git a/contrib/systemd-with-workers/README.md b/contrib/systemd-with-workers/README.md
new file mode 100644
index 0000000..74b261e
--- /dev/null
+++ b/contrib/systemd-with-workers/README.md
@@ -0,0 +1,167 @@
+# Setup Synapse with Workers and Systemd
+
+This is a setup for managing synapse with systemd, including support for
+managing workers. It provides a `matrix-synapse` service, as well as a
+`matrix-synapse-worker@` service for any workers you require. Additionally,
+it sets up a `matrix.target` to group the required services. You can use this
+target to automatically start any bot or bridge services. More on this in
+[Bots and Bridges](#bots-and-bridges).
+
+See the folder [system](system) for any service and target files.
+
+The folder [workers](workers) contains an example configuration for the
+`federation_reader` worker. Pay special attention to the name of the
+configuration file. In order to work with the `matrix-synapse-worker@.service`
+service, it needs to have the exact same name as the worker app.
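+
+For example, with the `federation_reader` worker from [workers](workers), the
+naming lines up like this:
+
+```
+# /etc/matrix-synapse/workers/federation_reader.yaml
+worker_app: synapse.app.federation_reader
+```
+
+which is then managed as `matrix-synapse-worker@federation_reader.service`.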
+
+This setup expects neither the homeserver nor any workers to fork. Forking is
+handled by systemd.
+
+## Setup
+
+1. Adjust your matrix configs. Make sure that the worker config files have the
+exact same name as the worker app. See `matrix-synapse-worker@.service` for
+why. You can find an example worker config in the [workers](workers) folder. See
+below for relevant settings in the `homeserver.yaml`.
+2. Copy the `*.service` and `*.target` files in [system](system) to
+`/etc/systemd/system`.
+3. `systemctl enable matrix-synapse.service`: this adds the homeserver
+app to the `matrix.target`.
+4. *Optional.* `systemctl enable
+matrix-synapse-worker@federation_reader.service`: this adds the
+federation_reader app to the `matrix-synapse.service`.
+5. *Optional.* Repeat step 4 for any additional workers you require.
+6. *Optional.* Add any bots or bridges by enabling them.
+7. Start all matrix related services via `systemctl start matrix.target`
+8. *Optional.* Enable autostart of all matrix related services on system boot
+via `systemctl enable matrix.target`
+
+## Usage
+
+After you have finished the setup, you can use the following commands to manage
+your synapse installation:
+
+```
+# Start matrix-synapse, all workers and any enabled bots or bridges.
+systemctl start matrix.target
+
+# Restart matrix-synapse and all workers (not necessarily restarting bots
+# or bridges, see "Bots and Bridges")
+systemctl restart matrix-synapse.service
+
+# Stop matrix-synapse and all workers (not necessarily restarting bots
+# or bridges, see "Bots and Bridges")
+systemctl stop matrix-synapse.service
+
+# Restart a specific worker (e.g. federation_reader); the homeserver is
+# unaffected by this.
+systemctl restart matrix-synapse-worker@federation_reader.service
+
+# Add a new worker (assuming all configs are setup already)
+systemctl enable matrix-synapse-worker@federation_writer.service
+systemctl restart matrix-synapse.service
+```
+
+## The Configs
+
+Make sure the `worker_app` is set in the `homeserver.yaml` and that the homeserver does not fork.
+
+```
+worker_app: synapse.app.homeserver
+daemonize: false
+```
+
+None of the workers should fork, as forking is handled by systemd. Hence make
+sure this is present in all worker config files.
+
+```
+worker_daemonize: false
+```
+
+The config files of all workers are expected to be located in
+`/etc/matrix-synapse/workers`. If you want to use a different location you have
+to edit the provided `*.service` files accordingly.
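+
+For reference, this location comes from the last `--config-path` argument in
+the provided `matrix-synapse-worker@.service`:
+
+```
+ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.%i --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml
+```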
+
+## Bots and Bridges
+
+Most bots and bridges do not care if the homeserver goes down or is restarted.
+Depending on the implementation, this may crash them, though. So consult the
+docs or the community of the specific bridge or bot you want to run to make
+sure you choose the correct setup.
+
+Whichever configuration you choose, the following command will enable your
+bot/bridge to start (and potentially restart) automatically with the
+`matrix.target` once the setup is done.
+
+```
+systemctl enable <yourBotOrBridgeName>.service
+```
+
+**Note** that when synapse is inactive, the bots/bridges will only be started
+together with synapse if you start the `matrix.target`, not if you start the
+`matrix-synapse.service`. This is on purpose. Think of `matrix-synapse.service`
+as *just* synapse, and of `matrix.target` as anything matrix related, including
+synapse and any and all enabled bots and bridges.
+
+### Start with synapse but ignore synapse going down
+
+If the bridge can handle shutdowns of the homeserver, you'll want to install
+the service in the `matrix.target` and optionally add an
+`After=matrix-synapse.service` dependency to have the bot/bridge start after
+synapse when starting everything.
+
+In this case, the service file should look like this:
+
+```
+[Unit]
+# ...
+# Optional, this will only ensure that if you start everything, synapse will
+# be started before the bot/bridge will be started.
+After=matrix-synapse.service
+
+[Service]
+# ...
+
+[Install]
+WantedBy=matrix.target
+```
+
+### Stop/restart when synapse stops/restarts
+
+If the bridge can't handle shutdowns of the homeserver, you'll still want to
+install the service in the `matrix.target` but also have to specify the
+`After=matrix-synapse.service` *and* `BindsTo=matrix-synapse.service`
+dependencies to have the bot/bridge stop/restart with synapse.
+
+In this case, the service file should look like this:
+
+```
+[Unit]
+# ...
+# Mandatory
+After=matrix-synapse.service
+BindsTo=matrix-synapse.service
+
+[Service]
+# ...
+
+[Install]
+WantedBy=matrix.target
+```
diff --git a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service
new file mode 100644
index 0000000..9d980d5
--- /dev/null
+++ b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=Synapse Matrix Worker
+After=matrix-synapse.service
+BindsTo=matrix-synapse.service
+
+[Service]
+Type=simple
+User=matrix-synapse
+WorkingDirectory=/var/lib/matrix-synapse
+EnvironmentFile=/etc/default/matrix-synapse
+ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.%i --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=3
+SyslogIdentifier=matrix-synapse-%i
+
+[Install]
+WantedBy=matrix-synapse.service
diff --git a/contrib/systemd-with-workers/system/matrix-synapse.service b/contrib/systemd-with-workers/system/matrix-synapse.service
new file mode 100644
index 0000000..3aae190
--- /dev/null
+++ b/contrib/systemd-with-workers/system/matrix-synapse.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Synapse Matrix Homeserver
+
+[Service]
+Type=simple
+User=matrix-synapse
+WorkingDirectory=/var/lib/matrix-synapse
+EnvironmentFile=/etc/default/matrix-synapse
+ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
+ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=3
+SyslogIdentifier=matrix-synapse
+
+[Install]
+WantedBy=matrix.target
diff --git a/contrib/systemd-with-workers/system/matrix.target b/contrib/systemd-with-workers/system/matrix.target
new file mode 100644
index 0000000..aff97d0
--- /dev/null
+++ b/contrib/systemd-with-workers/system/matrix.target
@@ -0,0 +1,7 @@
+[Unit]
+Description=Contains matrix services like synapse, bridges and bots
+After=network.target
+AllowIsolate=no
+
+[Install]
+WantedBy=multi-user.target
diff --git a/contrib/systemd-with-workers/workers/federation_reader.yaml b/contrib/systemd-with-workers/workers/federation_reader.yaml
new file mode 100644
index 0000000..47c54ec
--- /dev/null
+++ b/contrib/systemd-with-workers/workers/federation_reader.yaml
@@ -0,0 +1,14 @@
+worker_app: synapse.app.federation_reader
+
+worker_replication_host: 127.0.0.1
+worker_replication_port: 9092
+worker_replication_http_port: 9093
+
+worker_listeners:
+ - type: http
+ port: 8011
+ resources:
+ - names: [federation]
+
+worker_daemonize: false
+worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml
diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service
index efb157e..595b699 100644
--- a/contrib/systemd/matrix-synapse.service
+++ b/contrib/systemd/matrix-synapse.service
@@ -22,10 +22,10 @@ Group=nogroup
WorkingDirectory=/opt/synapse
ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
+SyslogIdentifier=matrix-synapse
# adjust the cache factor if necessary
# Environment=SYNAPSE_CACHE_FACTOR=2.0
[Install]
WantedBy=multi-user.target
-
diff --git a/debian/changelog b/debian/changelog
index a786521..8c2a64f 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,16 @@
+matrix-synapse (0.99.5.1-1) experimental; urgency=medium
+
+ * New upstream release.
+
+ -- Andrej Shadura <andrewsh@debian.org> Wed, 22 May 2019 22:44:28 +0100
+
+matrix-synapse (0.99.3.2-1) experimental; urgency=medium
+
+ * Upload to experimental.
+ * New upstream release.
+
+ -- Andrej Shadura <andrewsh@debian.org> Fri, 03 May 2019 22:44:59 +0200
+
matrix-synapse (0.99.2-5) unstable; urgency=high
* Security updates backported from 0.99.3:
diff --git a/debian/gbp.conf b/debian/gbp.conf
index 2541c07..b9e2931 100644
--- a/debian/gbp.conf
+++ b/debian/gbp.conf
@@ -1,3 +1,3 @@
[DEFAULT]
-debian-branch = debian/master
+debian-branch = debian/experimental
upstream-branch = upstream/latest
diff --git a/debian/patches/0006-Avoid-pip-install.patch b/debian/patches/0006-Avoid-pip-install.patch
index 2813736..485f3fc 100644
--- a/debian/patches/0006-Avoid-pip-install.patch
+++ b/debian/patches/0006-Avoid-pip-install.patch
@@ -7,7 +7,7 @@ Bug: https://github.com/matrix-org/synapse/issues/3743
---
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
-@@ -92,14 +92,14 @@
+@@ -129,14 +129,14 @@
return "\n".join([
"Missing Requirements: %s" % (", ".join(self.dependencies),),
"To install run:",
@@ -19,11 +19,11 @@ Bug: https://github.com/matrix-org/synapse/issues/3743
@property
def dependencies(self):
for i in self.args[0]:
-- yield '"' + i + '"'
-+ yield 'python3-' + i
+- yield "'" + i + "'"
++ yield "python3-" + i
- def check_requirements(for_feature=None, _get_distribution=get_distribution):
+ def check_requirements(for_feature=None):
--- a/synapse/config/jwt_config.py
+++ b/synapse/config/jwt_config.py
@@ -19,7 +19,7 @@
@@ -37,7 +37,7 @@ Bug: https://github.com/matrix-org/synapse/issues/3743
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
-@@ -27,9 +27,7 @@
+@@ -57,9 +57,7 @@
"""Missing lxml library. This is required for URL preview API.
Install by running:
diff --git a/debian/patches/blacklist-localhost-by-default-for-URL-previews.patch b/debian/patches/blacklist-localhost-by-default-for-URL-previews.patch
deleted file mode 100644
index 21853e9..0000000
--- a/debian/patches/blacklist-localhost-by-default-for-URL-previews.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From 1a7104fde3abc5392b90ca084efa896d46e24f91 Mon Sep 17 00:00:00 2001
-From: Richard van der Hoff <richard@matrix.org>
-Date: Fri, 3 May 2019 13:46:50 +0100
-Subject: [PATCH] Blacklist 0.0.0.0 and :: by default for URL previews
-
----
- changelog.d/5134.bugfix | 1 +
- docs/sample_config.yaml | 14 +++++++++-----
- synapse/config/repository.py | 28 ++++++++++++++++++----------
- 3 files changed, 28 insertions(+), 15 deletions(-)
- create mode 100644 changelog.d/5134.bugfix
-
-diff --git a/changelog.d/5134.bugfix b/changelog.d/5134.bugfix
-new file mode 100644
-index 0000000000..684d48c53a
---- /dev/null
-+++ b/changelog.d/5134.bugfix
-@@ -0,0 +1 @@
-+Blacklist 0.0.0.0 and :: by default for URL previews. Thanks to @opnsec for identifying and responsibly disclosing this issue too!
-diff --git a/synapse/config/repository.py b/synapse/config/repository.py
-index 3f34ad9b2a..d155d69d8a 100644
---- a/synapse/config/repository.py
-+++ b/synapse/config/repository.py
-@@ -154,17 +154,21 @@ def read_config(self, config):
- except ImportError:
- raise ConfigError(MISSING_NETADDR)
-
-- if "url_preview_ip_range_blacklist" in config:
-- self.url_preview_ip_range_blacklist = IPSet(
-- config["url_preview_ip_range_blacklist"]
-- )
-- else:
-+ if "url_preview_ip_range_blacklist" not in config:
- raise ConfigError(
- "For security, you must specify an explicit target IP address "
- "blacklist in url_preview_ip_range_blacklist for url previewing "
- "to work"
- )
-
-+ self.url_preview_ip_range_blacklist = IPSet(
-+ config["url_preview_ip_range_blacklist"]
-+ )
-+
-+ # we always blacklist '0.0.0.0' and '::', which are supposed to be
-+ # unroutable addresses.
-+ self.url_preview_ip_range_blacklist.update(['0.0.0.0', '::'])
-+
- self.url_preview_ip_range_whitelist = IPSet(
- config.get("url_preview_ip_range_whitelist", ())
- )
-@@ -235,11 +239,11 @@ def default_config(self, data_dir_path, **kwargs):
- height: 600
- method: scale
-
-- # Is the preview URL API enabled? If enabled, you *must* specify
-- # an explicit url_preview_ip_range_blacklist of IPs that the spider is
-- # denied from accessing.
-+ # Is the preview URL API enabled?
-+ # 'False' by default: uncomment the following to enable it (and specify a
-+ # url_preview_ip_range_blacklist blacklist).
- #
-- url_preview_enabled: False
-+ #url_preview_enabled: True
-
- # List of IP address CIDR ranges that the URL preview spider is denied
- # from accessing. There are no defaults: you must explicitly
-@@ -249,6 +253,9 @@ def default_config(self, data_dir_path, **kwargs):
- # synapse to issue arbitrary GET requests to your internal services,
- # causing serious security issues.
- #
-+ # This must be specified if url_preview_enabled. It is recommended that you
-+ # uncomment the following list as a starting point.
-+ #
- #url_preview_ip_range_blacklist:
- # - '127.0.0.0/8'
- # - '10.0.0.0/8'
-@@ -259,7 +266,7 @@ def default_config(self, data_dir_path, **kwargs):
- # - '::1/128'
- # - 'fe80::/64'
- # - 'fc00::/7'
-- #
-+
- # List of IP address CIDR ranges that the URL preview spider is allowed
- # to access even if they are specified in url_preview_ip_range_blacklist.
- # This is useful for specifying exceptions to wide-ranging blacklisted
diff --git a/debian/patches/config-add-signing_key_path.patch b/debian/patches/config-add-signing_key_path.patch
deleted file mode 100644
index 756d772..0000000
--- a/debian/patches/config-add-signing_key_path.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-Subject: Make it possible to request signing_key_path using a read command
-
---- a/synapse/config/key.py
-+++ b/synapse/config/key.py
-@@ -39,6 +39,7 @@
-
- def read_config(self, config):
- self.signing_key = self.read_signing_key(config["signing_key_path"])
-+ self.signing_key_path = config["signing_key_path"]
- self.old_signing_keys = self.read_old_signing_keys(
- config.get("old_signing_keys", {})
- )
diff --git a/debian/patches/series b/debian/patches/series
index b216485..92daf49 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,6 +1,3 @@
0002-change_instructions.patch
0006-Avoid-pip-install.patch
fix-deps.patch
-config-add-signing_key_path.patch
-blacklist-localhost-by-default-for-URL-previews.patch
-use-SystemRandom-for-token-generation.patch
diff --git a/debian/patches/use-SystemRandom-for-token-generation.patch b/debian/patches/use-SystemRandom-for-token-generation.patch
deleted file mode 100644
index cfe8f8d..0000000
--- a/debian/patches/use-SystemRandom-for-token-generation.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From 247dc1bd0bd9ee2b9525495c0dbd819baf10ec1f Mon Sep 17 00:00:00 2001
-From: Richard van der Hoff <richard@matrix.org>
-Date: Fri, 3 May 2019 12:38:03 +0100
-Subject: [PATCH] Use SystemRandom for token generation
-
----
- changelog.d/5133.bugfix | 1 +
- synapse/util/stringutils.py | 9 +++++++--
- 2 files changed, 8 insertions(+), 2 deletions(-)
- create mode 100644 changelog.d/5133.bugfix
-
-diff --git a/changelog.d/5133.bugfix b/changelog.d/5133.bugfix
-new file mode 100644
-index 0000000000..12a32a906b
---- /dev/null
-+++ b/changelog.d/5133.bugfix
-@@ -0,0 +1 @@
-+Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for for identifying and responsibly disclosing this issue!
-diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
-index fdcb375f95..69dffd8244 100644
---- a/synapse/util/stringutils.py
-+++ b/synapse/util/stringutils.py
-@@ -24,14 +24,19 @@
- string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
- )
-
-+# random_string and random_string_with_symbols are used for a range of things,
-+# some cryptographically important, some less so. We use SystemRandom to make sure
-+# we get cryptographically-secure randoms.
-+rand = random.SystemRandom()
-+
-
- def random_string(length):
-- return ''.join(random.choice(string.ascii_letters) for _ in range(length))
-+ return ''.join(rand.choice(string.ascii_letters) for _ in range(length))
-
-
- def random_string_with_symbols(length):
- return ''.join(
-- random.choice(_string_with_symbols) for _ in range(length)
-+ rand.choice(_string_with_symbols) for _ in range(length)
- )
-
-
diff --git a/demo/start.sh b/demo/start.sh
index dcc4d6f..c4a1328 100755
--- a/demo/start.sh
+++ b/demo/start.sh
@@ -27,17 +27,27 @@ for port in 8080 8081 8082; do
--config-path "$DIR/etc/$port.config" \
--report-stats no
+ printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
+ echo 'enable_registration: true' >> $DIR/etc/$port.config
+
# Check script parameters
if [ $# -eq 1 ]; then
if [ $1 = "--no-rate-limit" ]; then
- # Set high limits in config file to disable rate limiting
- perl -p -i -e 's/rc_messages_per_second.*/rc_messages_per_second: 1000/g' $DIR/etc/$port.config
- perl -p -i -e 's/rc_message_burst_count.*/rc_message_burst_count: 1000/g' $DIR/etc/$port.config
+ # messages rate limit
+ echo 'rc_messages_per_second: 1000' >> $DIR/etc/$port.config
+ echo 'rc_message_burst_count: 1000' >> $DIR/etc/$port.config
+
+ # registration rate limit
+ printf 'rc_registration:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
+
+ # login rate limit
+ echo 'rc_login:' >> $DIR/etc/$port.config
+ printf ' address:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
+ printf ' account:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
+ printf ' failed_attempts:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config
fi
fi
- perl -p -i -e 's/^enable_registration:.*/enable_registration: true/g' $DIR/etc/$port.config
-
if ! grep -F "full_twisted_stacktraces" -q $DIR/etc/$port.config; then
echo "full_twisted_stacktraces: true" >> $DIR/etc/$port.config
fi
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index 224c923..ceedbad 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -50,12 +50,15 @@ RUN apt-get update -qq -o Acquire::Languages=none \
debhelper \
devscripts \
dh-systemd \
+ libsystemd-dev \
lsb-release \
+ pkg-config \
python3-dev \
python3-pip \
python3-setuptools \
python3-venv \
- sqlite3
+ sqlite3 \
+ libpq-dev
COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
diff --git a/docker/README.md b/docker/README.md
index 3faedf6..b27a692 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -28,9 +28,10 @@ with your postgres database.
docker run \
-d \
--name synapse \
- -v ${DATA_PATH}:/data \
+ --mount type=volume,src=synapse-data,dst=/data \
-e SYNAPSE_SERVER_NAME=my.matrix.host \
-e SYNAPSE_REPORT_STATS=yes \
+ -p 8448:8448 \
matrixdotorg/synapse:latest
```
@@ -57,9 +58,10 @@ configuration file there. Multiple application services are supported.
Synapse requires a valid TLS certificate. You can do one of the following:
* Provide your own certificate and key (as
- `${DATA_PATH}/${SYNAPSE_SERVER_NAME}.crt` and
- `${DATA_PATH}/${SYNAPSE_SERVER_NAME}.key`, or elsewhere by providing an
- entire config as `${SYNAPSE_CONFIG_PATH}`).
+ `${DATA_PATH}/${SYNAPSE_SERVER_NAME}.tls.crt` and
+ `${DATA_PATH}/${SYNAPSE_SERVER_NAME}.tls.key`, or elsewhere by providing an
+ entire config as `${SYNAPSE_CONFIG_PATH}`). In this case, you should forward
+ traffic to port 8448 in the container, for example with `-p 443:8448`.
* Use a reverse proxy to terminate incoming TLS, and forward the plain http
traffic to port 8008 in the container. In this case you should set `-e
@@ -87,16 +89,22 @@ Global settings:
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file
If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
-then customize it manually. No other environment variable is required.
+then customize it manually: see [Generating a config
+file](#generating-a-config-file).
-Otherwise, a dynamic configuration file will be used. The following environment
-variables are available for configuration:
+Otherwise, a dynamic configuration file will be used.
+
+### Environment variables used to build a dynamic configuration file
+
+The following environment variables are used to build the configuration file
+when ``SYNAPSE_CONFIG_PATH`` is not set.
* ``SYNAPSE_SERVER_NAME`` (mandatory), the server public hostname.
* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
statistics reporting back to the Matrix project which helps us to get funding.
-* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
- you run your own TLS-capable reverse proxy).
+* `SYNAPSE_NO_TLS` (accepts `true`, `false`, `on`, `off`, `1`, `0`, `yes`, `no`): disable
+  TLS in Synapse (use this if you run your own TLS-capable reverse proxy). Defaults
+  to `false` (i.e., TLS is enabled by default).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server.
@@ -132,7 +140,7 @@ Database specific values (will use SQLite if not set):
**NOTE**: You are highly encouraged to use postgresql! Please use the compose
file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default:
- `matrix`]
+ `synapse`]
Mail server specific values (will not send emails if not set):
@@ -143,3 +151,31 @@ Mail server specific values (will not send emails if not set):
any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail
server if any.
+
+### Generating a config file
+
+It is possible to generate a basic configuration file for use with
+`SYNAPSE_CONFIG_PATH` using the `generate` commandline option. You will need to
+specify values for `SYNAPSE_CONFIG_PATH`, `SYNAPSE_SERVER_NAME` and
+`SYNAPSE_REPORT_STATS`, and mount a docker volume to store the data on. For
+example:
+
+```
+docker run -it --rm \
+ --mount type=volume,src=synapse-data,dst=/data \
+ -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
+ -e SYNAPSE_SERVER_NAME=my.matrix.host \
+ -e SYNAPSE_REPORT_STATS=yes \
+ matrixdotorg/synapse:latest generate
+```
+
+This will generate a `homeserver.yaml` in (typically)
+`/var/lib/docker/volumes/synapse-data/_data`, which you can then customise and
+use with:
+
+```
+docker run -d --name synapse \
+ --mount type=volume,src=synapse-data,dst=/data \
+ -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
+ matrixdotorg/synapse:latest
+```
diff --git a/docker/start.py b/docker/start.py
index 941d999..2da5552 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -59,6 +59,18 @@ else:
if not os.path.exists("/compiled"): os.mkdir("/compiled")
config_path = "/compiled/homeserver.yaml"
+
+ # Convert SYNAPSE_NO_TLS to boolean if exists
+ if "SYNAPSE_NO_TLS" in environ:
+ tlsanswerstring = str.lower(environ["SYNAPSE_NO_TLS"])
+ if tlsanswerstring in ("true", "on", "1", "yes"):
+ environ["SYNAPSE_NO_TLS"] = True
+ else:
+ if tlsanswerstring in ("false", "off", "0", "no"):
+ environ["SYNAPSE_NO_TLS"] = False
+ else:
+ print("Environment variable \"SYNAPSE_NO_TLS\" found but value \"" + tlsanswerstring + "\" unrecognized; exiting.")
+ sys.exit(2)
convert("/conf/homeserver.yaml", config_path, environ)
convert("/conf/log.config", "/compiled/log.config", environ)
diff --git a/docs/.sample_config_header.yaml b/docs/.sample_config_header.yaml
new file mode 100644
index 0000000..e001ef5
--- /dev/null
+++ b/docs/.sample_config_header.yaml
@@ -0,0 +1,12 @@
+# The config is maintained as an up-to-date snapshot of the default
+# homeserver.yaml configuration generated by Synapse.
+#
+# It is intended to act as a reference for the default configuration,
+# helping admins keep track of new options and other changes, and compare
+# their configs with the current default. As such, many of the actual
+# config values shown are placeholders.
+#
+# It is *not* intended to be copied and used as the basis for a real
+# homeserver.yaml. Instead, if you are starting from scratch, please generate
+# a fresh config using Synapse by following the instructions in INSTALL.md.
+
diff --git a/docs/ACME.md b/docs/ACME.md
index 46136a9..9eb18a9 100644
--- a/docs/ACME.md
+++ b/docs/ACME.md
@@ -67,7 +67,7 @@ For nginx users, add the following line to your existing `server` block:
```
location /.well-known/acme-challenge {
- proxy_pass http://localhost:8009/;
+ proxy_pass http://localhost:8009;
}
```
diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md
index 8eb2265..ebfb20f 100644
--- a/docs/MSC1711_certificates_FAQ.md
+++ b/docs/MSC1711_certificates_FAQ.md
@@ -177,7 +177,6 @@ You can do this with a `.well-known` file as follows:
on `customer.example.net:8000` it correctly handles HTTP requests with
Host header set to `customer.example.net:8000`.
-
## FAQ
### Synapse 0.99.0 has just been released, what do I need to do right now?
diff --git a/docs/admin_api/account_validity.rst b/docs/admin_api/account_validity.rst
new file mode 100644
index 0000000..7559de4
--- /dev/null
+++ b/docs/admin_api/account_validity.rst
@@ -0,0 +1,51 @@
+Account validity API
+====================
+
+This API allows a server administrator to manage the validity of an account. To
+use it, you must enable the account validity feature (under
+``account_validity``) in Synapse's configuration.
+
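+A minimal sketch of that configuration (the ``enabled`` flag and the ``6w``
+period are illustrative values, not recommendations):
+
+.. code:: yaml
+
+    account_validity:
+      enabled: true
+      period: 6w
+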
+Renew account
+-------------
+
+This API extends the validity of an account by as much time as configured in the
+``period`` parameter from the ``account_validity`` configuration.
+
+The API is::
+
+ POST /_synapse/admin/v1/account_validity/validity
+
+with the following body:
+
+.. code:: json
+
+ {
+ "user_id": "<user ID for the account to renew>",
+ "expiration_ts": 0,
+ "enable_renewal_emails": true
+ }
+
+
+``expiration_ts`` is an optional parameter and overrides the expiration date,
+which otherwise defaults to now + validity period.
+
+``enable_renewal_emails`` is also an optional parameter and enables/disables
+sending renewal emails to the user. Defaults to true.
+
+The API returns the new expiration date for this account, as a timestamp in
+milliseconds since epoch:
+
+.. code:: json
+
+ {
+ "expiration_ts": 0
+ }
diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md
new file mode 100644
index 0000000..1710488
--- /dev/null
+++ b/docs/admin_api/delete_group.md
@@ -0,0 +1,21 @@
+# Delete a local group
+
+This API lets a server admin delete a local group. Doing so will kick all
+users out of the group so that their clients will correctly handle the group
+being deleted.
+
+
+The API is:
+
+```
+POST /_synapse/admin/v1/delete_group/<group_id>
+```
+
+including an `access_token` of a server admin.
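+
+For example, with curl (the hostname here is a placeholder for your own):
+
+```
+curl -X POST --header "Authorization: Bearer <admin_access_token>" \
+    "https://my.matrix.host/_synapse/admin/v1/delete_group/<group_id>"
+```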
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index abdbc1e..5e9f8e5 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -4,7 +4,7 @@ This API gets a list of known media in a room.
The API is:
```
-GET /_matrix/client/r0/admin/room/<room_id>/media
+GET /_synapse/admin/v1/room/<room_id>/media
```
including an `access_token` of a server admin.
diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst
index a5c3dc8..f7be226 100644
--- a/docs/admin_api/purge_history_api.rst
+++ b/docs/admin_api/purge_history_api.rst
@@ -10,7 +10,7 @@ paginate further back in the room from the point being purged from.
The API is:
-``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``
+``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
including an ``access_token`` of a server admin.
@@ -49,7 +49,7 @@ Purge status query
It is possible to poll for updates on recent purges with a second API;
-``GET /_matrix/client/r0/admin/purge_history_status/<purge_id>``
+``GET /_synapse/admin/v1/purge_history_status/<purge_id>``
(again, with a suitable ``access_token``). This API returns a JSON body like
the following:
diff --git a/docs/admin_api/purge_remote_media.rst b/docs/admin_api/purge_remote_media.rst
index 5deb02a..dacd5bc 100644
--- a/docs/admin_api/purge_remote_media.rst
+++ b/docs/admin_api/purge_remote_media.rst
@@ -6,7 +6,7 @@ media.
The API is::
- POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
+ POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
{}
diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst
index 084e74e..3a63109 100644
--- a/docs/admin_api/register_api.rst
+++ b/docs/admin_api/register_api.rst
@@ -12,7 +12,7 @@ is not enabled.
To fetch the nonce, you need to request one from the API::
- > GET /_matrix/client/r0/admin/register
+ > GET /_synapse/admin/v1/register
< {"nonce": "thisisanonce"}
@@ -22,7 +22,7 @@ body containing the nonce, username, password, whether they are an admin
As an example::
- > POST /_matrix/client/r0/admin/register
+ > POST /_synapse/admin/v1/register
> {
"nonce": "thisisanonce",
"username": "pepper_roni",
diff --git a/docs/admin_api/server_notices.md b/docs/admin_api/server_notices.md
new file mode 100644
index 0000000..858b052
--- /dev/null
+++ b/docs/admin_api/server_notices.md
@@ -0,0 +1,48 @@
+# Server Notices
+
+The API to send notices is as follows:
+
+```
+POST /_synapse/admin/v1/send_server_notice
+```
+
+or:
+
+```
+PUT /_synapse/admin/v1/send_server_notice/{txnId}
+```
+
+You will need to authenticate with an access token for an admin user.
+
+When using the `PUT` form, retransmissions with the same transaction ID will be
+ignored in the same way as with `PUT
+/_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`.
+
+The request body should look something like the following:
+
+```json
+{
+ "user_id": "@target_user:server_name",
+ "content": {
+ "msgtype": "m.text",
+ "body": "This is my message"
+ }
+}
+```
+
+You can optionally include the following additional parameters:
+
+* `type`: the type of event. Defaults to `m.room.message`.
+* `state_key`: Setting this will result in a state event being sent.
+
+
+Once the notice has been sent, the API will return the following response:
+
+```json
+{
+ "event_id": "<event_id>"
+}
+```
+
+Note that server notices must be enabled in `homeserver.yaml` before this API
+can be used. See [server_notices.md](../server_notices.md) for more information.
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index d17121a..8aca4f1 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -5,7 +5,7 @@ This API returns information about a specific user account.
The api is::
- GET /_matrix/client/r0/admin/whois/<user_id>
+ GET /_synapse/admin/v1/whois/<user_id>
including an ``access_token`` of a server admin.
@@ -50,7 +50,7 @@ references to it).
The api is::
- POST /_matrix/client/r0/admin/deactivate/<user_id>
+ POST /_synapse/admin/v1/deactivate/<user_id>
with a body of:
@@ -73,7 +73,7 @@ Changes the password of another user.
The api is::
- POST /_matrix/client/r0/admin/reset_password/<user_id>
+ POST /_synapse/admin/v1/reset_password/<user_id>
with a body of:
diff --git a/docs/admin_api/version_api.rst b/docs/admin_api/version_api.rst
new file mode 100644
index 0000000..833d902
--- /dev/null
+++ b/docs/admin_api/version_api.rst
@@ -0,0 +1,20 @@
+Version API
+===========
+
+This API returns the running Synapse version and the Python version
+on which Synapse is being run. This is useful when a Synapse instance
+is behind a proxy that does not forward the 'Server' header (which also
+contains Synapse version information).
+
+The api is::
+
+ GET /_synapse/admin/v1/server_version
+
+It returns a JSON body like the following:
+
+.. code:: json
+
+ {
+ "server_version": "0.99.2rc1 (b=develop, abcdef123)",
+ "python_version": "3.6.8"
+ }
diff --git a/docs/federate.md b/docs/federate.md
new file mode 100644
index 0000000..b7fc096
--- /dev/null
+++ b/docs/federate.md
@@ -0,0 +1,123 @@
+Setting up Federation
+=====================
+
+Federation is the process by which users on different servers can participate
+in the same room. For this to work, those other servers must be able to contact
+yours to send messages.
+
+The ``server_name`` configured in the Synapse configuration file (often
+``homeserver.yaml``) defines how resources (users, rooms, etc.) will be
+identified (eg: ``@user:example.com``, ``#room:example.com``). By
+default, it is also the domain that other servers will use to
+try to reach your server (via port 8448). This is easy to set
+up and will work provided you set the ``server_name`` to match your
+machine's public DNS hostname, and provide Synapse with a TLS certificate
+which is valid for your ``server_name``.
+
+Once you have completed the steps necessary to federate, you should be able to
+join a room via federation. (A good place to start is ``#synapse:matrix.org`` - a
+room for Synapse admins.)
+
+
+## Delegation
+
+For a more flexible configuration, you can have ``server_name``
+resources (eg: ``@user:example.com``) served by a different host and
+port (eg: ``synapse.example.com:443``). There are two ways to do this:
+
+- adding a ``/.well-known/matrix/server`` URL served on ``https://example.com``.
+- adding a DNS ``SRV`` record in the DNS zone of domain
+ ``example.com``.
+
+Without configuring delegation, other servers in the matrix federation will
+expect to find your server via ``example.com:8448``. The following methods
+allow you to retain a `server_name` of `example.com` so that your user IDs,
+room aliases, etc continue to look like `*:example.com`, whilst having your
+federation traffic routed to a different server.
+
+### .well-known delegation
+
+To use this method, you need to be able to configure the server at
+``https://<server_name>`` to serve the ``/.well-known/matrix/server``
+URL. Having an active server (with a valid TLS certificate) serving your
+``server_name`` domain is out of the scope of this documentation.
+
+The URL ``https://<server_name>/.well-known/matrix/server`` should
+return a JSON structure containing the key ``m.server`` like so:
+
+ {
+ "m.server": "<synapse.server.name>[:<yourport>]"
+ }
+
+In our example, this would mean that the URL ``https://example.com/.well-known/matrix/server``
+should return:
+
+ {
+ "m.server": "synapse.example.com:443"
+ }
+
+Note that specifying a port is optional. If a port is not specified, an SRV
+lookup is performed, as described below. If the target of the
+delegation does not have an SRV record, then the port defaults to 8448.
+
+Most installations will not need to configure .well-known. However, it can be
+useful in cases where the admin is hosting on behalf of someone else and
+therefore cannot gain access to the necessary certificate. With .well-known,
+federation servers will check for a valid TLS certificate for the delegated
+hostname (in our example: ``synapse.example.com``).
+
+.well-known support first appeared in Synapse v0.99.0. To federate with older
+servers you may need to additionally configure SRV delegation. Alternatively,
+encourage the server admin in question to upgrade :).
+
+### DNS SRV delegation
+
+To use this delegation method, you need to have write access to your
+``server_name`` 's domain zone DNS records (in our example it would be
+``example.com`` DNS zone).
+
+This method requires the target server to provide a
+valid TLS certificate for the original ``server_name``.
+
+You need to add an SRV record in your ``server_name`` 's DNS zone with
+this format:
+
+ _matrix._tcp.<yourdomain.com> <ttl> IN SRV <priority> <weight> <port> <synapse.server.name>
+
+In our example, we would need to add this SRV record in the
+``example.com`` DNS zone:
+
+ _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
+
+Once done and set up, you can check the DNS record with ``dig -t srv
+_matrix._tcp.<server_name>``. In our example, we would expect this:
+
+ $ dig -t srv _matrix._tcp.example.com
+    _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
+
+Note that the target of an SRV record cannot be an alias (CNAME record): it has to point
+directly to the server hosting the synapse instance.
+
+## Troubleshooting
+
+You can use the [federation tester](
+<https://matrix.org/federationtester>) to check if your homeserver is
+configured correctly. Alternatively try the [JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
+Note that you'll have to modify this URL to replace ``DOMAIN`` with your
+``server_name``. Hitting the API directly provides extra detail.
+
+The typical failure mode for federation is that when the server tries to join
+a room, it is rejected with "401: Unauthorized". Generally this means that other
+servers in the room could not access yours. (Joining a room over federation is
+a complicated dance which requires connections in both directions).
+
+Another common problem is that people on other servers can't join rooms that
+you invite them to. This can be caused by an incorrectly-configured reverse
+proxy: see [reverse_proxy.rst](<reverse_proxy.rst>) for instructions on how to correctly
+configure a reverse proxy.
+
+## Running a Demo Federation of Synapses
+
+If you want to get up and running quickly with a trio of homeservers in a
+private federation, there is a script in the ``demo`` directory. This is mainly
+useful just for development purposes. See [demo/README](<../demo/README>).
diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst
index 5bbb5a4..32b064e 100644
--- a/docs/metrics-howto.rst
+++ b/docs/metrics-howto.rst
@@ -48,7 +48,19 @@ How to monitor Synapse metrics using Prometheus
- job_name: "synapse"
metrics_path: "/_synapse/metrics"
static_configs:
- - targets: ["my.server.here:9092"]
+ - targets: ["my.server.here:port"]
+
+ where ``my.server.here`` is the IP address of Synapse, and ``port`` is the listener port
+ configured with the ``metrics`` resource.
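+
+   For example, a matching listener can be defined in ``homeserver.yaml``
+   like so (the port and bind address here are illustrative)::
+
+       listeners:
+         - port: 9000
+           type: metrics
+           bind_addresses: ['127.0.0.1']
+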
If your prometheus is older than 1.5.2, you will need to replace
``static_configs`` in the above with ``target_groups``.
diff --git a/docs/password_auth_providers.rst b/docs/password_auth_providers.rst
index d8a7b61..6149ba7 100644
--- a/docs/password_auth_providers.rst
+++ b/docs/password_auth_providers.rst
@@ -75,6 +75,33 @@ Password auth provider classes may optionally provide the following methods.
result from the ``/login`` call (including ``access_token``, ``device_id``,
etc.)
+``someprovider.check_3pid_auth``\(*medium*, *address*, *password*)
+
+ This method, if implemented, is called when a user attempts to register or
+ log in with a third party identifier, such as email. It is passed the
+ medium (e.g. "email"), an address (e.g. "jdoe@example.com") and the user's
+ password.
+
+ The method should return a Twisted ``Deferred`` object, which resolves to
+ a ``str`` containing the user's (canonical) User ID if authentication was
+ successful, and ``None`` if not.
+
+ As with ``check_auth``, the ``Deferred`` may alternatively resolve to a
+ ``(user_id, callback)`` tuple.
+
``someprovider.check_password``\(*user_id*, *password*)
This method provides a simpler interface than ``get_supported_login_types``
diff --git a/docs/postgres.rst b/docs/postgres.rst
index 2377542..e81e104 100644
--- a/docs/postgres.rst
+++ b/docs/postgres.rst
@@ -3,6 +3,28 @@ Using Postgres
Postgres version 9.4 or later is known to work.
+Install postgres client libraries
+=================================
+
+Synapse will require the python postgres client library in order to connect to
+a postgres database.
+
+* If you are using the `matrix.org debian/ubuntu
+ packages <../INSTALL.md#matrixorg-packages>`_,
+ the necessary libraries will already be installed.
+
+* For other pre-built packages, please consult the documentation from the
+ relevant package.
+
+* If you installed synapse `in a virtualenv
+ <../INSTALL.md#installing-from-source>`_, you can install the library with::
+
+ ~/synapse/env/bin/pip install matrix-synapse[postgres]
+
+ (substituting the path to your virtualenv for ``~/synapse/env``, if you used a
+ different path). You will require the postgres development files. These are in
+ the ``libpq-dev`` package on Debian-derived distributions.
+
Set up database
===============
@@ -26,28 +48,34 @@ encoding use, e.g.::
This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).
-Set up client in Debian/Ubuntu
-===========================
-
-Postgres support depends on the postgres python connector ``psycopg2``. In the
-virtual env::
-
- sudo apt-get install libpq-dev
- pip install psycopg2
+Tuning Postgres
+===============
-Set up client in RHEL/CentOs 7
-==============================
+The default settings should be fine for most deployments. For larger scale
+deployments, tuning some of the settings is recommended; details can be
+found at https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server.
-Make sure you have the appropriate version of postgres-devel installed. For a
-postgres 9.4, use the postgres 9.4 packages from
-[here](https://wiki.postgresql.org/wiki/YUM_Installation).
+In particular, we've found tuning the following values helpful for performance:
-As with Debian/Ubuntu, postgres support depends on the postgres python connector
-``psycopg2``. In the virtual env::
+- ``shared_buffers``
+- ``effective_cache_size``
+- ``work_mem``
+- ``maintenance_work_mem``
+- ``autovacuum_work_mem``
- sudo yum install postgresql-devel libpqxx-devel.x86_64
- export PATH=/usr/pgsql-9.4/bin/:$PATH
- pip install psycopg2
+Note that the appropriate values for those fields depend on the amount of free
+memory the database host has available.
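+
+As a rough illustration (the values below are placeholders, not
+recommendations; size them to the memory actually available)::
+
+    # postgresql.conf
+    shared_buffers = 2GB
+    effective_cache_size = 6GB
+    work_mem = 32MB
+    maintenance_work_mem = 512MB
+    autovacuum_work_mem = 256MB
+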
Synapse config
==============
@@ -129,8 +146,8 @@ Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml``::
./synctl stop
- mv homeserver.yaml homeserver-old-sqlite.yaml
- mv homeserver-postgres.yaml homeserver.yaml
+ mv homeserver.yaml homeserver-old-sqlite.yaml
+ mv homeserver-postgres.yaml homeserver.yaml
./synctl start
Synapse should now be running against PostgreSQL.
diff --git a/docs/reverse_proxy.rst b/docs/reverse_proxy.rst
index 4706061..7619b10 100644
--- a/docs/reverse_proxy.rst
+++ b/docs/reverse_proxy.rst
@@ -18,7 +18,7 @@ servers do not necessarily need to connect to your server via the same server
name or port. Indeed, clients will use port 443 by default, whereas servers
default to port 8448. Where these are different, we refer to the 'client port'
and the 'federation port'. See `Setting up federation
-<../README.rst#setting-up-federation>`_ for more details of the algorithm used for
+<federate.md>`_ for more details of the algorithm used for
federation connections.
Let's assume that we expect clients to connect to our server at
@@ -69,37 +69,33 @@ Let's assume that we expect clients to connect to our server at
SSLEngine on
ServerName matrix.example.com;
- <Location /_matrix>
- ProxyPass http://127.0.0.1:8008/_matrix nocanon
- ProxyPassReverse http://127.0.0.1:8008/_matrix
- </Location>
+ AllowEncodedSlashes NoDecode
+ ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
+ ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
</VirtualHost>
<VirtualHost *:8448>
SSLEngine on
ServerName example.com;
-
- <Location /_matrix>
- ProxyPass http://127.0.0.1:8008/_matrix nocanon
- ProxyPassReverse http://127.0.0.1:8008/_matrix
- </Location>
+
+ AllowEncodedSlashes NoDecode
+ ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
+ ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
</VirtualHost>
* HAProxy::
frontend https
- bind 0.0.0.0:443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
- bind :::443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
-
+ bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
+
# Matrix client traffic
acl matrix hdr(host) -i matrix.example.com
use_backend matrix if matrix
-
+
frontend matrix-federation
- bind 0.0.0.0:8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
- bind :::8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
+ bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
default_backend matrix
-
+
backend matrix
server matrix 127.0.0.1:8008
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
new file mode 100644
index 0000000..f658ec8
--- /dev/null
+++ b/docs/sample_config.yaml
@@ -0,0 +1,1238 @@
+# The config is maintained as an up-to-date snapshot of the default
+# homeserver.yaml configuration generated by Synapse.
+#
+# It is intended to act as a reference for the default configuration,
+# helping admins keep track of new options and other changes, and compare
+# their configs with the current default. As such, many of the actual
+# config values shown are placeholders.
+#
+# It is *not* intended to be copied and used as the basis for a real
+# homeserver.yaml. Instead, if you are starting from scratch, please generate
+# a fresh config using Synapse by following the instructions in INSTALL.md.
+
+## Server ##
+
+# The domain name of the server, with optional explicit port.
+# This is used by remote servers to connect to this server,
+# e.g. matrix.org, localhost:8080, etc.
+# This is also the last part of your UserID.
+#
+server_name: "SERVERNAME"
+
+# When running as a daemon, the file to store the pid in
+#
+pid_file: DATADIR/homeserver.pid
+
+# CPU affinity mask. Setting this restricts the CPUs on which the
+# process will be scheduled. It is represented as a bitmask, with the
+# lowest order bit corresponding to the first logical CPU and the
+# highest order bit corresponding to the last logical CPU. Not all CPUs
+# may exist on a given system but a mask may specify more CPUs than are
+# present.
+#
+# For example:
+# 0x00000001 is processor #0,
+# 0x00000003 is processors #0 and #1,
+# 0xFFFFFFFF is all processors (#0 through #31).
+#
+# Pinning a Python process to a single CPU is desirable, because Python
+# is inherently single-threaded due to the GIL, and can suffer a
+# 30-40% slowdown due to cache blow-out and thread context switching
+# if the scheduler happens to schedule the underlying threads across
+# different cores. See
+# https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
+#
+# This setting requires the affinity package to be installed!
+#
+#cpu_affinity: 0xFFFFFFFF
+
+# The path to the web client which will be served at /_matrix/client/
+# if 'webclient' is configured under the 'listeners' configuration.
+#
+#web_client_location: "/path/to/web/root"
+
+# The public-facing base URL that clients use to access this HS
+# (not including _matrix/...). This is the same URL a user would
+# enter into the 'custom HS URL' field on their client. If you
+# use synapse with a reverse proxy, this should be the URL to reach
+# synapse via the proxy.
+#
+#public_baseurl: https://example.com/
+
+# Set the soft limit on the number of file descriptors synapse can use
+# Zero is used to indicate synapse should set the soft limit to the
+# hard limit.
+#
+#soft_file_limit: 0
+
+# Set to false to disable presence tracking on this homeserver.
+#
+#use_presence: false
+
+# Whether to require authentication to retrieve profile data (avatars,
+# display names) of other users through the client API. Defaults to
+# 'false'. Note that profile data is also available via the federation
+# API, so this setting is of limited value if federation is enabled on
+# the server.
+#
+#require_auth_for_profile_requests: true
+
+# If set to 'true', requires authentication to access the server's
+# public rooms directory through the client API, and forbids any other
+# homeserver to fetch it via federation. Defaults to 'false'.
+#
+#restrict_public_rooms_to_local_users: true
+
+# The GC threshold parameters to pass to `gc.set_threshold`, if defined
+#
+#gc_thresholds: [700, 10, 10]
+
+# Set the limit on the returned events in the timeline in the get
+# and sync operations. The default value is -1, which means no upper limit.
+#
+#filter_timeline_limit: 5000
+
+# Whether room invites to users on this server should be blocked
+# (except those sent by local server admins). The default is False.
+#
+#block_non_admin_invites: True
+
+# Room searching
+#
+# If disabled, new messages will not be indexed for searching and users
+# will receive errors when searching for messages. Defaults to enabled.
+#
+#enable_search: false
+
+# Restrict federation to the following whitelist of domains.
+# N.B. we recommend also firewalling your federation listener to limit
+# inbound federation traffic as early as possible, rather than relying
+# purely on this application-layer restriction. If not specified, the
+# default is to whitelist everything.
+#
+#federation_domain_whitelist:
+# - lon.example.com
+# - nyc.example.com
+# - syd.example.com
+
+# Prevent federation requests from being sent to the following
+# blacklist IP address CIDR ranges. If this option is not specified, or
+# specified with an empty list, no ip range blacklist will be enforced.
+#
+# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+# listed here, since they correspond to unroutable addresses.)
+#
+federation_ip_range_blacklist:
+ - '127.0.0.0/8'
+ - '10.0.0.0/8'
+ - '172.16.0.0/12'
+ - '192.168.0.0/16'
+ - '100.64.0.0/10'
+ - '169.254.0.0/16'
+ - '::1/128'
+ - 'fe80::/64'
+ - 'fc00::/7'
+
+# List of ports that Synapse should listen on, their purpose and their
+# configuration.
+#
+# Options for each listener include:
+#
+# port: the TCP port to bind to
+#
+# bind_addresses: a list of local addresses to listen on. The default is
+# 'all local interfaces'.
+#
+# type: the type of listener. Normally 'http', but other valid options are:
+# 'manhole' (see docs/manhole.md),
+# 'metrics' (see docs/metrics-howto.rst),
+# 'replication' (see docs/workers.rst).
+#
+# tls: set to true to enable TLS for this listener. Will use the TLS
+# key/cert specified in tls_private_key_path / tls_certificate_path.
+#
+# x_forwarded: Only valid for an 'http' listener. Set to true to use the
+# X-Forwarded-For header as the client IP. Useful when Synapse is
+# behind a reverse-proxy.
+#
+# resources: Only valid for an 'http' listener. A list of resources to host
+# on this port. Options for each resource are:
+#
+# names: a list of names of HTTP resources. See below for a list of
+# valid resource names.
+#
+#   compress: set to true to enable HTTP compression for this resource.
+#
+# additional_resources: Only valid for an 'http' listener. A map of
+# additional endpoints which should be loaded via dynamic modules.
+#
+# Valid resource names are:
+#
+# client: the client-server API (/_matrix/client), and the synapse admin
+# API (/_synapse/admin). Also implies 'media' and 'static'.
+#
+# consent: user consent forms (/_matrix/consent). See
+# docs/consent_tracking.md.
+#
+# federation: the server-server API (/_matrix/federation). Also implies
+# 'media', 'keys', 'openid'
+#
+# keys: the key discovery API (/_matrix/keys).
+#
+# media: the media API (/_matrix/media).
+#
+# metrics: the metrics interface. See docs/metrics-howto.rst.
+#
+# openid: OpenID authentication.
+#
+# replication: the HTTP replication API (/_synapse/replication). See
+# docs/workers.rst.
+#
+# static: static resources under synapse/static (/_matrix/static). (Mostly
+# useful for 'fallback authentication'.)
+#
+# webclient: A web client. Requires web_client_location to be set.
+#
+listeners:
+ # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
+ #
+ # Disabled by default. To enable it, uncomment the following. (Note that you
+ # will also need to give Synapse a TLS key and certificate: see the TLS section
+ # below.)
+ #
+ #- port: 8448
+ # type: http
+ # tls: true
+ # resources:
+ # - names: [client, federation]
+
+ # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
+ # that unwraps TLS.
+ #
+ # If you plan to use a reverse proxy, please see
+ # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
+ #
+ - port: 8008
+ tls: false
+ bind_addresses: ['::1', '127.0.0.1']
+ type: http
+ x_forwarded: true
+
+ resources:
+ - names: [client, federation]
+ compress: false
+
+  # example additional_resources:
+ #
+ #additional_resources:
+ # "/_matrix/my/custom/endpoint":
+ # module: my_module.CustomRequestHandler
+ # config: {}
+
+ # Turn on the twisted ssh manhole service on localhost on the given
+ # port.
+ #
+ #- port: 9000
+ # bind_addresses: ['::1', '127.0.0.1']
+ # type: manhole
+
+
+## Homeserver blocking ##
+
+# How to reach the server admin, used in ResourceLimitError
+#
+#admin_contact: 'mailto:admin@server.com'
+
+# Global blocking
+#
+#hs_disabled: False
+#hs_disabled_message: 'Human readable reason for why the HS is blocked'
+#hs_disabled_limit_type: 'error code(str), to help clients decode reason'
+
+# Monthly Active User Blocking
+#
+#limit_usage_by_mau: False
+#max_mau_value: 50
+#mau_trial_days: 2
+
+# If enabled, the metrics for the number of monthly active users will
+# be populated, however no one will be limited. If limit_usage_by_mau
+# is true, this is implied to be true.
+#
+#mau_stats_only: False
+
+# Sometimes the server admin will want to ensure certain accounts are
+# never blocked by mau checking. These accounts are specified here.
+#
+#mau_limit_reserved_threepids:
+# - medium: 'email'
+# address: 'reserved_user@example.com'
+
+# Used by phonehome stats to group together related servers.
+#server_context: context
+
+# Whether to require a user to be in the room to add an alias to it.
+# Defaults to 'true'.
+#
+#require_membership_for_aliases: false
+
+# Whether to allow per-room membership profiles through the sending of membership
+# events with profile information that differs from the target's global profile.
+# Defaults to 'true'.
+#
+#allow_per_room_profiles: false
+
+
+## TLS ##
+
+# PEM-encoded X509 certificate for TLS.
+# This certificate, as of Synapse 1.0, will need to be a valid and verifiable
+# certificate, signed by a recognised Certificate Authority.
+#
+# See 'ACME support' below to enable auto-provisioning this certificate via
+# Let's Encrypt.
+#
+# If supplying your own, be sure to use a `.pem` file that includes the
+# full certificate chain including any intermediate certificates (for
+# instance, if using certbot, use `fullchain.pem` as your certificate,
+# not `cert.pem`).
+#
+#tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt"
+
+# PEM-encoded private key for TLS
+#
+#tls_private_key_path: "CONFDIR/SERVERNAME.tls.key"
+
+# Whether to verify TLS certificates when sending federation traffic.
+#
+# This currently defaults to `false`, however this will change in
+# Synapse 1.0 when valid federation certificates will be required.
+#
+#federation_verify_certificates: true
+
+# Skip federation certificate verification on the following whitelist
+# of domains.
+#
+# This setting should only be used in very specific cases, such as
+# federation over Tor hidden services and similar. For private networks
+# of homeservers, you likely want to use a private CA instead.
+#
+# Only effective if federation_verify_certificates is `true`.
+#
+#federation_certificate_verification_whitelist:
+# - lon.example.com
+# - *.domain.com
+# - *.onion
+
+# List of custom certificate authorities for federation traffic.
+#
+# This setting should only normally be used within a private network of
+# homeservers.
+#
+# Note that this list will replace those that are provided by your
+# operating environment. Certificates must be in PEM format.
+#
+#federation_custom_ca_list:
+# - myCA1.pem
+# - myCA2.pem
+# - myCA3.pem
+
+# ACME support: This will configure Synapse to request a valid TLS certificate
+# for your configured `server_name` via Let's Encrypt.
+#
+# Note that provisioning a certificate in this way requires port 80 to be
+# routed to Synapse so that it can complete the http-01 ACME challenge.
+# By default, if you enable ACME support, Synapse will attempt to listen on
+# port 80 for incoming http-01 challenges - however, this will likely fail
+# with 'Permission denied' or a similar error.
+#
+# There are a couple of potential solutions to this:
+#
+# * If you already have an Apache, Nginx, or similar listening on port 80,
+# you can configure Synapse to use an alternate port, and have your web
+# server forward the requests. For example, assuming you set 'port: 8009'
+# below, on Apache, you would write:
+#
+# ProxyPass /.well-known/acme-challenge http://localhost:8009/.well-known/acme-challenge
+#
+# * Alternatively, you can use something like `authbind` to give Synapse
+# permission to listen on port 80.
+#
+acme:
+ # ACME support is disabled by default. Uncomment the following line
+ # (and tls_certificate_path and tls_private_key_path above) to enable it.
+ #
+ #enabled: true
+
+ # Endpoint to use to request certificates. If you only want to test,
+ # use Let's Encrypt's staging url:
+ # https://acme-staging.api.letsencrypt.org/directory
+ #
+ #url: https://acme-v01.api.letsencrypt.org/directory
+
+ # Port number to listen on for the HTTP-01 challenge. Change this if
+ # you are forwarding connections through Apache/Nginx/etc.
+ #
+ #port: 80
+
+ # Local addresses to listen on for incoming connections.
+ # Again, you may want to change this if you are forwarding connections
+ # through Apache/Nginx/etc.
+ #
+ #bind_addresses: ['::', '0.0.0.0']
+
+ # How many days remaining on a certificate before it is renewed.
+ #
+ #reprovision_threshold: 30
+
+ # The domain that the certificate should be for. Normally this
+ # should be the same as your Matrix domain (i.e., 'server_name'), but,
+ # by putting a file at 'https://<server_name>/.well-known/matrix/server',
+ # you can delegate incoming traffic to another server. If you do that,
+ # you should give the target of the delegation here.
+ #
+ # For example: if your 'server_name' is 'example.com', but
+ # 'https://example.com/.well-known/matrix/server' delegates to
+ # 'matrix.example.com', you should put 'matrix.example.com' here.
+ #
+ # If not set, defaults to your 'server_name'.
+ #
+ #domain: matrix.example.com
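+  # For the delegation example above, the file served at
+  # 'https://example.com/.well-known/matrix/server' would contain
+  # something like the following (shown here for illustration only; see
+  # the Matrix server delegation documentation for the authoritative
+  # format):
+  #
+  #   {"m.server": "matrix.example.com:443"}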
+
+# List of allowed TLS fingerprints for this server to publish along
+# with the signing keys for this server. Other matrix servers that
+# make HTTPS requests to this server will check that the TLS
+# certificates returned by this server match one of the fingerprints.
+#
+# Synapse automatically adds the fingerprint of its own certificate
+# to the list. So if federation traffic is handled directly by synapse
+# then no modification to the list is required.
+#
+# If synapse is run behind a load balancer that handles the TLS then it
+# will be necessary to add the fingerprints of the certificates used by
+# the loadbalancers to this list if they are different to the one
+# synapse is using.
+#
+# Homeservers are permitted to cache the list of TLS fingerprints
+# returned in the key responses up to the "valid_until_ts" returned in
+# the key response. It may be necessary to publish the fingerprints of a new
+# certificate and wait until the "valid_until_ts" of the previous key
+# responses have passed before deploying it.
+#
+# You can calculate a fingerprint from a given TLS listener via:
+# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
+# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
+# or by checking matrix.org/federationtester/api/report?server_name=$host
+#
+#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
+
+
+
+## Database ##
+
+database:
+ # The database engine name
+ name: "sqlite3"
+ # Arguments to pass to the engine
+ args:
+ # Path to the database
+ database: "DATADIR/homeserver.db"
+
+# Number of events to cache in memory.
+#
+#event_cache_size: 10K
+
+
+## Logging ##
+
+# A yaml python logging config file
+#
+log_config: "CONFDIR/SERVERNAME.log.config"
+
+
+## Ratelimiting ##
+
+# Ratelimiting settings for client actions (registration, login, messaging).
+#
+# Each ratelimiting configuration is made of two parameters:
+# - per_second: number of requests a client can send per second.
+# - burst_count: number of requests a client can send before being throttled.
+#
+# Synapse currently uses the following configurations:
+# - one for messages that ratelimits sending based on the account the client
+# is using
+# - one for registration that ratelimits registration requests based on the
+# client's IP address.
+# - one for login that ratelimits login requests based on the client's IP
+# address.
+# - one for login that ratelimits login requests based on the account the
+# client is attempting to log into.
+# - one for login that ratelimits login requests based on the account the
+# client is attempting to log into, based on the amount of failed login
+# attempts for this account.
+#
+# The defaults are as shown below.
+#
+#rc_message:
+# per_second: 0.2
+# burst_count: 10
+#
+#rc_registration:
+# per_second: 0.17
+# burst_count: 3
+#
+#rc_login:
+# address:
+# per_second: 0.17
+# burst_count: 3
+# account:
+# per_second: 0.17
+# burst_count: 3
+# failed_attempts:
+# per_second: 0.17
+# burst_count: 3
+
+
+# Ratelimiting settings for incoming federation
+#
+# The rc_federation configuration is made up of the following settings:
+# - window_size: window size in milliseconds
+# - sleep_limit: number of federation requests from a single server in
+# a window before the server will delay processing the request.
+# - sleep_delay: duration in milliseconds to delay processing events
+# from remote servers by if they go over the sleep limit.
+# - reject_limit: maximum number of concurrent federation requests
+# allowed from a single server
+# - concurrent: number of federation requests to concurrently process
+# from a single server
+#
+# The defaults are as shown below.
+#
+#rc_federation:
+# window_size: 1000
+# sleep_limit: 10
+# sleep_delay: 500
+# reject_limit: 50
+# concurrent: 3
+
+# Target outgoing federation transaction frequency for sending read-receipts,
+# per-room.
+#
+# If we end up trying to send out more read-receipts, they will get buffered up
+# into fewer transactions.
+#
+#federation_rr_transactions_per_room_per_second: 50
+
+
+
+# Directory where uploaded images and attachments are stored.
+#
+media_store_path: "DATADIR/media_store"
+
+# Media storage providers allow media to be stored in different
+# locations.
+#
+#media_storage_providers:
+# - module: file_system
+# # Whether to write new local files.
+# store_local: false
+# # Whether to write new remote media
+# store_remote: false
+# # Whether to block upload requests waiting for write to this
+# # provider to complete
+# store_synchronous: false
+# config:
+# directory: /mnt/some/other/directory
+
+# Directory where in-progress uploads are stored.
+#
+uploads_path: "DATADIR/uploads"
+
+# The largest allowed upload size in bytes
+#
+#max_upload_size: 10M
+
+# Maximum number of pixels that will be thumbnailed
+#
+#max_image_pixels: 32M
+
+# Whether to generate new thumbnails on the fly to precisely match
+# the resolution requested by the client. If true then whenever
+# a new resolution is requested by the client the server will
+# generate a new thumbnail. If false the server will pick a thumbnail
+# from a precalculated list.
+#
+#dynamic_thumbnails: false
+
+# List of thumbnails to precalculate when an image is uploaded.
+#
+#thumbnail_sizes:
+# - width: 32
+# height: 32
+# method: crop
+# - width: 96
+# height: 96
+# method: crop
+# - width: 320
+# height: 240
+# method: scale
+# - width: 640
+# height: 480
+# method: scale
+# - width: 800
+# height: 600
+# method: scale
+
+# Is the preview URL API enabled?
+#
+# 'false' by default: uncomment the following to enable it (and specify a
+# url_preview_ip_range_blacklist).
+#
+#url_preview_enabled: true
+
+# List of IP address CIDR ranges that the URL preview spider is denied
+# from accessing. There are no defaults: you must explicitly
+# specify a list for URL previewing to work. You should specify any
+# internal services in your network that you do not want synapse to try
+# to connect to, otherwise anyone in any Matrix room could cause your
+# synapse to issue arbitrary GET requests to your internal services,
+# causing serious security issues.
+#
+# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+# listed here, since they correspond to unroutable addresses.)
+#
+# This must be specified if url_preview_enabled is set. It is recommended that
+# you uncomment the following list as a starting point.
+#
+#url_preview_ip_range_blacklist:
+# - '127.0.0.0/8'
+# - '10.0.0.0/8'
+# - '172.16.0.0/12'
+# - '192.168.0.0/16'
+# - '100.64.0.0/10'
+# - '169.254.0.0/16'
+# - '::1/128'
+# - 'fe80::/64'
+# - 'fc00::/7'
+
+# List of IP address CIDR ranges that the URL preview spider is allowed
+# to access even if they are specified in url_preview_ip_range_blacklist.
+# This is useful for specifying exceptions to wide-ranging blacklisted
+# target IP ranges - e.g. for enabling URL previews for a specific private
+# website only visible in your network.
+#
+#url_preview_ip_range_whitelist:
+# - '192.168.1.1'
+
+# Optional list of URL matches that the URL preview spider is
+# denied from accessing. You should use url_preview_ip_range_blacklist
+# in preference to this, otherwise someone could define a public DNS
+# entry that points to a private IP address and circumvent the blacklist.
+# This is more useful if there is an entire shape of URL that
+# you know you will never want synapse to try to spider.
+#
+# Each list entry is a dictionary of url component attributes as returned
+# by urlparse.urlsplit as applied to the absolute form of the URL. See
+# https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
+# The values of the dictionary are treated as a filename match pattern
+# applied to that component of URLs, unless they start with a ^ in which
+# case they are treated as a regular expression match. If all the
+# specified component matches for a given list item succeed, the URL is
+# blacklisted.
+#
+#url_preview_url_blacklist:
+# # blacklist any URL with a username in its URI
+# - username: '*'
+#
+# # blacklist all *.google.com URLs
+# - netloc: 'google.com'
+# - netloc: '*.google.com'
+#
+# # blacklist all plain HTTP URLs
+# - scheme: 'http'
+#
+# # blacklist http(s)://www.acme.com/foo
+# - netloc: 'www.acme.com'
+# path: '/foo'
+#
+# # blacklist any URL with a literal IPv4 address
+# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
+
+# The largest allowed URL preview spidering size in bytes
+#
+#max_spider_size: 10M
+
+
+## Captcha ##
+# See docs/CAPTCHA_SETUP for full details of configuring this.
+
+# This Home Server's ReCAPTCHA public key.
+#
+#recaptcha_public_key: "YOUR_PUBLIC_KEY"
+
+# This Home Server's ReCAPTCHA private key.
+#
+#recaptcha_private_key: "YOUR_PRIVATE_KEY"
+
+# Enables ReCaptcha checks when registering, preventing signup
+# unless a captcha is answered. Requires a valid ReCaptcha
+# public/private key.
+#
+#enable_registration_captcha: false
+
+# A secret key used to bypass the captcha test entirely.
+#
+#captcha_bypass_secret: "YOUR_SECRET_HERE"
+
+# The API endpoint to use for verifying m.login.recaptcha responses.
+#
+#recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
+
+
+## TURN ##
+
+# The public URIs of the TURN server to give to clients
+#
+#turn_uris: []
+
+# The shared secret used to compute passwords for the TURN server
+#
+#turn_shared_secret: "YOUR_SHARED_SECRET"
+
+# The username and password if the TURN server needs them and
+# does not use a token
+#
+#turn_username: "TURNSERVER_USERNAME"
+#turn_password: "TURNSERVER_PASSWORD"
+
+# How long generated TURN credentials last
+#
+#turn_user_lifetime: 1h
+
+# Whether guests should be allowed to use the TURN server.
+# This defaults to True; otherwise VoIP will be unreliable for guests.
+# However, it does introduce a slight security risk as it allows users to
+# connect to arbitrary endpoints without having first signed up for a
+# valid account (e.g. by passing a CAPTCHA).
+#
+#turn_allow_guests: True
+
+
+## Registration ##
+#
+# Registration can be rate-limited using the parameters in the "Ratelimiting"
+# section of this file.
+
+# Enable registration for new users.
+#
+#enable_registration: false
+
+# Optional account validity configuration. This allows for accounts to be denied
+# any request after a given period.
+#
+# ``enabled`` defines whether the account validity feature is enabled. Defaults
+# to False.
+#
+# ``period`` allows setting the period after which an account is valid
+# after its registration. When renewing the account, its validity period
+# will be extended by this amount of time. This parameter is required when using
+# the account validity feature.
+#
+# ``renew_at`` is the amount of time before an account's expiry date at which
+# Synapse will send an email to the account's email address with a renewal link.
+# This needs the ``email`` and ``public_baseurl`` configuration sections to be
+# filled.
+#
+# ``renew_email_subject`` is the subject of the email sent out with the renewal
+# link. ``%(app)s`` can be used as a placeholder for the ``app_name`` parameter
+# from the ``email`` section.
+#
+# Once this feature is enabled, Synapse will look for registered users without an
+# expiration date at startup and will add one to every account it finds, using the
+# current settings at that time.
+# This means that, if a validity period is set and Synapse is restarted, it will
+# derive an expiration date for each user from the current validity period; if the
+# validity period later changes and Synapse is restarted again, users' existing
+# expiration dates won't be updated unless their accounts are manually renewed.
+#
+#account_validity:
+# enabled: True
+# period: 6w
+# renew_at: 1w
+# renew_email_subject: "Renew your %(app)s account"
+
+# The user must provide all of the below types of 3PID when registering.
+#
+#registrations_require_3pid:
+# - email
+# - msisdn
+
+# Explicitly disable asking for MSISDNs from the registration
+# flow (overrides registrations_require_3pid if MSISDNs are set as required)
+#
+#disable_msisdn_registration: true
+
+# Mandate that users are only allowed to associate certain formats of
+# 3PIDs with accounts on this server.
+#
+#allowed_local_3pids:
+# - medium: email
+# pattern: '.*@matrix\.org'
+# - medium: email
+# pattern: '.*@vector\.im'
+# - medium: msisdn
+# pattern: '\+44'
+
+# Enable 3PIDs lookup requests to identity servers from this server.
+#
+#enable_3pid_lookup: true
+
+# If set, allows registration of standard or admin accounts by anyone who
+# has the shared secret, even if registration is otherwise disabled.
+#
+# registration_shared_secret: <PRIVATE STRING>
+
+# Set the number of bcrypt rounds used to generate password hash.
+# Larger numbers increase the work factor needed to generate the hash.
+# The default number is 12 (which equates to 2^12 rounds).
+# N.B. that increasing this will exponentially increase the time required
+# to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
+#
+#bcrypt_rounds: 12
+
+# Allows users to register as guests without a password/email/etc, and
+# participate in rooms hosted on this server which have been made
+# accessible to anonymous users.
+#
+#allow_guest_access: false
+
+# The identity server which we suggest that clients should use when users log
+# in on this server.
+#
+# (By default, no suggestion is made, so it is left up to the client.
+# This setting is ignored unless public_baseurl is also set.)
+#
+#default_identity_server: https://matrix.org
+
+# The list of identity servers trusted to verify third party
+# identifiers by this server.
+#
+# Also defines the ID server which will be called when an account is
+# deactivated (one will be picked arbitrarily).
+#
+#trusted_third_party_id_servers:
+# - matrix.org
+# - vector.im
+
+# Users who register on this homeserver will automatically be joined
+# to these rooms
+#
+#auto_join_rooms:
+# - "#example:example.com"
+
+# Where auto_join_rooms are specified, setting this flag ensures that
+# the rooms exist by creating them when the first user on the
+# homeserver registers.
+# Setting to false means that if the rooms are not manually created,
+# users cannot be auto-joined since they do not exist.
+#
+#autocreate_auto_join_rooms: true
+
+
+## Metrics ##
+
+# Enable collection and rendering of performance metrics
+#
+#enable_metrics: False
+
+# Enable sentry integration
+# NOTE: While attempts are made to ensure that the logs don't contain
+# any sensitive information, this cannot be guaranteed. By enabling
+# this option the sentry server may therefore receive sensitive
+# information, and it in turn may then disseminate sensitive information
+# through insecure notification channels if so configured.
+#
+#sentry:
+# dsn: "..."
+
+# Whether or not to report anonymized homeserver usage statistics.
+# report_stats: true|false
+
+
+## API Configuration ##
+
+# A list of event types that will be included in the room_invite_state
+#
+#room_invite_state_types:
+# - "m.room.join_rules"
+# - "m.room.canonical_alias"
+# - "m.room.avatar"
+# - "m.room.encryption"
+# - "m.room.name"
+
+
+# A list of application service config files to use
+#
+#app_service_config_files:
+# - app_service_1.yaml
+# - app_service_2.yaml
+
+# Uncomment to enable tracking of application service IP addresses. Implicitly
+# enables MAU tracking for application service users.
+#
+#track_appservice_user_ips: True
+
+
+# a secret which is used to sign access tokens. If none is specified,
+# the registration_shared_secret is used, if one is given; otherwise,
+# a secret key is derived from the signing key.
+#
+# macaroon_secret_key: <PRIVATE STRING>
+
+# Used to enable access token expiration.
+#
+#expire_access_token: False
+
+# a secret which is used to calculate HMACs for form values, to stop
+# falsification of values. Must be specified for the User Consent
+# forms to work.
+#
+# form_secret: <PRIVATE STRING>
+
+## Signing Keys ##
+
+# Path to the signing key to sign messages with
+#
+signing_key_path: "CONFDIR/SERVERNAME.signing.key"
+
+# The keys that the server used to sign messages with but won't use
+# to sign new messages, e.g. because it has lost its private key
+#
+#old_signing_keys:
+# "ed25519:auto":
+# # Base64 encoded public key
+# key: "The public part of your old signing key."
+# # Millisecond POSIX timestamp when the key expired.
+# expired_ts: 123456789123
+
+# How long a key response published by this server is valid for.
+# Used to set the valid_until_ts in /key/v2 APIs.
+# Determines how quickly servers will query to check which keys
+# are still valid.
+#
+#key_refresh_interval: 1d
+
+# The trusted servers to download signing keys from.
+#
+#perspectives:
+# servers:
+# "matrix.org":
+# verify_keys:
+# "ed25519:auto":
+# key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+
+
+# Enable SAML2 for registration and login. Uses pysaml2.
+#
+# `sp_config` is the configuration for the pysaml2 Service Provider.
+# See pysaml2 docs for format of config.
+#
+# Default values will be used for the 'entityid' and 'service' settings,
+# so it is not normally necessary to specify them unless you need to
+# override them.
+#
+#saml2_config:
+# sp_config:
+# # point this to the IdP's metadata. You can use either a local file or
+# # (preferably) a URL.
+# metadata:
+# #local: ["saml2/idp.xml"]
+# remote:
+# - url: https://our_idp/metadata.xml
+#
+# # The rest of sp_config is just used to generate our metadata xml, and you
+# # may well not need it, depending on your setup. Alternatively you
+# # may need a whole lot more detail - see the pysaml2 docs!
+#
+# description: ["My awesome SP", "en"]
+# name: ["Test SP", "en"]
+#
+# organization:
+# name: Example com
+# display_name:
+# - ["Example co", "en"]
+# url: "http://example.com"
+#
+# contact_person:
+# - given_name: Bob
+# sur_name: "the Sysadmin"
+#       email_address: ["admin@example.com"]
+#       contact_type: technical
+#
+# # Instead of putting the config inline as above, you can specify a
+# # separate pysaml2 configuration file:
+# #
+# config_path: "CONFDIR/sp_conf.py"
+
+
+
+# Enable CAS for registration and login.
+#
+#cas_config:
+# enabled: true
+# server_url: "https://cas-server.com"
+# service_url: "https://homeserver.domain.com:8448"
+# #required_attributes:
+# # name: value
+
+
+# The JWT needs to contain a globally unique "sub" (subject) claim.
+#
+#jwt_config:
+# enabled: true
+# secret: "a secret"
+# algorithm: "HS256"
+
+
+password_config:
+ # Uncomment to disable password login
+ #
+ #enabled: false
+
+ # Uncomment and change to a secret random string for extra security.
+ # DO NOT CHANGE THIS AFTER INITIAL SETUP!
+ #
+ #pepper: "EVEN_MORE_SECRET"
+
+
+
+# Enable sending emails for notification events or expiry notices.
+# Defining a custom URL for Riot is only needed if email notifications
+# should contain links to a self-hosted installation of Riot; when set
+# the "app_name" setting is ignored.
+#
+# If your SMTP server requires authentication, the optional smtp_user &
+# smtp_pass variables should be used
+#
+#email:
+# enable_notifs: false
+# smtp_host: "localhost"
+# smtp_port: 25
+# smtp_user: "exampleusername"
+# smtp_pass: "examplepassword"
+# require_transport_security: False
+# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
+# app_name: Matrix
+# # if template_dir is unset, uses the example templates that are part of
+# # the Synapse distribution.
+# #template_dir: res/templates
+# notif_template_html: notif_mail.html
+# notif_template_text: notif_mail.txt
+# # Templates for account expiry notices.
+# expiry_template_html: notice_expiry.html
+# expiry_template_text: notice_expiry.txt
+# notif_for_new_users: True
+# riot_base_url: "http://localhost/riot"
+
+
+#password_providers:
+# - module: "ldap_auth_provider.LdapAuthProvider"
+# config:
+# enabled: true
+# uri: "ldap://ldap.example.com:389"
+# start_tls: true
+# base: "ou=users,dc=example,dc=com"
+# attributes:
+# uid: "cn"
+# mail: "email"
+# name: "givenName"
+# #bind_dn:
+# #bind_password:
+# #filter: "(objectClass=posixAccount)"
+
+
+
+# Clients requesting push notifications can either have the body of
+# the message sent in the notification poke along with other details
+# like the sender, or just the event ID and room ID (`event_id_only`).
+# If clients choose the former, this option controls whether the
+# notification request includes the content of the event (other details
+# like the sender are still included). For `event_id_only` push, it
+# has no effect.
+#
+# For modern Android devices the notification content will still appear
+# because it is loaded by the app. iPhones, however, will show a
+# notification saying only that a message arrived and who it came from.
+#
+#push:
+# include_content: true
+
+
+#spam_checker:
+# module: "my_custom_project.SuperSpamChecker"
+# config:
+# example_option: 'things'
+
+
+# Uncomment to allow non-server-admin users to create groups on this server
+#
+#enable_group_creation: true
+
+# If enabled, non server admins can only create groups with local parts
+# starting with this prefix
+#
+#group_creation_prefix: "unofficial/"
+
+
+
+# User Directory configuration
+#
+# 'enabled' defines whether users can search the user directory. If
+# false then empty responses are returned to all queries. Defaults to
+# true.
+#
+# 'search_all_users' defines whether to search all users visible to your HS
+# when searching the user directory, rather than limiting to users visible
+# in public rooms. Defaults to false. If you set it to true, you'll have to run
+# UPDATE user_directory_stream_pos SET stream_id = NULL;
+# on your database to tell it to rebuild the user_directory search indexes.
+#
+#user_directory:
+# enabled: true
+# search_all_users: false
+
+
+# User Consent configuration
+#
+# for detailed instructions, see
+# https://github.com/matrix-org/synapse/blob/master/docs/consent_tracking.md
+#
+# Parts of this section are required if enabling the 'consent' resource under
+# 'listeners', in particular 'template_dir' and 'version'.
+#
+# 'template_dir' gives the location of the templates for the HTML forms.
+# This directory should contain one subdirectory per language (eg, 'en', 'fr'),
+# and each language directory should contain the policy document (named as
+# '<version>.html') and a success page (success.html).
+#
+# 'version' specifies the 'current' version of the policy document. It defines
+# the version to be served by the consent resource if there is no 'v'
+# parameter.
+#
+# 'server_notice_content', if enabled, will send a user a "Server Notice"
+# asking them to consent to the privacy policy. The 'server_notices' section
+# must also be configured for this to work. Notices will *not* be sent to
+# guest users unless 'send_server_notice_to_guests' is set to true.
+#
+# 'block_events_error', if set, will block any attempts to send events
+# until the user consents to the privacy policy. The value of the setting is
+# used as the text of the error.
+#
+# 'require_at_registration', if enabled, will add a step to the registration
+# process, similar to how captcha works. Users will be required to accept the
+# policy before their account is created.
+#
+# 'policy_name' is the display name of the policy users will see when registering
+# for an account. Has no effect unless `require_at_registration` is enabled.
+# Defaults to "Privacy Policy".
+#
+#user_consent:
+# template_dir: res/templates/privacy
+# version: 1.0
+# server_notice_content:
+# msgtype: m.text
+# body: >-
+# To continue using this homeserver you must review and agree to the
+# terms and conditions at %(consent_uri)s
+# send_server_notice_to_guests: True
+# block_events_error: >-
+# To continue using this homeserver you must review and agree to the
+# terms and conditions at %(consent_uri)s
+# require_at_registration: False
+# policy_name: Privacy Policy
+#
+
+
+# Server Notices room configuration
+#
+# Uncomment this section to enable a room which can be used to send notices
+# from the server to users. It is a special room which cannot be left; notices
+# come from a special "notices" user id.
+#
+# If you uncomment this section, you *must* define the system_mxid_localpart
+# setting, which defines the id of the user which will be used to send the
+# notices.
+#
+# It's also possible to override the room name, the display name of the
+# "notices" user, and the avatar for the user.
+#
+#server_notices:
+# system_mxid_localpart: notices
+# system_mxid_display_name: "Server Notices"
+# system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
+# room_name: "Server Notices"
+
+
+
+# Uncomment to disable searching the public room list. When disabled,
+# searching local and remote room lists is blocked for local and remote
+# users by always returning an empty list for all queries.
+#
+#enable_room_list_search: false
+
+# The `alias_creation` option controls who's allowed to create aliases
+# on this server.
+#
+# The format of this option is a list of rules that contain globs that
+# match against user_id, room_id and the new alias (fully qualified with
+# server name). The action in the first rule that matches is taken,
+# which can currently either be "allow" or "deny".
+#
+# Missing user_id/room_id/alias fields default to "*".
+#
+# If no rules match, the request is denied. An empty list means no one
+# can create aliases.
+#
+# Options for the rules include:
+#
+# user_id: Matches against the creator of the alias
+# alias: Matches against the alias being created
+# room_id: Matches against the room ID the alias is being pointed at
+# action: Whether to "allow" or "deny" the request if the rule matches
+#
+# The default is:
+#
+#alias_creation_rules:
+# - user_id: "*"
+# alias: "*"
+# room_id: "*"
+# action: allow
+
+# The `room_list_publication_rules` option controls who can publish and
+# which rooms can be published in the public room list.
+#
+# The format of this option is the same as that for
+# `alias_creation_rules`.
+#
+# If the room has one or more aliases associated with it, only one of
+# the aliases needs to match the alias rule. If there are no aliases
+# then only rules with `alias: *` match.
+#
+# If no rules match, the request is denied. An empty list means no one
+# can publish rooms.
+#
+# Options for the rules include:
+#
+#   user_id: Matches against the creator of the alias
+# room_id: Matches against the room ID being published
+# alias: Matches against any current local or canonical aliases
+# associated with the room
+# action: Whether to "allow" or "deny" the request if the rule matches
+#
+# The default is:
+#
+#room_list_publication_rules:
+# - user_id: "*"
+# alias: "*"
+# room_id: "*"
+# action: allow
diff --git a/docs/server_notices.md b/docs/server_notices.md
index 58f8776..950a660 100644
--- a/docs/server_notices.md
+++ b/docs/server_notices.md
@@ -1,5 +1,4 @@
-Server Notices
-==============
+# Server Notices
'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.
@@ -11,8 +10,7 @@ they may also find a use for features such as "Message of the day".
This is a feature specific to Synapse, but it uses standard Matrix
communication mechanisms, so should work with any Matrix client.
-User experience
----------------
+## User experience
When the user is first sent a server notice, they will get an invitation to a
room (typically called 'Server Notices', though this is configurable in
@@ -29,8 +27,7 @@ levels.
Having joined the room, the user can leave the room if they want. Subsequent
server notices will then cause a new room to be created.
-Synapse configuration
----------------------
+## Synapse configuration
Server notices come from a specific user id on the server. Server
administrators are free to choose the user id - something like `server` is
@@ -58,17 +55,7 @@ room which will be created.
`system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the
displayname and avatar of the Server Notices user.
-Sending notices
----------------
+## Sending notices
-As of the current version of synapse, there is no convenient interface for
-sending notices (other than the automated ones sent as part of consent
-tracking).
-
-In the meantime, it is possible to test this feature using the manhole. Having
-gone into the manhole as described in [manhole.md](manhole.md), a notice can be
-sent with something like:
-
-```
->>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'})
-```
+To send server notices to users you can use the
+[admin_api](admin_api/server_notices.md).
diff --git a/docs/tcp_replication.rst b/docs/tcp_replication.rst
index 73436ce..75e7234 100644
--- a/docs/tcp_replication.rst
+++ b/docs/tcp_replication.rst
@@ -188,7 +188,9 @@ RDATA (S)
A single update in a stream
POSITION (S)
- The position of the stream has been updated
+ The position of the stream has been updated. Sent to the client after all
+ missing updates for a stream have been sent to the client and they're now
+ up to date.
ERROR (S, C)
There was an error
diff --git a/docs/workers.rst b/docs/workers.rst
index 3ba5879..aa4e7a1 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -182,6 +182,7 @@ endpoints matching the following regular expressions::
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/send/
+ ^/_matrix/key/v2/query
The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.
@@ -223,6 +224,15 @@ following regular expressions::
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
^/_matrix/client/(api/v1|r0|unstable)/login$
+ ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
+ ^/_matrix/client/(api/v1|r0|unstable)/keys/query$
+ ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
+ ^/_matrix/client/versions$
+ ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
+
+Additionally, the following REST endpoints can be handled for GET requests::
+
+ ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance::
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages
index 6b9be99..93305ee 100755
--- a/scripts-dev/build_debian_packages
+++ b/scripts-dev/build_debian_packages
@@ -24,6 +24,7 @@ DISTS = (
"ubuntu:xenial",
"ubuntu:bionic",
"ubuntu:cosmic",
+ "ubuntu:disco",
)
DESC = '''\
diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment
index e4a22ba..0ec5075 100755
--- a/scripts-dev/check-newsfragment
+++ b/scripts-dev/check-newsfragment
@@ -7,14 +7,12 @@ set -e
# make sure that origin/develop is up to date
git remote set-branches --add origin develop
-git fetch --depth=1 origin develop
-
-UPSTREAM=origin/develop
+git fetch origin develop
# if there are changes in the debian directory, check that the debian changelog
# has been updated
-if ! git diff --quiet $UPSTREAM... -- debian; then
- if git diff --quiet $UPSTREAM... -- debian/changelog; then
+if ! git diff --quiet FETCH_HEAD... -- debian; then
+ if git diff --quiet FETCH_HEAD... -- debian/changelog; then
echo "Updates to debian directory, but no update to the changelog." >&2
exit 1
fi
@@ -22,7 +20,7 @@ fi
# if there are changes *outside* the debian directory, check that the
# newsfragments have been updated.
-if git diff --name-only $UPSTREAM... | grep -qv '^develop/'; then
+if git diff --name-only FETCH_HEAD... | grep -qv '^debian/'; then
tox -e check-newsfragment
fi
@@ -31,10 +29,10 @@ echo "--------------------------"
echo
# check that any new newsfiles on this branch end with a full stop.
-for f in `git diff --name-only $UPSTREAM... -- changelog.d`; do
+for f in `git diff --name-only FETCH_HEAD... -- changelog.d`; do
lastchar=`tr -d '\n' < $f | tail -c 1`
- if [ $lastchar != '.' ]; then
- echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
+ if [ $lastchar != '.' -a $lastchar != '!' ]; then
+ echo -e "\e[31mERROR: newsfragment $f does not end with a '.' or '!'\e[39m" >&2
exit 1
fi
done
diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py
index dde8596..ac152b5 100644
--- a/scripts-dev/convert_server_keys.py
+++ b/scripts-dev/convert_server_keys.py
@@ -76,7 +76,7 @@ def rows_v2(server, json):
def main():
- config = yaml.load(open(sys.argv[1]))
+ config = yaml.safe_load(open(sys.argv[1]))
valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
server_name = config["server_name"]
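The switch to `yaml.safe_load` above matters because PyYAML's legacy `yaml.load` (called with no explicit loader) can construct arbitrary Python objects from tagged YAML input. A minimal sketch of the difference, assuming only a stock PyYAML install:

    import yaml

    doc = "!!python/object/apply:os.system ['echo pwned']"

    # safe_load refuses python-object tags outright
    try:
        yaml.safe_load(doc)
    except yaml.constructor.ConstructorError:
        print("safe_load rejected the payload")

    # yaml.load(doc) with the legacy full loader would have executed it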
diff --git a/scripts-dev/generate_sample_config b/scripts-dev/generate_sample_config
new file mode 100755
index 0000000..5e33b9b
--- /dev/null
+++ b/scripts-dev/generate_sample_config
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Update/check the docs/sample_config.yaml
+
+set -e
+
+cd `dirname $0`/..
+
+SAMPLE_CONFIG="docs/sample_config.yaml"
+
+if [ "$1" == "--check" ]; then
+ diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || {
+ echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
+ exit 1
+ }
+else
+ ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
+fi
diff --git a/scripts/generate_config b/scripts/generate_config
index 61c5f04..93b6406 100755
--- a/scripts/generate_config
+++ b/scripts/generate_config
@@ -1,6 +1,7 @@
#!/usr/bin/env python
import argparse
+import shutil
import sys
from synapse.config.homeserver import HomeServerConfig
@@ -50,6 +51,13 @@ if __name__ == "__main__":
help="File to write the configuration to. Default: stdout",
)
+ parser.add_argument(
+ "--header-file",
+ type=argparse.FileType('r'),
+ help="File from which to read a header, which will be printed before the "
+ "generated config.",
+ )
+
args = parser.parse_args()
report_stats = args.report_stats
@@ -64,4 +72,7 @@ if __name__ == "__main__":
report_stats=report_stats,
)
+ if args.header_file:
+ shutil.copyfileobj(args.header_file, args.output_file)
+
args.output_file.write(conf)
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 2fa01d1..41be9c9 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -58,15 +58,11 @@ BOOLEAN_COLUMNS = {
APPEND_ONLY_TABLES = [
- "event_content_hashes",
"event_reference_hashes",
- "event_signatures",
- "event_edge_hashes",
"events",
"event_json",
"state_events",
"room_memberships",
- "feedback",
"topics",
"room_names",
"rooms",
@@ -88,7 +84,6 @@ APPEND_ONLY_TABLES = [
"event_search",
"presence_stream",
"push_rules_stream",
- "current_state_resets",
"ex_outlier_stream",
"cache_invalidation_stream",
"public_room_list_stream",
@@ -811,7 +806,7 @@ class CursesProgress(Progress):
middle_space = 1
items = self.tables.items()
- items.sort(key=lambda i: (i[1]["perc"], i[0]))
+ items = sorted(items, key=lambda i: (i[1]["perc"], i[0]))
for i, (table, data) in enumerate(items):
if i + 2 >= rows:
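The `sorted()` change above is the usual Python 2 to 3 fix: on Python 3, `dict.items()` returns a view object with no `.sort()` method, so an explicit `sorted()` call is needed. A quick illustration:

    d = {"b": 2, "a": 1}

    # d.items().sort()  # AttributeError on Python 3: 'dict_items' has no 'sort'
    items = sorted(d.items(), key=lambda i: i[0])  # works on both 2 and 3
    print(items)  # [('a', 1), ('b', 2)]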
diff --git a/setup.py b/setup.py
index 55b1b10..55663e9 100755
--- a/setup.py
+++ b/setup.py
@@ -86,13 +86,9 @@ long_description = read_file(("README.rst",))
REQUIREMENTS = dependencies['REQUIREMENTS']
CONDITIONAL_REQUIREMENTS = dependencies['CONDITIONAL_REQUIREMENTS']
+ALL_OPTIONAL_REQUIREMENTS = dependencies['ALL_OPTIONAL_REQUIREMENTS']
# Make `pip install matrix-synapse[all]` install all the optional dependencies.
-ALL_OPTIONAL_REQUIREMENTS = set()
-
-for optional_deps in CONDITIONAL_REQUIREMENTS.values():
- ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
-
CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 25c1024..4f95778 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -27,4 +27,4 @@ try:
except ImportError:
pass
-__version__ = "0.99.2"
+__version__ = "0.99.5.1"
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 5992d30..0c6c93a 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -64,6 +64,8 @@ class Auth(object):
self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
register_cache("cache", "token_cache", self.token_cache)
+ self._account_validity = hs.config.account_validity
+
@defer.inlineCallbacks
def check_from_context(self, room_version, event, context, do_sig_check=True):
prev_state_ids = yield context.get_prev_state_ids(self.store)
@@ -226,6 +228,17 @@ class Auth(object):
token_id = user_info["token_id"]
is_guest = user_info["is_guest"]
+ # Deny the request if the user account has expired.
+ if self._account_validity.enabled:
+ user_id = user.to_string()
+ expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
+ if expiration_ts is not None and self.clock.time_msec() >= expiration_ts:
+ raise AuthError(
+ 403,
+ "User account has expired",
+ errcode=Codes.EXPIRED_ACCOUNT,
+ )
+
# device_id may not be present if get_user_by_access_token has been
# stubbed out.
device_id = user_info.get("device_id")
@@ -543,7 +556,7 @@ class Auth(object):
""" Check if the given user is a local server admin.
Args:
- user (str): mxid of user to check
+ user (UserID): user to check
Returns:
bool: True if the user is an admin
@@ -621,13 +634,13 @@ class Auth(object):
Returns:
             True if the sender is allowed to redact the target event if the
- target event was created by them.
+ target event was created by them.
False if the sender is allowed to redact the target event with no
- further checks.
+ further checks.
Raises:
AuthError if the event sender is definitely not allowed to redact
- the target event.
+ the target event.
"""
return event_auth.check_redaction(room_version, event, auth_events)
@@ -743,9 +756,9 @@ class Auth(object):
Returns:
Deferred[tuple[str, str|None]]: Resolves to the current membership of
- the user in the room and the membership event ID of the user. If
- the user is not in the room and never has been, then
- `(Membership.JOIN, None)` is returned.
+ the user in the room and the membership event ID of the user. If
+ the user is not in the room and never has been, then
+ `(Membership.JOIN, None)` is returned.
"""
try:
@@ -777,20 +790,22 @@ class Auth(object):
Args:
user_id(str|None): If present, checks for presence against existing
- MAU cohort
+ MAU cohort
threepid(dict|None): If present, checks for presence against configured
- reserved threepid. Used in cases where the user is trying register
- with a MAU blocked server, normally they would be rejected but their
- threepid is on the reserved list. user_id and
- threepid should never be set at the same time.
+              reserved threepid. Used in cases where the user is trying to register
+              with a MAU blocked server; normally they would be rejected, but their
+ threepid is on the reserved list. user_id and
+ threepid should never be set at the same time.
"""
# Never fail an auth check for the server notices users or support user
# This can be a problem where event creation is prohibited due to blocking
- is_support = yield self.store.is_support_user(user_id)
- if user_id == self.hs.config.server_notices_mxid or is_support:
- return
+ if user_id is not None:
+ if user_id == self.hs.config.server_notices_mxid:
+ return
+ if (yield self.store.is_support_user(user_id)):
+ return
if self.hs.config.hs_disabled:
raise ResourceLimitError(
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index f47c33a..6b347b1 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,6 +20,12 @@
# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2**63 - 1
+# the maximum length for a room alias is 255 characters
+MAX_ALIAS_LENGTH = 255
+
+# the maximum length for a user id is 255 characters
+MAX_USERID_LENGTH = 255
+
class Membership(object):
@@ -69,6 +75,7 @@ class EventTypes(object):
Redaction = "m.room.redaction"
ThirdPartyInvite = "m.room.third_party_invite"
Encryption = "m.room.encryption"
+ RelatedGroups = "m.room.related_groups"
RoomHistoryVisibility = "m.room.history_visibility"
CanonicalAlias = "m.room.canonical_alias"
@@ -102,46 +109,6 @@ class ThirdPartyEntityKind(object):
LOCATION = "location"
-class RoomVersions(object):
- V1 = "1"
- V2 = "2"
- V3 = "3"
- STATE_V2_TEST = "state-v2-test"
-
-
-class RoomDisposition(object):
- STABLE = "stable"
- UNSTABLE = "unstable"
-
-
-# the version we will give rooms which are created on this server
-DEFAULT_ROOM_VERSION = RoomVersions.V1
-
-# vdh-test-version is a placeholder to get room versioning support working and tested
-# until we have a working v2.
-KNOWN_ROOM_VERSIONS = {
- RoomVersions.V1,
- RoomVersions.V2,
- RoomVersions.V3,
- RoomVersions.STATE_V2_TEST,
- RoomVersions.V3,
-}
-
-
-class EventFormatVersions(object):
- """This is an internal enum for tracking the version of the event format,
- independently from the room version.
- """
- V1 = 1
- V2 = 2
-
-
-KNOWN_EVENT_FORMAT_VERSIONS = {
- EventFormatVersions.V1,
- EventFormatVersions.V2,
-}
-
-
ServerNoticeMsgType = "m.server_notice"
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
@@ -152,3 +119,11 @@ class UserTypes(object):
"""
SUPPORT = "support"
ALL_USER_TYPES = (SUPPORT,)
+
+
+class RelationTypes(object):
+ """The types of relations known to this server.
+ """
+ ANNOTATION = "m.annotation"
+ REPLACE = "m.replace"
+ REFERENCE = "m.reference"
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 0b46483..e916970 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -60,6 +60,7 @@ class Codes(object):
UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
+ EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
class CodeMessageException(RuntimeError):
@@ -327,9 +328,23 @@ class RoomKeysVersionError(SynapseError):
self.current_version = current_version
+class UnsupportedRoomVersionError(SynapseError):
+ """The client's request to create a room used a room version that the server does
+ not support."""
+ def __init__(self):
+ super(UnsupportedRoomVersionError, self).__init__(
+ code=400,
+ msg="Homeserver does not support this room version",
+ errcode=Codes.UNSUPPORTED_ROOM_VERSION,
+ )
+
+
class IncompatibleRoomVersionError(SynapseError):
- """A server is trying to join a room whose version it does not support."""
+ """A server is trying to join a room whose version it does not support.
+ Unlike UnsupportedRoomVersionError, it is specific to the case of the make_join
+ failing.
+ """
def __init__(self, room_version):
super(IncompatibleRoomVersionError, self).__init__(
code=400,
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 3bb5b3d..296c4a1 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -14,6 +14,8 @@
import collections
+from synapse.api.errors import LimitExceededError
+
class Ratelimiter(object):
"""
@@ -23,12 +25,13 @@ class Ratelimiter(object):
def __init__(self):
self.message_counts = collections.OrderedDict()
- def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count, update=True):
- """Can the user send a message?
+ def can_do_action(self, key, time_now_s, rate_hz, burst_count, update=True):
+ """Can the entity (e.g. user or IP address) perform the action?
Args:
- user_id: The user sending a message.
+ key: The key we should use when rate limiting. Can be a user ID
+ (when sending events), an IP address, etc.
time_now_s: The time now.
- msg_rate_hz: The long term number of messages a user can send in a
+ rate_hz: The long term number of messages a user can send in a
second.
burst_count: How many messages the user can send before being
limited.
@@ -41,10 +44,10 @@ class Ratelimiter(object):
"""
self.prune_message_counts(time_now_s)
message_count, time_start, _ignored = self.message_counts.get(
- user_id, (0., time_now_s, None),
+ key, (0., time_now_s, None),
)
time_delta = time_now_s - time_start
- sent_count = message_count - time_delta * msg_rate_hz
+ sent_count = message_count - time_delta * rate_hz
if sent_count < 0:
allowed = True
time_start = time_now_s
@@ -56,13 +59,13 @@ class Ratelimiter(object):
message_count += 1
if update:
- self.message_counts[user_id] = (
- message_count, time_start, msg_rate_hz
+ self.message_counts[key] = (
+ message_count, time_start, rate_hz
)
- if msg_rate_hz > 0:
+ if rate_hz > 0:
time_allowed = (
- time_start + (message_count - burst_count + 1) / msg_rate_hz
+ time_start + (message_count - burst_count + 1) / rate_hz
)
if time_allowed < time_now_s:
time_allowed = time_now_s
@@ -72,12 +75,22 @@ class Ratelimiter(object):
return allowed, time_allowed
def prune_message_counts(self, time_now_s):
- for user_id in list(self.message_counts.keys()):
- message_count, time_start, msg_rate_hz = (
- self.message_counts[user_id]
+ for key in list(self.message_counts.keys()):
+ message_count, time_start, rate_hz = (
+ self.message_counts[key]
)
time_delta = time_now_s - time_start
- if message_count - time_delta * msg_rate_hz > 0:
+ if message_count - time_delta * rate_hz > 0:
break
else:
- del self.message_counts[user_id]
+ del self.message_counts[key]
+
+ def ratelimit(self, key, time_now_s, rate_hz, burst_count, update=True):
+ allowed, time_allowed = self.can_do_action(
+ key, time_now_s, rate_hz, burst_count, update
+ )
+
+ if not allowed:
+ raise LimitExceededError(
+ retry_after_ms=int(1000 * (time_allowed - time_now_s)),
+ )
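The refactored `Ratelimiter` now keys on any hashable entity (a user ID, an IP address, etc.) rather than only a message sender, and the new `ratelimit()` helper raises `LimitExceededError` instead of returning a flag. A rough usage sketch based on the code above (the IP address and rates here are made up for illustration):

    import time

    from synapse.api.errors import LimitExceededError
    from synapse.api.ratelimiting import Ratelimiter

    limiter = Ratelimiter()
    for attempt in range(5):
        try:
            # e.g. rate-limit by client IP: 0.17 actions/second, burst of 3
            limiter.ratelimit("203.0.113.7", time.time(), 0.17, 3)
            print("attempt %d allowed" % attempt)
        except LimitExceededError as e:
            print("attempt %d throttled, retry in %d ms" % (attempt, e.retry_after_ms))

With a burst count of 3, the first three attempts pass and the remaining two raise, mirroring how the per-IP login limits in the sample config behave.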
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
new file mode 100644
index 0000000..b289535
--- /dev/null
+++ b/synapse/api/room_versions.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import attr
+
+
+class EventFormatVersions(object):
+ """This is an internal enum for tracking the version of the event format,
+ independently of the room version.
+ """
+ V1 = 1 # $id:server event id format
+ V2 = 2 # MSC1659-style $hash event id format: introduced for room v3
+ V3 = 3 # MSC1884-style $hash format: introduced for room v4
+
+
+KNOWN_EVENT_FORMAT_VERSIONS = {
+ EventFormatVersions.V1,
+ EventFormatVersions.V2,
+ EventFormatVersions.V3,
+}
+
+
+class StateResolutionVersions(object):
+ """Enum to identify the state resolution algorithms"""
+ V1 = 1 # room v1 state res
+ V2 = 2 # MSC1442 state res: room v2 and later
+
+
+class RoomDisposition(object):
+ STABLE = "stable"
+ UNSTABLE = "unstable"
+
+
+@attr.s(slots=True, frozen=True)
+class RoomVersion(object):
+ """An object which describes the unique attributes of a room version."""
+
+ identifier = attr.ib() # str; the identifier for this version
+ disposition = attr.ib() # str; one of the RoomDispositions
+ event_format = attr.ib() # int; one of the EventFormatVersions
+ state_res = attr.ib() # int; one of the StateResolutionVersions
+
+
+class RoomVersions(object):
+ V1 = RoomVersion(
+ "1",
+ RoomDisposition.STABLE,
+ EventFormatVersions.V1,
+ StateResolutionVersions.V1,
+ )
+ STATE_V2_TEST = RoomVersion(
+ "state-v2-test",
+ RoomDisposition.UNSTABLE,
+ EventFormatVersions.V1,
+ StateResolutionVersions.V2,
+ )
+ V2 = RoomVersion(
+ "2",
+ RoomDisposition.STABLE,
+ EventFormatVersions.V1,
+ StateResolutionVersions.V2,
+ )
+ V3 = RoomVersion(
+ "3",
+ RoomDisposition.STABLE,
+ EventFormatVersions.V2,
+ StateResolutionVersions.V2,
+ )
+ V4 = RoomVersion(
+ "4",
+ RoomDisposition.STABLE,
+ EventFormatVersions.V3,
+ StateResolutionVersions.V2,
+ )
+
+
+# the version we will give rooms which are created on this server
+DEFAULT_ROOM_VERSION = RoomVersions.V1
+
+
+KNOWN_ROOM_VERSIONS = {
+ v.identifier: v for v in (
+ RoomVersions.V1,
+ RoomVersions.V2,
+ RoomVersions.V3,
+ RoomVersions.STATE_V2_TEST,
+ RoomVersions.V4,
+ )
+} # type: dict[str, RoomVersion]
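
For illustration (the helper name is assumed, not from the patch), resolving a client-supplied version string against this table and falling back to the server default looks like:

    from synapse.api.room_versions import (
        DEFAULT_ROOM_VERSION,
        KNOWN_ROOM_VERSIONS,
        EventFormatVersions,
    )

    def lookup_room_version(identifier=None):
        # Fall back to the version this server gives newly created rooms.
        if identifier is None:
            return DEFAULT_ROOM_VERSION
        room_version = KNOWN_ROOM_VERSIONS.get(identifier)
        if room_version is None:
            raise ValueError("Unknown room version %r" % (identifier,))
        return room_version

    # Room v3 switched to the MSC1659-style event ID format:
    assert lookup_room_version("3").event_format == EventFormatVersions.V2
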
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index 8102176..3c6bddf 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,8 +22,7 @@ from six.moves.urllib.parse import urlencode
from synapse.config import ConfigError
-CLIENT_PREFIX = "/_matrix/client/api/v1"
-CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
+CLIENT_API_PREFIX = "/_matrix/client"
FEDERATION_PREFIX = "/_matrix/federation"
FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 32e8b8a..08199a5 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -22,13 +22,14 @@ import traceback
import psutil
from daemonize import Daemonize
-from twisted.internet import error, reactor
+from twisted.internet import defer, error, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory
import synapse
from synapse.app import check_bind_error
from synapse.crypto import context_factory
from synapse.util import PreserveLoggingContext
+from synapse.util.async_helpers import Linearizer
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
@@ -63,12 +64,13 @@ def start_worker_reactor(appname, config):
start_reactor(
appname,
- config.soft_file_limit,
- config.gc_thresholds,
- config.worker_pid_file,
- config.worker_daemonize,
- config.worker_cpu_affinity,
- logger,
+ soft_file_limit=config.soft_file_limit,
+ gc_thresholds=config.gc_thresholds,
+ pid_file=config.worker_pid_file,
+ daemonize=config.worker_daemonize,
+ cpu_affinity=config.worker_cpu_affinity,
+ print_pidfile=config.print_pidfile,
+ logger=logger,
)
@@ -79,6 +81,7 @@ def start_reactor(
pid_file,
daemonize,
cpu_affinity,
+ print_pidfile,
logger,
):
""" Run the reactor in the main process
@@ -93,9 +96,12 @@ def start_reactor(
pid_file (str): name of pid file to write to if daemonize is True
daemonize (bool): true to run the reactor in a background process
cpu_affinity (int|None): cpu affinity mask
+ print_pidfile (bool): whether to print the pid file, if daemonize is True
logger (logging.Logger): logger instance to pass to Daemonize
"""
+ install_dns_limiter(reactor)
+
def run():
# make sure that we run the reactor with the sentinel log context,
# otherwise other PreserveLoggingContext instances will get confused
@@ -124,6 +130,9 @@ def start_reactor(
reactor.run()
if daemonize:
+ if print_pidfile:
+ print(pid_file)
+
daemon = Daemonize(
app=appname,
pid=pid_file,
@@ -306,3 +315,81 @@ def setup_sentry(hs):
name = hs.config.worker_name if hs.config.worker_name else "master"
scope.set_tag("worker_app", app)
scope.set_tag("worker_name", name)
+
+
+def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
+ """Replaces the resolver with one that limits the number of in flight DNS
+ requests.
+
+ This is to work around https://twistedmatrix.com/trac/ticket/9620, where we
+ can run out of file descriptors and loop infinitely if we attempt to do too
+ many DNS queries at once.
+ """
+ new_resolver = _LimitedHostnameResolver(
+ reactor.nameResolver, max_dns_requests_in_flight,
+ )
+
+ reactor.installNameResolver(new_resolver)
+
+
+class _LimitedHostnameResolver(object):
+ """Wraps a IHostnameResolver, limiting the number of in-flight DNS lookups.
+ """
+
+ def __init__(self, resolver, max_dns_requests_in_flight):
+ self._resolver = resolver
+ self._limiter = Linearizer(
+ name="dns_client_limiter", max_count=max_dns_requests_in_flight,
+ )
+
+ def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,
+ addressTypes=None, transportSemantics='TCP'):
+ # Note this is happening deep within the reactor, so we don't need to
+ # worry about log contexts.
+
+ # We need this function to return `resolutionReceiver` so we do all the
+ # actual logic involving deferreds in a separate function.
+ self._resolve(
+ resolutionReceiver, hostName, portNumber,
+ addressTypes, transportSemantics,
+ )
+
+ return resolutionReceiver
+
+ @defer.inlineCallbacks
+ def _resolve(self, resolutionReceiver, hostName, portNumber=0,
+ addressTypes=None, transportSemantics='TCP'):
+
+ with (yield self._limiter.queue(())):
+ # resolveHostName doesn't return a Deferred, so we need to hook into
+ # the receiver interface to get told when resolution has finished.
+
+ deferred = defer.Deferred()
+ receiver = _DeferredResolutionReceiver(resolutionReceiver, deferred)
+
+ self._resolver.resolveHostName(
+ receiver, hostName, portNumber,
+ addressTypes, transportSemantics,
+ )
+
+ yield deferred
+
+
+class _DeferredResolutionReceiver(object):
+ """Wraps a IResolutionReceiver and simply resolves the given deferred when
+ resolution is complete
+ """
+
+ def __init__(self, receiver, deferred):
+ self._receiver = receiver
+ self._deferred = deferred
+
+ def resolutionBegan(self, resolutionInProgress):
+ self._receiver.resolutionBegan(resolutionInProgress)
+
+ def addressResolved(self, address):
+ self._receiver.addressResolved(address)
+
+ def resolutionComplete(self):
+ self._deferred.callback(())
+ self._receiver.resolutionComplete()
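
The limiter is installed once at reactor startup; a sketch of tuning it (the lower cap here is hypothetical, the default in the code above is 100):

    from twisted.internet import reactor

    from synapse.app._base import install_dns_limiter

    # Replace the reactor's name resolver with one that allows at most
    # 50 concurrent lookups; further lookups queue on the Linearizer.
    install_dns_limiter(reactor, max_dns_requests_in_flight=50)
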
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 043b48f..864f1ea 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -33,14 +33,19 @@ from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.login import LoginRestServlet
+from synapse.rest.client.v1.push_rule import PushRuleRestServlet
from synapse.rest.client.v1.room import (
JoinedRoomMemberListRestServlet,
PublicRoomListRestServlet,
@@ -48,7 +53,11 @@ from synapse.rest.client.v1.room import (
RoomMemberListRestServlet,
RoomStateRestServlet,
)
+from synapse.rest.client.v1.voip import VoipRestServlet
+from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
+from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+from synapse.rest.client.versions import VersionsRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -60,6 +69,10 @@ logger = logging.getLogger("synapse.app.client_reader")
class ClientReaderSlavedStore(
+ SlavedDeviceInboxStore,
+ SlavedDeviceStore,
+ SlavedReceiptsStore,
+ SlavedPushRuleStore,
SlavedAccountDataStore,
SlavedEventStore,
SlavedKeyStore,
@@ -96,12 +109,15 @@ class ClientReaderServer(HomeServer):
RoomEventContextServlet(self).register(resource)
RegisterRestServlet(self).register(resource)
LoginRestServlet(self).register(resource)
+ ThreepidRestServlet(self).register(resource)
+ KeyQueryServlet(self).register(resource)
+ KeyChangesServlet(self).register(resource)
+ VoipRestServlet(self).register(resource)
+ PushRuleRestServlet(self).register(resource)
+ VersionsRestServlet().register(resource)
resources.update({
- "/_matrix/client/r0": resource,
- "/_matrix/client/unstable": resource,
- "/_matrix/client/v2_alpha": resource,
- "/_matrix/client/api/v1": resource,
+ "/_matrix/client": resource,
})
root_resource = create_resource_tree(resources, NoResource())
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index b116c17..7da79dc 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
import synapse
from synapse import events
-from synapse.api.urls import FEDERATION_PREFIX
+from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
@@ -44,6 +44,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -99,6 +100,9 @@ class FederationReaderServer(HomeServer):
),
})
+ if name in ["keys", "federation"]:
+ resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index a461442..1d43f2b 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -28,6 +28,7 @@ from synapse.config.logger import setup_logging
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
@@ -37,8 +38,10 @@ from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.replication.tcp.streams._base import ReceiptsStream
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
+from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
@@ -202,6 +205,7 @@ class FederationSenderHandler(object):
"""
def __init__(self, hs, replication_client):
self.store = hs.get_datastore()
+ self._is_mine_id = hs.is_mine_id
self.federation_sender = hs.get_federation_sender()
self.replication_client = replication_client
@@ -234,6 +238,32 @@ class FederationSenderHandler(object):
elif stream_name == "events":
self.federation_sender.notify_new_events(token)
+ # ... and when new receipts happen
+ elif stream_name == ReceiptsStream.NAME:
+ run_as_background_process(
+ "process_receipts_for_federation", self._on_new_receipts, rows,
+ )
+
+ @defer.inlineCallbacks
+ def _on_new_receipts(self, rows):
+ """
+ Args:
+ rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
+ new receipts to be processed
+ """
+ for receipt in rows:
+ # we only want to send on receipts for our own users
+ if not self._is_mine_id(receipt.user_id):
+ continue
+ receipt_info = ReadReceipt(
+ receipt.room_id,
+ receipt.receipt_type,
+ receipt.user_id,
+ [receipt.event_id],
+ receipt.data,
+ )
+ yield self.federation_sender.send_read_receipt(receipt_info)
+
@defer.inlineCallbacks
def update_token(self, token):
try:
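
The shape of the object handed to send_read_receipt, with field names inferred from the positional constructor call above (all values are placeholders):

    from synapse.types import ReadReceipt

    receipt_info = ReadReceipt(
        room_id="!room:example.com",
        receipt_type="m.read",
        user_id="@alice:example.com",
        event_ids=["$some_event_id"],
        data={"ts": 1556800000000},
    )
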
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index e8b6cc3..1045d28 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -62,6 +62,7 @@ from synapse.python_dependencies import check_requirements
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
+from synapse.rest.admin import AdminRestResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.well_known import WellKnownResource
@@ -180,6 +181,7 @@ class SynapseHomeServer(HomeServer):
"/_matrix/client/v2_alpha": client_resource,
"/_matrix/client/versions": client_resource,
"/.well-known/matrix/client": WellKnownResource(self),
+ "/_synapse/admin": AdminRestResource(self),
})
if self.get_config().saml2_enabled:
@@ -376,6 +378,7 @@ def setup(config_options):
logger.info("Database prepared in %s.", config.database_config['name'])
hs.setup()
+ hs.setup_master()
@defer.inlineCallbacks
def do_acme():
@@ -517,6 +520,7 @@ def run(hs):
uptime = 0
stats["homeserver"] = hs.config.server_name
+ stats["server_context"] = hs.config.server_context
stats["timestamp"] = now
stats["uptime_seconds"] = uptime
version = sys.version_info
@@ -557,7 +561,6 @@ def run(hs):
stats["database_engine"] = hs.get_datastore().database_engine_name
stats["database_server_version"] = hs.get_datastore().get_server_version()
-
logger.info("Reporting stats to matrix.org: %s" % (stats,))
try:
yield hs.get_simple_http_client().put_json(
@@ -636,17 +639,15 @@ def run(hs):
# be quite busy the first few minutes
clock.call_later(5 * 60, start_phone_stats_home)
- if hs.config.daemonize and hs.config.print_pidfile:
- print(hs.config.pid_file)
-
_base.start_reactor(
"synapse-homeserver",
- hs.config.soft_file_limit,
- hs.config.gc_thresholds,
- hs.config.pid_file,
- hs.config.daemonize,
- hs.config.cpu_affinity,
- logger,
+ soft_file_limit=hs.config.soft_file_limit,
+ gc_thresholds=hs.config.gc_thresholds,
+ pid_file=hs.config.pid_file,
+ daemonize=hs.config.daemonize,
+ cpu_affinity=hs.config.cpu_affinity,
+ print_pidfile=hs.config.print_pidfile,
+ logger=logger,
)
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 9163b56..5388def 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -48,6 +48,7 @@ from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.replication.tcp.streams.events import EventsStreamEventRow
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
@@ -369,7 +370,9 @@ class SyncReplicationHandler(ReplicationClientHandler):
# We shouldn't get multiple rows per token for events stream, so
# we don't need to optimise this for multiple rows.
for row in rows:
- event = yield self.store.get_event(row.event_id)
+ if row.type != EventsStreamEventRow.TypeId:
+ continue
+ event = yield self.store.get_event(row.data.event_id)
extra_users = ()
if event.type == EventTypes.Member:
extra_users = (event.state_key,)
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index d1ab951..355f5aa 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -36,6 +36,10 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.replication.tcp.streams.events import (
+ EventsStream,
+ EventsStreamCurrentStateRow,
+)
from synapse.rest.client.v2_alpha import user_directory
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@@ -73,19 +77,18 @@ class UserDirectorySlaveStore(
prefilled_cache=curr_state_delta_prefill,
)
- self._current_state_delta_pos = events_max
-
def stream_positions(self):
result = super(UserDirectorySlaveStore, self).stream_positions()
- result["current_state_deltas"] = self._current_state_delta_pos
return result
def process_replication_rows(self, stream_name, token, rows):
- if stream_name == "current_state_deltas":
- self._current_state_delta_pos = token
+ if stream_name == EventsStream.NAME:
+ self._stream_id_gen.advance(token)
for row in rows:
+ if row.type != EventsStreamCurrentStateRow.TypeId:
+ continue
self._curr_state_delta_stream_cache.entity_has_changed(
- row.room_id, token
+ row.data.room_id, token
)
return super(UserDirectorySlaveStore, self).process_replication_rows(
stream_name, token, rows
@@ -170,7 +173,7 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler):
yield super(UserDirectoryReplicationHandler, self).on_rdata(
stream_name, token, rows
)
- if stream_name == "current_state_deltas":
+ if stream_name == EventsStream.NAME:
run_in_background(self._notify_directory)
@defer.inlineCallbacks
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 5e83740..5f56ab2 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -147,7 +147,7 @@ class Config(object):
@staticmethod
def read_config_file(file_path):
with open(file_path) as file_stream:
- return yaml.load(file_stream)
+ return yaml.safe_load(file_stream)
def invoke_all(self, name, *args, **kargs):
results = []
@@ -190,9 +190,7 @@ class Config(object):
Returns:
str: the yaml config file
"""
- default_config = "# vim:ft=yaml\n"
-
- default_config += "\n\n".join(
+ default_config = "\n\n".join(
dedent(conf)
for conf in self.invoke_all(
"default_config",
@@ -226,14 +224,20 @@ class Config(object):
" Defaults to the directory containing the last config file",
)
+ obj = cls()
+
+ obj.invoke_all("add_arguments", config_parser)
+
config_args = config_parser.parse_args(argv)
config_files = find_config_files(search_paths=config_args.config_path)
- obj = cls()
obj.read_config_files(
config_files, keys_directory=config_args.keys_directory, generate_keys=False
)
+
+ obj.invoke_all("read_arguments", config_args)
+
return obj
@classmethod
@@ -307,19 +311,26 @@ class Config(object):
"Must specify a server_name to a generate config for."
" Pass -H server.name."
)
+
+ config_str = obj.generate_config(
+ config_dir_path=config_dir_path,
+ data_dir_path=os.getcwd(),
+ server_name=server_name,
+ report_stats=(config_args.report_stats == "yes"),
+ generate_secrets=True,
+ )
+
if not cls.path_exists(config_dir_path):
os.makedirs(config_dir_path)
with open(config_path, "w") as config_file:
- config_str = obj.generate_config(
- config_dir_path=config_dir_path,
- data_dir_path=os.getcwd(),
- server_name=server_name,
- report_stats=(config_args.report_stats == "yes"),
- generate_secrets=True,
+ config_file.write(
+ "# vim:ft=yaml\n\n"
)
- config = yaml.load(config_str)
- obj.invoke_all("generate_files", config)
config_file.write(config_str)
+
+ config = yaml.safe_load(config_str)
+ obj.invoke_all("generate_files", config)
+
print(
(
"A config file has been generated in %r for server name"
@@ -389,7 +400,7 @@ class Config(object):
server_name=server_name,
generate_secrets=False,
)
- config = yaml.load(config_string)
+ config = yaml.safe_load(config_string)
config.pop("log_config")
config.update(specified_config)
@@ -404,7 +415,10 @@ class Config(object):
self.invoke_all("generate_files", config)
return
- self.invoke_all("read_config", config)
+ self.parse_config_dict(config)
+
+ def parse_config_dict(self, config_dict):
+ self.invoke_all("read_config", config_dict)
def find_config_files(search_paths):
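
Why this file (and the config modules below) moves from yaml.load to yaml.safe_load: the default loader will construct arbitrary Python objects from tagged YAML, whereas the safe loader only builds plain scalars, lists and mappings. A quick illustration:

    import yaml

    yaml.safe_load("a: 1")  # fine: {'a': 1}

    # The default loader would import os and call os.system here; the
    # safe loader refuses the tag instead.
    try:
        yaml.safe_load("!!python/object/apply:os.system ['echo owned']")
    except yaml.constructor.ConstructorError:
        pass  # rejected: no constructor for python/object tags
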
diff --git a/synapse/config/api.py b/synapse/config/api.py
index e8a753f..5eb4f86 100644
--- a/synapse/config/api.py
+++ b/synapse/config/api.py
@@ -34,10 +34,10 @@ class ApiConfig(Config):
# A list of event types that will be included in the room_invite_state
#
- room_invite_state_types:
- - "{JoinRules}"
- - "{CanonicalAlias}"
- - "{RoomAvatar}"
- - "{RoomEncryption}"
- - "{Name}"
+ #room_invite_state_types:
+ # - "{JoinRules}"
+ # - "{CanonicalAlias}"
+ # - "{RoomAvatar}"
+ # - "{RoomEncryption}"
+ # - "{Name}"
""".format(**vars(EventTypes))
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index c260d59..7e89d34 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -37,14 +37,16 @@ class AppServiceConfig(Config):
def default_config(cls, **kwargs):
return """\
- # A list of application service config file to use
+ # A list of application service config files to use
#
- app_service_config_files: []
+ #app_service_config_files:
+ # - app_service_1.yaml
+ # - app_service_2.yaml
- # Whether or not to track application service IP addresses. Implicitly
+ # Uncomment to enable tracking of application service IP addresses. Implicitly
# enables MAU tracking for application service users.
#
- track_appservice_user_ips: False
+ #track_appservice_user_ips: True
"""
@@ -66,7 +68,7 @@ def load_appservices(hostname, config_files):
try:
with open(config_file, 'r') as f:
appservice = _load_appservice(
- hostname, yaml.load(f), config_file
+ hostname, yaml.safe_load(f), config_file
)
if appservice.id in seen_ids:
raise ConfigError(
diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py
index d25196b..f7eebf2 100644
--- a/synapse/config/captcha.py
+++ b/synapse/config/captcha.py
@@ -18,11 +18,16 @@ from ._base import Config
class CaptchaConfig(Config):
def read_config(self, config):
- self.recaptcha_private_key = config["recaptcha_private_key"]
- self.recaptcha_public_key = config["recaptcha_public_key"]
- self.enable_registration_captcha = config["enable_registration_captcha"]
+ self.recaptcha_private_key = config.get("recaptcha_private_key")
+ self.recaptcha_public_key = config.get("recaptcha_public_key")
+ self.enable_registration_captcha = config.get(
+ "enable_registration_captcha", False
+ )
self.captcha_bypass_secret = config.get("captcha_bypass_secret")
- self.recaptcha_siteverify_api = config["recaptcha_siteverify_api"]
+ self.recaptcha_siteverify_api = config.get(
+ "recaptcha_siteverify_api",
+ "https://www.recaptcha.net/recaptcha/api/siteverify",
+ )
def default_config(self, **kwargs):
return """\
@@ -31,21 +36,23 @@ class CaptchaConfig(Config):
# This Home Server's ReCAPTCHA public key.
#
- recaptcha_public_key: "YOUR_PUBLIC_KEY"
+ #recaptcha_public_key: "YOUR_PUBLIC_KEY"
# This Home Server's ReCAPTCHA private key.
#
- recaptcha_private_key: "YOUR_PRIVATE_KEY"
+ #recaptcha_private_key: "YOUR_PRIVATE_KEY"
# Enables ReCaptcha checks when registering, preventing signup
# unless a captcha is answered. Requires a valid ReCaptcha
# public/private key.
#
- enable_registration_captcha: False
+ #enable_registration_captcha: false
# A secret key used to bypass the captcha test entirely.
+ #
#captcha_bypass_secret: "YOUR_SECRET_HERE"
# The API endpoint to use for verifying m.login.recaptcha responses.
- recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
+ #
+ #recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
"""
diff --git a/synapse/config/database.py b/synapse/config/database.py
index c889014..3c27ed6 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -49,7 +49,8 @@ class DatabaseConfig(Config):
def default_config(self, data_dir_path, **kwargs):
database_path = os.path.join(data_dir_path, "homeserver.db")
return """\
- # Database configuration
+ ## Database ##
+
database:
# The database engine name
name: "sqlite3"
@@ -59,7 +60,8 @@ class DatabaseConfig(Config):
database: "%(database_path)s"
# Number of events to cache in memory.
- event_cache_size: "10K"
+ #
+ #event_cache_size: 10K
""" % locals()
def read_arguments(self, args):
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 93d70cf..342a6ce 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -71,6 +71,12 @@ class EmailConfig(Config):
self.email_notif_from = email_config["notif_from"]
self.email_notif_template_html = email_config["notif_template_html"]
self.email_notif_template_text = email_config["notif_template_text"]
+ self.email_expiry_template_html = email_config.get(
+ "expiry_template_html", "notice_expiry.html",
+ )
+ self.email_expiry_template_text = email_config.get(
+ "expiry_template_text", "notice_expiry.txt",
+ )
template_dir = email_config.get("template_dir")
# we need an absolute path, because we change directory after starting (and
@@ -120,7 +126,7 @@ class EmailConfig(Config):
def default_config(self, config_dir_path, server_name, **kwargs):
return """
- # Enable sending emails for notification events
+ # Enable sending emails for notification events or expiry notices
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.
@@ -142,6 +148,9 @@ class EmailConfig(Config):
# #template_dir: res/templates
# notif_template_html: notif_mail.html
# notif_template_text: notif_mail.txt
+ # # Templates for account expiry notices.
+ # expiry_template_html: notice_expiry.html
+ # expiry_template_text: notice_expiry.txt
# notif_for_new_users: True
# riot_base_url: "http://localhost/riot"
"""
diff --git a/synapse/config/groups.py b/synapse/config/groups.py
index 46933a9..e4be172 100644
--- a/synapse/config/groups.py
+++ b/synapse/config/groups.py
@@ -23,9 +23,9 @@ class GroupsConfig(Config):
def default_config(self, **kwargs):
return """\
- # Whether to allow non server admins to create groups on this server
+ # Uncomment to allow non-server-admin users to create groups on this server
#
- enable_group_creation: false
+ #enable_group_creation: true
# If enabled, non server admins can only create groups with local parts
# starting with this prefix
diff --git a/synapse/config/key.py b/synapse/config/key.py
index 81c54b2..eb10259 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -38,16 +38,27 @@ logger = logging.getLogger(__name__)
class KeyConfig(Config):
def read_config(self, config):
- self.signing_key = self.read_signing_key(config["signing_key_path"])
- self.signing_key_path = config["signing_key_path"]
+ # the signing key can be specified inline or in a separate file
+ if "signing_key" in config:
+ self.signing_key = read_signing_keys([config["signing_key"]])
+ else:
+ self.signing_key_path = config["signing_key_path"]
+ self.signing_key = self.read_signing_key(self.signing_key_path)
+
self.old_signing_keys = self.read_old_signing_keys(
config.get("old_signing_keys", {})
)
self.key_refresh_interval = self.parse_duration(
- config["key_refresh_interval"]
+ config.get("key_refresh_interval", "1d"),
)
self.perspectives = self.read_perspectives(
- config["perspectives"]
+ config.get("perspectives", {}).get("servers", {
+ "matrix.org": {"verify_keys": {
+ "ed25519:auto": {
+ "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
+ }
+ }}
+ })
)
self.macaroon_secret_key = config.get(
@@ -89,7 +100,7 @@ class KeyConfig(Config):
# Used to enable access token expiration.
#
- expire_access_token: False
+ #expire_access_token: False
# a secret which is used to calculate HMACs for form values, to stop
# falsification of values. Must be specified for the User Consent
@@ -118,21 +129,21 @@ class KeyConfig(Config):
# Determines how quickly servers will query to check which keys
# are still valid.
#
- key_refresh_interval: "1d" # 1 Day.
+ #key_refresh_interval: 1d
# The trusted servers to download signing keys from.
#
- perspectives:
- servers:
- "matrix.org":
- verify_keys:
- "ed25519:auto":
- key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+ #perspectives:
+ # servers:
+ # "matrix.org":
+ # verify_keys:
+ # "ed25519:auto":
+ # key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
""" % locals()
- def read_perspectives(self, perspectives_config):
+ def read_perspectives(self, perspectives_servers):
servers = {}
- for server_name, server_config in perspectives_config["servers"].items():
+ for server_name, server_config in perspectives_servers.items():
for key_id, key_data in server_config["verify_keys"].items():
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index f6940b6..c1febbe 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -81,7 +81,9 @@ class LoggingConfig(Config):
def default_config(self, config_dir_path, server_name, **kwargs):
log_config = os.path.join(config_dir_path, server_name + ".log.config")
- return """
+ return """\
+ ## Logging ##
+
# A yaml python logging config file
#
log_config: "%(log_config)s"
@@ -193,7 +195,7 @@ def setup_logging(config, use_worker_options=False):
else:
def load_log_config():
with open(log_config, 'r') as f:
- logging.config.dictConfig(yaml.load(f))
+ logging.config.dictConfig(yaml.safe_load(f))
def sighup(*args):
# it might be better to use a file watcher or something for this.
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index ed0498c..2de5197 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -24,7 +24,7 @@ MISSING_SENTRY = (
class MetricsConfig(Config):
def read_config(self, config):
- self.enable_metrics = config["enable_metrics"]
+ self.enable_metrics = config.get("enable_metrics", False)
self.report_stats = config.get("report_stats", None)
self.metrics_port = config.get("metrics_port")
self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")
@@ -48,7 +48,7 @@ class MetricsConfig(Config):
# Enable collection and rendering of performance metrics
#
- enable_metrics: False
+ #enable_metrics: False
# Enable sentry integration
# NOTE: While attempts are made to ensure that the logs don't contain
diff --git a/synapse/config/password.py b/synapse/config/password.py
index 2a52b9d..eea59e7 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -22,16 +22,21 @@ class PasswordConfig(Config):
def read_config(self, config):
password_config = config.get("password_config", {})
+ if password_config is None:
+ password_config = {}
+
self.password_enabled = password_config.get("enabled", True)
self.password_pepper = password_config.get("pepper", "")
def default_config(self, config_dir_path, server_name, **kwargs):
- return """
- # Enable password for login.
- #
+ return """\
password_config:
- enabled: true
+ # Uncomment to disable password login
+ #
+ #enabled: false
+
# Uncomment and change to a secret random string for extra security.
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
- #pepper: ""
+ #
+ #pepper: "EVEN_MORE_SECRET"
"""
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 54b71e6..5a9adac 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -15,51 +15,143 @@
from ._base import Config
-class RatelimitConfig(Config):
+class RateLimitConfig(object):
+ def __init__(self, config, defaults={"per_second": 0.17, "burst_count": 3.0}):
+ self.per_second = config.get("per_second", defaults["per_second"])
+ self.burst_count = config.get("burst_count", defaults["burst_count"])
+
+
+class FederationRateLimitConfig(object):
+ _items_and_default = {
+ "window_size": 10000,
+ "sleep_limit": 10,
+ "sleep_delay": 500,
+ "reject_limit": 50,
+ "concurrent": 3,
+ }
+
+ def __init__(self, **kwargs):
+ for i in self._items_and_default.keys():
+ setattr(self, i, kwargs.get(i) or self._items_and_default[i])
+
+class RatelimitConfig(Config):
def read_config(self, config):
- self.rc_messages_per_second = config["rc_messages_per_second"]
- self.rc_message_burst_count = config["rc_message_burst_count"]
- self.federation_rc_window_size = config["federation_rc_window_size"]
- self.federation_rc_sleep_limit = config["federation_rc_sleep_limit"]
- self.federation_rc_sleep_delay = config["federation_rc_sleep_delay"]
- self.federation_rc_reject_limit = config["federation_rc_reject_limit"]
- self.federation_rc_concurrent = config["federation_rc_concurrent"]
+ # Load the new-style messages config if it exists. Otherwise fall back
+ # to the old method.
+ if "rc_message" in config:
+ self.rc_message = RateLimitConfig(
+ config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0}
+ )
+ else:
+ self.rc_message = RateLimitConfig(
+ {
+ "per_second": config.get("rc_messages_per_second", 0.2),
+ "burst_count": config.get("rc_message_burst_count", 10.0),
+ }
+ )
+
+ # Load the new-style federation config, if it exists. Otherwise, fall
+ # back to the old method.
+ if "federation_rc" in config:
+ self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
+ else:
+ self.rc_federation = FederationRateLimitConfig(
+ **{
+ "window_size": config.get("federation_rc_window_size"),
+ "sleep_limit": config.get("federation_rc_sleep_limit"),
+ "sleep_delay": config.get("federation_rc_sleep_delay"),
+ "reject_limit": config.get("federation_rc_reject_limit"),
+ "concurrent": config.get("federation_rc_concurrent"),
+ }
+ )
+
+ self.rc_registration = RateLimitConfig(config.get("rc_registration", {}))
+
+ rc_login_config = config.get("rc_login", {})
+ self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {}))
+ self.rc_login_account = RateLimitConfig(rc_login_config.get("account", {}))
+ self.rc_login_failed_attempts = RateLimitConfig(
+ rc_login_config.get("failed_attempts", {})
+ )
+
+ self.federation_rr_transactions_per_room_per_second = config.get(
+ "federation_rr_transactions_per_room_per_second", 50
+ )
def default_config(self, **kwargs):
return """\
## Ratelimiting ##
- # Number of messages a client can send per second
+ # Ratelimiting settings for client actions (registration, login, messaging).
#
- rc_messages_per_second: 0.2
-
- # Number of message a client can send before being throttled
+ # Each ratelimiting configuration is made up of two parameters:
+ # - per_second: number of requests a client can send per second.
+ # - burst_count: number of requests a client can send before being throttled.
#
- rc_message_burst_count: 10.0
-
- # The federation window size in milliseconds
+ # Synapse currently uses the following configurations:
+ # - one for messages that ratelimits sending based on the account the client
+ # is using
+ # - one for registration that ratelimits registration requests based on the
+ # client's IP address.
+ # - one for login that ratelimits login requests based on the client's IP
+ # address.
+ # - one for login that ratelimits login requests based on the account the
+ # client is attempting to log into.
+ # - one for login that ratelimits login requests based on the account the
+ # client is attempting to log into, based on the amount of failed login
+ # attempts for this account.
#
- federation_rc_window_size: 1000
-
- # The number of federation requests from a single server in a window
- # before the server will delay processing the request.
+ # The defaults are as shown below.
#
- federation_rc_sleep_limit: 10
-
- # The duration in milliseconds to delay processing events from
- # remote servers by if they go over the sleep limit.
+ #rc_message:
+ # per_second: 0.2
+ # burst_count: 10
+ #
+ #rc_registration:
+ # per_second: 0.17
+ # burst_count: 3
#
- federation_rc_sleep_delay: 500
+ #rc_login:
+ # address:
+ # per_second: 0.17
+ # burst_count: 3
+ # account:
+ # per_second: 0.17
+ # burst_count: 3
+ # failed_attempts:
+ # per_second: 0.17
+ # burst_count: 3
- # The maximum number of concurrent federation requests allowed
- # from a single server
+
+ # Ratelimiting settings for incoming federation
+ #
+ # The rc_federation configuration is made up of the following settings:
+ # - window_size: window size in milliseconds
+ # - sleep_limit: number of federation requests from a single server in
+ # a window before the server will delay processing the request.
+ # - sleep_delay: duration in milliseconds to delay processing events
+ # from remote servers by if they go over the sleep limit.
+ # - reject_limit: maximum number of concurrent federation requests
+ # allowed from a single server
+ # - concurrent: number of federation requests to concurrently process
+ # from a single server
#
- federation_rc_reject_limit: 50
+ # The defaults are as shown below.
+ #
+ #rc_federation:
+ # window_size: 1000
+ # sleep_limit: 10
+ # sleep_delay: 500
+ # reject_limit: 50
+ # concurrent: 3
- # The number of federation requests to concurrently process from a
- # single server
+ # Target outgoing federation transaction frequency for sending read-receipts,
+ # per-room.
+ #
+ # If we end up trying to send out more read-receipts, they will get buffered up
+ # into fewer transactions.
#
- federation_rc_concurrent: 3
+ #federation_rr_transactions_per_room_per_second: 50
"""
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 2881482..693288f 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -20,23 +20,54 @@ from synapse.types import RoomAlias
from synapse.util.stringutils import random_string_with_symbols
+class AccountValidityConfig(Config):
+ def __init__(self, config, synapse_config):
+ self.enabled = config.get("enabled", False)
+ self.renew_by_email_enabled = ("renew_at" in config)
+
+ if self.enabled:
+ if "period" in config:
+ self.period = self.parse_duration(config["period"])
+ else:
+ raise ConfigError("'period' is required when using account validity")
+
+ if "renew_at" in config:
+ self.renew_at = self.parse_duration(config["renew_at"])
+
+ if "renew_email_subject" in config:
+ self.renew_email_subject = config["renew_email_subject"]
+ else:
+ self.renew_email_subject = "Renew your %(app)s account"
+
+ if self.renew_by_email_enabled and "public_baseurl" not in synapse_config:
+ raise ConfigError("Can't send renewal emails without 'public_baseurl'")
+
+
class RegistrationConfig(Config):
def read_config(self, config):
self.enable_registration = bool(
- strtobool(str(config["enable_registration"]))
+ strtobool(str(config.get("enable_registration", False)))
)
if "disable_registration" in config:
self.enable_registration = not bool(
strtobool(str(config["disable_registration"]))
)
+ self.account_validity = AccountValidityConfig(
+ config.get("account_validity", {}), config,
+ )
+
self.registrations_require_3pid = config.get("registrations_require_3pid", [])
self.allowed_local_3pids = config.get("allowed_local_3pids", [])
+ self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
self.registration_shared_secret = config.get("registration_shared_secret")
self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
- self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
+ self.trusted_third_party_id_servers = config.get(
+ "trusted_third_party_id_servers",
+ ["matrix.org", "vector.im"],
+ )
self.default_identity_server = config.get("default_identity_server")
self.allow_guest_access = config.get("allow_guest_access", False)
@@ -64,9 +95,47 @@ class RegistrationConfig(Config):
return """\
## Registration ##
+ #
+ # Registration can be rate-limited using the parameters in the "Ratelimiting"
+ # section of this file.
# Enable registration for new users.
- enable_registration: False
+ #
+ #enable_registration: false
+
+ # Optional account validity configuration. This allows accounts to be denied
+ # any request after a given period.
+ #
+ # ``enabled`` defines whether the account validity feature is enabled. Defaults
+ # to False.
+ #
+ # ``period`` allows setting the period for which an account is considered
+ # valid after its registration. When renewing the account, its validity period
+ # will be extended by this amount of time. This parameter is required when using
+ # the account validity feature.
+ #
+ # ``renew_at`` is the amount of time before an account's expiry date at which
+ # Synapse will send an email to the account's email address with a renewal link.
+ # This needs the ``email`` and ``public_baseurl`` configuration sections to be
+ # filled.
+ #
+ # ``renew_email_subject`` is the subject of the email sent out with the renewal
+ # link. ``%%(app)s`` can be used as a placeholder for the ``app_name`` parameter
+ # from the ``email`` section.
+ #
+ # Once this feature is enabled, Synapse will look for registered users without an
+ # expiration date at startup and will add one to every account it finds, using the
+ # current settings at that time.
+ # This means that, if a validity period is set and Synapse is restarted (deriving
+ # expiration dates from the current validity period), and the validity period is
+ # later changed and Synapse restarted again, existing users' expiration dates
+ # won't be updated unless their accounts are manually renewed.
+ #
+ #account_validity:
+ # enabled: True
+ # period: 6w
+ # renew_at: 1w
+ # renew_email_subject: "Renew your %%(app)s account"
# The user must provide all of the below types of 3PID when registering.
#
@@ -77,7 +146,7 @@ class RegistrationConfig(Config):
# Explicitly disable asking for MSISDNs from the registration
# flow (overrides registrations_require_3pid if MSISDNs are set as required)
#
- #disable_msisdn_registration: True
+ #disable_msisdn_registration: true
# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
@@ -90,8 +159,12 @@ class RegistrationConfig(Config):
# - medium: msisdn
# pattern: '\\+44'
- # If set, allows registration by anyone who also has the shared
- # secret, even if registration is otherwise disabled.
+ # Enable 3PIDs lookup requests to identity servers from this server.
+ #
+ #enable_3pid_lookup: true
+
+ # If set, allows registration of standard or admin accounts by anyone who
+ # has the shared secret, even if registration is otherwise disabled.
#
%(registration_shared_secret)s
@@ -101,13 +174,13 @@ class RegistrationConfig(Config):
# N.B. that increasing this will exponentially increase the time required
# to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
#
- bcrypt_rounds: 12
+ #bcrypt_rounds: 12
# Allows users to register as guests without a password/email/etc, and
# participate in rooms hosted on this server which have been made
# accessible to anonymous users.
#
- allow_guest_access: False
+ #allow_guest_access: false
# The identity server which we suggest that clients should use when users log
# in on this server.
@@ -123,9 +196,9 @@ class RegistrationConfig(Config):
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
#
- trusted_third_party_id_servers:
- - matrix.org
- - vector.im
+ #trusted_third_party_id_servers:
+ # - matrix.org
+ # - vector.im
# Users who register on this homeserver will automatically be joined
# to these rooms
@@ -139,7 +212,7 @@ class RegistrationConfig(Config):
# Setting to false means that if the rooms are not manually created,
# users cannot be auto-joined since they do not exist.
#
- autocreate_auto_join_rooms: true
+ #autocreate_auto_join_rooms: true
""" % locals()
def add_arguments(self, parser):
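
One consequence of the checks in AccountValidityConfig above, sketched with a hypothetical config dict: asking for renewal emails without a public_baseurl fails at startup.

    config = {
        "account_validity": {
            "enabled": True,
            "period": "6w",
            "renew_at": "1w",  # implies renew_by_email_enabled
        },
        # "public_baseurl": "https://matrix.example.com/",  # required here
    }
    # AccountValidityConfig(config["account_validity"], config) raises
    # ConfigError("Can't send renewal emails without 'public_baseurl'")
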
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index d20cda9..da2b32b 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -19,6 +19,36 @@ from synapse.util.module_loader import load_module
from ._base import Config, ConfigError
+DEFAULT_THUMBNAIL_SIZES = [
+ {
+ "width": 32,
+ "height": 32,
+ "method": "crop",
+ }, {
+ "width": 96,
+ "height": 96,
+ "method": "crop",
+ }, {
+ "width": 320,
+ "height": 240,
+ "method": "scale",
+ }, {
+ "width": 640,
+ "height": 480,
+ "method": "scale",
+ }, {
+ "width": 800,
+ "height": 600,
+ "method": "scale"
+ },
+]
+
+THUMBNAIL_SIZE_YAML = """\
+ # - width: %(width)i
+ # height: %(height)i
+ # method: %(method)s
+"""
+
MISSING_NETADDR = (
"Missing netaddr library. This is required for URL preview API."
)
@@ -75,9 +105,9 @@ def parse_thumbnail_requirements(thumbnail_sizes):
class ContentRepositoryConfig(Config):
def read_config(self, config):
- self.max_upload_size = self.parse_size(config["max_upload_size"])
- self.max_image_pixels = self.parse_size(config["max_image_pixels"])
- self.max_spider_size = self.parse_size(config["max_spider_size"])
+ self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
+ self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
+ self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
self.media_store_path = self.ensure_directory(config["media_store_path"])
@@ -137,9 +167,9 @@ class ContentRepositoryConfig(Config):
)
self.uploads_path = self.ensure_directory(config["uploads_path"])
- self.dynamic_thumbnails = config["dynamic_thumbnails"]
+ self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
self.thumbnail_requirements = parse_thumbnail_requirements(
- config["thumbnail_sizes"]
+ config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES),
)
self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled:
@@ -180,6 +210,13 @@ class ContentRepositoryConfig(Config):
def default_config(self, data_dir_path, **kwargs):
media_store = os.path.join(data_dir_path, "media_store")
uploads_path = os.path.join(data_dir_path, "uploads")
+
+ formatted_thumbnail_sizes = "".join(
+ THUMBNAIL_SIZE_YAML % s for s in DEFAULT_THUMBNAIL_SIZES
+ )
+ # strip final NL
+ formatted_thumbnail_sizes = formatted_thumbnail_sizes[:-1]
+
return r"""
# Directory where uploaded images and attachments are stored.
#
@@ -206,11 +243,11 @@ class ContentRepositoryConfig(Config):
# The largest allowed upload size in bytes
#
- max_upload_size: "10M"
+ #max_upload_size: 10M
# Maximum number of pixels that will be thumbnailed
#
- max_image_pixels: "32M"
+ #max_image_pixels: 32M
# Whether to generate new thumbnails on the fly to precisely match
# the resolution requested by the client. If true then whenever
@@ -218,32 +255,19 @@ class ContentRepositoryConfig(Config):
# generate a new thumbnail. If false the server will pick a thumbnail
# from a precalculated list.
#
- dynamic_thumbnails: false
+ #dynamic_thumbnails: false
# List of thumbnails to precalculate when an image is uploaded.
#
- thumbnail_sizes:
- - width: 32
- height: 32
- method: crop
- - width: 96
- height: 96
- method: crop
- - width: 320
- height: 240
- method: scale
- - width: 640
- height: 480
- method: scale
- - width: 800
- height: 600
- method: scale
+ #thumbnail_sizes:
+%(formatted_thumbnail_sizes)s
# Is the preview URL API enabled?
- # 'False' by default: uncomment the following to enable it (and specify a
+ #
+ # 'false' by default: uncomment the following to enable it (and specify a
# url_preview_ip_range_blacklist blacklist).
#
- #url_preview_enabled: True
+ #url_preview_enabled: true
# List of IP address CIDR ranges that the URL preview spider is denied
# from accessing. There are no defaults: you must explicitly
@@ -253,8 +277,11 @@ class ContentRepositoryConfig(Config):
# synapse to issue arbitrary GET requests to your internal services,
# causing serious security issues.
#
- # This must be specified if url_preview_enabled. It is recommended that you
- # uncomment the following list as a starting point.
+ # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+ # listed here, since they correspond to unroutable addresses.)
+ #
+ # This must be specified if url_preview_enabled is set. It is recommended that
+ # you uncomment the following list as a starting point.
#
#url_preview_ip_range_blacklist:
# - '127.0.0.0/8'
@@ -311,6 +338,6 @@ class ContentRepositoryConfig(Config):
# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
# The largest allowed URL preview spidering size in bytes
- max_spider_size: "10M"
-
+ #
+ #max_spider_size: 10M
""" % locals()
diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py
index 9b897ab..8a9fded 100644
--- a/synapse/config/room_directory.py
+++ b/synapse/config/room_directory.py
@@ -20,6 +20,10 @@ from ._base import Config, ConfigError
class RoomDirectoryConfig(Config):
def read_config(self, config):
+ self.enable_room_list_search = config.get(
+ "enable_room_list_search", True,
+ )
+
alias_creation_rules = config.get("alias_creation_rules")
if alias_creation_rules is not None:
@@ -54,6 +58,12 @@ class RoomDirectoryConfig(Config):
def default_config(self, config_dir_path, server_name, **kwargs):
return """
+ # Uncomment to disable searching the public room list. When disabled,
+ # searches of local and remote room lists by local and remote users
+ # will always return an empty list.
+ #
+ #enable_room_list_search: false
+
# The `alias_creation` option controls who's allowed to create aliases
# on this server.
#
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index aff0a1f..aa6eac2 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2018 New Vector Ltd.
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -64,7 +64,7 @@ class SAML2Config(Config):
}
def default_config(self, config_dir_path, server_name, **kwargs):
- return """
+ return """\
# Enable SAML2 for registration and login. Uses pysaml2.
#
# `sp_config` is the configuration for the pysaml2 Service Provider.
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 4200f10..f34aa42 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,6 +18,8 @@
import logging
import os.path
+from netaddr import IPSet
+
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.python_dependencies import DependencyException, check_requirements
@@ -37,6 +40,7 @@ class ServerConfig(Config):
def read_config(self, config):
self.server_name = config["server_name"]
+ self.server_context = config.get("server_context", None)
try:
parse_and_validate_server_name(self.server_name)
@@ -45,7 +49,7 @@ class ServerConfig(Config):
self.pid_file = self.abspath(config.get("pid_file"))
self.web_client_location = config.get("web_client_location", None)
- self.soft_file_limit = config["soft_file_limit"]
+ self.soft_file_limit = config.get("soft_file_limit", 0)
self.daemonize = config.get("daemonize")
self.print_pidfile = config.get("print_pidfile")
self.user_agent_suffix = config.get("user_agent_suffix")
@@ -71,6 +75,19 @@ class ServerConfig(Config):
# master, potentially causing inconsistency.
self.enable_media_repo = config.get("enable_media_repo", True)
+ # Whether to require authentication to retrieve profile data (avatars,
+ # display names) of other users through the client API.
+ self.require_auth_for_profile_requests = config.get(
+ "require_auth_for_profile_requests", False,
+ )
+
+ # If set to 'True', requires authentication to access the server's
+ # public rooms directory through the client API, and forbids any other
+ # homeserver to fetch it via federation.
+ self.restrict_public_rooms_to_local_users = config.get(
+ "restrict_public_rooms_to_local_users", False,
+ )
+
# whether to enable search. If disabled, new entries will not be inserted
# into the search tables and they will not be indexed. Users will receive
# errors when attempting to search for messages.
@@ -84,6 +101,11 @@ class ServerConfig(Config):
"block_non_admin_invites", False,
)
+ # Whether to enable experimental MSC1849 (aka relations) support
+ self.experimental_msc1849_support_enabled = config.get(
+ "experimental_msc1849_support_enabled", False,
+ )
+
# Options to control access by tracking MAU
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
self.max_mau_value = 0
@@ -113,19 +135,54 @@ class ServerConfig(Config):
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist = None
federation_domain_whitelist = config.get(
- "federation_domain_whitelist", None
+ "federation_domain_whitelist", None,
)
- # turn the whitelist into a hash for speed of lookup
+
if federation_domain_whitelist is not None:
+ # turn the whitelist into a hash for speed of lookup
self.federation_domain_whitelist = {}
+
for domain in federation_domain_whitelist:
self.federation_domain_whitelist[domain] = True
+ self.federation_ip_range_blacklist = config.get(
+ "federation_ip_range_blacklist", [],
+ )
+
+ # Attempt to create an IPSet from the given ranges
+ try:
+ self.federation_ip_range_blacklist = IPSet(
+ self.federation_ip_range_blacklist
+ )
+
+ # Always blacklist 0.0.0.0, ::
+ self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
+ except Exception as e:
+ raise ConfigError(
+ "Invalid range(s) provided in "
+ "federation_ip_range_blacklist: %s" % e
+ )
+
if self.public_baseurl is not None:
if self.public_baseurl[-1] != '/':
self.public_baseurl += '/'
self.start_pushers = config.get("start_pushers", True)
+ # (undocumented) option for torturing the worker-mode replication a bit,
+ # for testing. The value defines the number of milliseconds to pause before
+ # sending out any replication updates.
+ self.replication_torture_level = config.get("replication_torture_level")
+
+ # Whether to require a user to be in the room to add an alias to it.
+ # Defaults to True.
+ self.require_membership_for_aliases = config.get(
+ "require_membership_for_aliases", True,
+ )
+
+ # Whether to allow per-room membership profiles through the sending of
+ # membership events with profile information that differs from the
+ # target's global profile.
+ self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
+
self.listeners = []
for listener in config.get("listeners", []):
if not isinstance(listener.get("port", None), int):
@@ -260,9 +317,11 @@ class ServerConfig(Config):
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
# This is also the last part of your UserID.
+ #
server_name: "%(server_name)s"
# When running as a daemon, the file to store the pid in
+ #
pid_file: %(pid_file)s
# CPU affinity mask. Setting this restricts the CPUs on which the
@@ -304,10 +363,26 @@ class ServerConfig(Config):
# Set the soft limit on the number of file descriptors synapse can use
# Zero is used to indicate synapse should set the soft limit to the
# hard limit.
- soft_file_limit: 0
+ #
+ #soft_file_limit: 0
# Set to false to disable presence tracking on this homeserver.
- use_presence: true
+ #
+ #use_presence: false
+
+ # Whether to require authentication to retrieve profile data (avatars,
+ # display names) of other users through the client API. Defaults to
+ # 'false'. Note that profile data is also available via the federation
+ # API, so this setting is of limited value if federation is enabled on
+ # the server.
+ #
+ #require_auth_for_profile_requests: true
+
+ # If set to 'true', requires authentication to access the server's
+ # public rooms directory through the client API, and forbids any other
+ # homeserver to fetch it via federation. Defaults to 'false'.
+ #
+ #restrict_public_rooms_to_local_users: true
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
@@ -341,6 +416,24 @@ class ServerConfig(Config):
# - nyc.example.com
# - syd.example.com
+ # Prevent federation requests from being sent to the following
+ # blacklisted IP address CIDR ranges. If this option is not specified, or
+ # specified with an empty list, no IP range blacklist will be enforced.
+ #
+ # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+ # listed here, since they correspond to unroutable addresses.)
+ #
+ federation_ip_range_blacklist:
+ - '127.0.0.0/8'
+ - '10.0.0.0/8'
+ - '172.16.0.0/12'
+ - '192.168.0.0/16'
+ - '100.64.0.0/10'
+ - '169.254.0.0/16'
+ - '::1/128'
+ - 'fe80::/64'
+ - 'fc00::/7'
+
# List of ports that Synapse should listen on, their purpose and their
# configuration.
#
@@ -376,8 +469,8 @@ class ServerConfig(Config):
#
# Valid resource names are:
#
- # client: the client-server API (/_matrix/client). Also implies 'media' and
- # 'static'.
+ # client: the client-server API (/_matrix/client), and the synapse admin
+ # API (/_synapse/admin). Also implies 'media' and 'static'.
#
# consent: user consent forms (/_matrix/consent). See
# docs/consent_tracking.md.
@@ -475,6 +568,20 @@ class ServerConfig(Config):
#mau_limit_reserved_threepids:
# - medium: 'email'
# address: 'reserved_user@example.com'
+
+ # Used by phonehome stats to group together related servers.
+ #server_context: context
+
+ # Whether to require a user to be in the room to add an alias to it.
+ # Defaults to 'true'.
+ #
+ #require_membership_for_aliases: false
+
+ # Whether to allow per-room membership profiles through the sending of
+ # membership events with profile information that differs from the
+ # target's global profile.
+ # Defaults to 'true'.
+ #
+ #allow_per_room_profiles: false
""" % locals()
def read_arguments(self, args):
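
The federation_ip_range_blacklist handling above boils down to a netaddr IPSet membership test against each resolved destination address. A minimal sketch of that check, reusing a few ranges from the sample config (check_destination is an illustrative helper, not Synapse's API):

    from netaddr import IPAddress, IPSet

    blacklist = IPSet([
        "127.0.0.0/8",
        "10.0.0.0/8",
        "192.168.0.0/16",
    ])
    # 0.0.0.0 and :: are always blacklisted, mirroring read_config() above
    blacklist.update(["0.0.0.0", "::"])

    def check_destination(ip_str):
        """Return True if federation traffic may be sent to this IP."""
        return IPAddress(ip_str) not in blacklist

    assert not check_destination("10.1.2.3")   # inside a blacklisted range
    assert check_destination("93.184.216.34")  # routable public address
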
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 40045de..72dd592 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -24,8 +24,10 @@ import six
from unpaddedbase64 import encode_base64
from OpenSSL import crypto
+from twisted.internet._sslverify import Certificate, trustRootFromCertificates
from synapse.config._base import Config, ConfigError
+from synapse.util import glob_to_regex
logger = logging.getLogger(__name__)
@@ -70,6 +72,53 @@ class TlsConfig(Config):
self.tls_fingerprints = list(self._original_tls_fingerprints)
+ # Whether to verify certificates on outbound federation traffic
+ self.federation_verify_certificates = config.get(
+ "federation_verify_certificates", False,
+ )
+
+ # Whitelist of domains to not verify certificates for
+ fed_whitelist_entries = config.get(
+ "federation_certificate_verification_whitelist", [],
+ )
+
+ # Support globs (*) in whitelist values
+ self.federation_certificate_verification_whitelist = []
+ for entry in fed_whitelist_entries:
+ # Convert globs to regex
+ entry_regex = glob_to_regex(entry)
+ self.federation_certificate_verification_whitelist.append(entry_regex)
+
+ # List of custom certificate authorities for federation traffic validation
+ custom_ca_list = config.get(
+ "federation_custom_ca_list", None,
+ )
+
+ # Read in and parse custom CA certificates
+ self.federation_ca_trust_root = None
+ if custom_ca_list is not None:
+ if len(custom_ca_list) == 0:
+ # A trustroot cannot be generated without any CA certificates.
+ # Raise an error if this option has been specified without any
+ # corresponding certificates.
+ raise ConfigError("federation_custom_ca_list specified without "
+ "any certificate files")
+
+ certs = []
+ for ca_file in custom_ca_list:
+ logger.debug("Reading custom CA certificate file: %s", ca_file)
+ content = self.read_file(ca_file)
+
+ # Parse the CA certificates
+ try:
+ cert_base = Certificate.loadPEM(content)
+ certs.append(cert_base)
+ except Exception as e:
+ raise ConfigError("Error parsing custom CA certificate file %s: %s"
+ % (ca_file, e))
+
+ self.federation_ca_trust_root = trustRootFromCertificates(certs)
+
# This config option applies to non-federation HTTP clients
# (e.g. for talking to recaptcha, identity servers, and such)
# It should never be used in production, and is intended for
@@ -99,15 +148,15 @@ class TlsConfig(Config):
try:
with open(self.tls_certificate_file, 'rb') as f:
cert_pem = f.read()
- except Exception:
- logger.exception("Failed to read existing certificate off disk!")
- raise
+ except Exception as e:
+ raise ConfigError("Failed to read existing certificate file %s: %s"
+ % (self.tls_certificate_file, e))
try:
tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
- except Exception:
- logger.exception("Failed to parse existing certificate off disk!")
- raise
+ except Exception as e:
+ raise ConfigError("Failed to parse existing certificate file %s: %s"
+ % (self.tls_certificate_file, e))
if not allow_self_signed:
if tls_certificate.get_subject() == tls_certificate.get_issuer():
@@ -181,12 +230,51 @@ class TlsConfig(Config):
# See 'ACME support' below to enable auto-provisioning this certificate via
# Let's Encrypt.
#
+ # If supplying your own, be sure to use a `.pem` file that includes the
+ # full certificate chain including any intermediate certificates (for
+ # instance, if using certbot, use `fullchain.pem` as your certificate,
+ # not `cert.pem`).
+ #
#tls_certificate_path: "%(tls_certificate_path)s"
# PEM-encoded private key for TLS
#
#tls_private_key_path: "%(tls_private_key_path)s"
+ # Whether to verify TLS certificates when sending federation traffic.
+ #
+ # This currently defaults to `false`, however this will change in
+ # Synapse 1.0 when valid federation certificates will be required.
+ #
+ #federation_verify_certificates: true
+
+ # Skip federation certificate verification on the following whitelist
+ # of domains.
+ #
+ # This setting should only be used in very specific cases, such as
+ # federation over Tor hidden services and similar. For private networks
+ # of homeservers, you likely want to use a private CA instead.
+ #
+ # Only effective if federation_verify_certificates is `true`.
+ #
+ #federation_certificate_verification_whitelist:
+ # - lon.example.com
+ # - *.domain.com
+ # - *.onion
+
+ # List of custom certificate authorities for federation traffic.
+ #
+ # This setting should only normally be used within a private network of
+ # homeservers.
+ #
+ # Note that this list will replace those that are provided by your
+ # operating environment. Certificates must be in PEM format.
+ #
+ #federation_custom_ca_list:
+ # - myCA1.pem
+ # - myCA2.pem
+ # - myCA3.pem
+
# ACME support: This will configure Synapse to request a valid TLS certificate
# for your configured `server_name` via Let's Encrypt.
#
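
For the federation_custom_ca_list option above, the PEM files are combined into a single Twisted trust root, falling back to the platform CA bundle when no list is given. Sketched in isolation (the PEM blobs and the helper name are placeholders; Synapse reads the files named in the config):

    from twisted.internet.ssl import (
        Certificate,
        CertificateOptions,
        platformTrust,
        trustRootFromCertificates,
    )

    def build_federation_tls_options(ca_pem_blobs):
        if ca_pem_blobs:
            # parse each PEM-encoded CA and combine them into one trust root
            certs = [Certificate.loadPEM(pem) for pem in ca_pem_blobs]
            trust_root = trustRootFromCertificates(certs)
        else:
            # no custom list: trust the CA bundle shipped with the platform
            trust_root = platformTrust()
        return CertificateOptions(trustRoot=trust_root)
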
diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
index fab3a7d..142754a 100644
--- a/synapse/config/user_directory.py
+++ b/synapse/config/user_directory.py
@@ -22,9 +22,13 @@ class UserDirectoryConfig(Config):
"""
def read_config(self, config):
+ self.user_directory_search_enabled = True
self.user_directory_search_all_users = False
user_directory_config = config.get("user_directory", None)
if user_directory_config:
+ self.user_directory_search_enabled = (
+ user_directory_config.get("enabled", True)
+ )
self.user_directory_search_all_users = (
user_directory_config.get("search_all_users", False)
)
@@ -33,6 +37,10 @@ class UserDirectoryConfig(Config):
return """
# User Directory configuration
#
+ # 'enabled' defines whether users can search the user directory. If
+ # false then empty responses are returned to all queries. Defaults to
+ # true.
+ #
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to run
@@ -40,5 +48,6 @@ class UserDirectoryConfig(Config):
# on your database to tell it to rebuild the user_directory search indexes.
#
#user_directory:
+ # enabled: true
# search_all_users: false
"""
diff --git a/synapse/config/voip.py b/synapse/config/voip.py
index 257f7c8..2a1f005 100644
--- a/synapse/config/voip.py
+++ b/synapse/config/voip.py
@@ -22,7 +22,9 @@ class VoipConfig(Config):
self.turn_shared_secret = config.get("turn_shared_secret")
self.turn_username = config.get("turn_username")
self.turn_password = config.get("turn_password")
- self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
+ self.turn_user_lifetime = self.parse_duration(
+ config.get("turn_user_lifetime", "1h"),
+ )
self.turn_allow_guests = config.get("turn_allow_guests", True)
def default_config(self, **kwargs):
@@ -45,7 +47,7 @@ class VoipConfig(Config):
# How long generated TURN credentials last
#
- turn_user_lifetime: "1h"
+ #turn_user_lifetime: 1h
# Whether guests should be allowed to use the TURN server.
# This defaults to True, otherwise VoIP will be unreliable for guests.
@@ -53,5 +55,5 @@ class VoipConfig(Config):
# connect to arbitrary endpoints without having first signed up for a
# valid account (e.g. by passing a CAPTCHA).
#
- turn_allow_guests: True
+ #turn_allow_guests: True
"""
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 80baf0c..bfbd8b6 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -28,7 +28,7 @@ class WorkerConfig(Config):
if self.worker_app == "synapse.app.homeserver":
self.worker_app = None
- self.worker_listeners = config.get("worker_listeners")
+ self.worker_listeners = config.get("worker_listeners", [])
self.worker_daemonize = config.get("worker_daemonize")
self.worker_pid_file = config.get("worker_pid_file")
self.worker_log_file = config.get("worker_log_file")
@@ -48,6 +48,17 @@ class WorkerConfig(Config):
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
self.worker_cpu_affinity = config.get("worker_cpu_affinity")
+ # This option is really only here to support the `--manhole` command
+ # line argument.
+ manhole = config.get("worker_manhole")
+ if manhole:
+ self.worker_listeners.append({
+ "port": manhole,
+ "bind_addresses": ["127.0.0.1"],
+ "type": "manhole",
+ "tls": False,
+ })
+
if self.worker_listeners:
for listener in self.worker_listeners:
bind_address = listener.pop("bind_address", None)
@@ -57,3 +68,18 @@ class WorkerConfig(Config):
bind_addresses.append(bind_address)
elif not bind_addresses:
bind_addresses.append('')
+
+ def read_arguments(self, args):
+ # We support a bunch of command line arguments that override options in
+ # the config. A lot of these options have a worker_* prefix when running
+ # on workers so we also have to override them when command line options
+ # are specified.
+
+ if args.daemonize is not None:
+ self.worker_daemonize = args.daemonize
+ if args.log_config is not None:
+ self.worker_log_config = args.log_config
+ if args.log_file is not None:
+ self.worker_log_file = args.log_file
+ if args.manhole is not None:
+ self.worker_manhole = args.manhole
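
The worker_manhole shim in read_config above simply synthesizes a listener entry, equivalent to declaring one in worker_listeners by hand; sketched:

    def manhole_listener(port):
        # the same dict shape appended by WorkerConfig.read_config
        return {
            "port": port,
            "bind_addresses": ["127.0.0.1"],
            "type": "manhole",
            "tls": False,
        }

    worker_listeners = []
    worker_listeners.append(manhole_listener(9000))
    assert worker_listeners[0]["type"] == "manhole"
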
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index 49cbc70..59ea087 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -18,10 +18,10 @@ import logging
from zope.interface import implementer
from OpenSSL import SSL, crypto
-from twisted.internet._sslverify import _defaultCurveName
+from twisted.internet._sslverify import ClientTLSOptions, _defaultCurveName
from twisted.internet.abstract import isIPAddress, isIPv6Address
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
-from twisted.internet.ssl import CertificateOptions, ContextFactory
+from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust
from twisted.python.failure import Failure
logger = logging.getLogger(__name__)
@@ -90,7 +90,7 @@ def _tolerateErrors(wrapped):
@implementer(IOpenSSLClientConnectionCreator)
-class ClientTLSOptions(object):
+class ClientTLSOptionsNoVerify(object):
"""
Client creator for TLS without certificate identity verification. This is a
copy of twisted.internet._sslverify.ClientTLSOptions with the identity
@@ -127,9 +127,30 @@ class ClientTLSOptionsFactory(object):
to remote servers for federation."""
def __init__(self, config):
- # We don't use config options yet
- self._options = CertificateOptions(verify=False)
+ self._config = config
+ self._options_noverify = CertificateOptions()
+
+ # Check if we're using a custom list of CA certificates
+ trust_root = config.federation_ca_trust_root
+ if trust_root is None:
+ # Use CA root certs provided by OpenSSL
+ trust_root = platformTrust()
+
+ self._options_verify = CertificateOptions(trustRoot=trust_root)
def get_options(self, host):
# Use _makeContext so that we get a fresh OpenSSL CTX each time.
- return ClientTLSOptions(host, self._options._makeContext())
+
+ # Check if certificate verification has been enabled
+ should_verify = self._config.federation_verify_certificates
+
+ # Check if we've disabled certificate verification for this host
+ if should_verify:
+ for regex in self._config.federation_certificate_verification_whitelist:
+ if regex.match(host):
+ should_verify = False
+ break
+
+ if should_verify:
+ return ClientTLSOptions(host, self._options_verify._makeContext())
+ return ClientTLSOptionsNoVerify(host, self._options_noverify._makeContext())
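
The decision implemented by get_options, condensed: verification is controlled globally by federation_verify_certificates and can be switched off per host by the whitelist regexes. Here fnmatch.translate stands in for synapse.util.glob_to_regex, and the class is an illustrative reduction, not Synapse's API:

    import fnmatch
    import re

    class VerifyPolicy:
        def __init__(self, verify_certificates, whitelist_globs):
            self._verify = verify_certificates
            self._whitelist = [
                re.compile(fnmatch.translate(glob)) for glob in whitelist_globs
            ]

        def should_verify(self, host):
            if not self._verify:
                return False
            # skip verification for any whitelisted host
            return not any(regex.match(host) for regex in self._whitelist)

    policy = VerifyPolicy(True, ["lon.example.com", "*.onion"])
    assert policy.should_verify("matrix.org")
    assert not policy.should_verify("duskgytldkxiuqc6.onion")
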
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 7474fd5..d8ba870 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2017, 2018 New Vector Ltd.
+# Copyright 2017, 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@ from collections import namedtuple
from six import raise_from
from six.moves import urllib
+import nacl.signing
from signedjson.key import (
decode_verify_key_bytes,
encode_verify_key_base64,
@@ -113,40 +114,54 @@ class Keyring(object):
server_name. The deferreds run their callbacks in the sentinel
logcontext.
"""
+ # a list of VerifyKeyRequests
verify_requests = []
+ handle = preserve_fn(_handle_key_deferred)
- for server_name, json_object in server_and_json:
+ def process(server_name, json_object):
+ """Process an entry in the request list
+ Given a (server_name, json_object) pair from the request list,
+ adds a key request to verify_requests, and returns a deferred which will
+ complete or fail (in the sentinel context) when verification completes.
+ """
key_ids = signature_ids(json_object, server_name)
+
if not key_ids:
- logger.warn("Request from %s: no supported signature keys",
- server_name)
- deferred = defer.fail(SynapseError(
- 400,
- "Not signed with a supported algorithm",
- Codes.UNAUTHORIZED,
- ))
- else:
- deferred = defer.Deferred()
+ return defer.fail(
+ SynapseError(
+ 400,
+ "Not signed by %s" % (server_name,),
+ Codes.UNAUTHORIZED,
+ )
+ )
logger.debug("Verifying for %s with key_ids %s",
server_name, key_ids)
+ # add the key request to the queue, but don't start it off yet.
verify_request = VerifyKeyRequest(
- server_name, key_ids, json_object, deferred
+ server_name, key_ids, json_object, defer.Deferred(),
)
-
verify_requests.append(verify_request)
- run_in_background(self._start_key_lookups, verify_requests)
+ # now run _handle_key_deferred, which will wait for the key request
+ # to complete and then do the verification.
+ #
+ # We want _handle_key_deferred to log to the right context, so we
+ # wrap it with preserve_fn (aka run_in_background)
+ return handle(verify_request)
- # Pass those keys to handle_key_deferred so that the json object
- # signatures can be verified
- handle = preserve_fn(_handle_key_deferred)
- return [
- handle(rq) for rq in verify_requests
+ results = [
+ process(server_name, json_object)
+ for server_name, json_object in server_and_json
]
+ if verify_requests:
+ run_in_background(self._start_key_lookups, verify_requests)
+
+ return results
+
@defer.inlineCallbacks
def _start_key_lookups(self, verify_requests):
"""Sets off the key fetches for each verify request
@@ -274,10 +289,6 @@ class Keyring(object):
@defer.inlineCallbacks
def do_iterations():
with Measure(self.clock, "get_server_verify_keys"):
- # dict[str, dict[str, VerifyKey]]: results so far.
- # map server_name -> key_id -> VerifyKey
- merged_results = {}
-
# dict[str, set(str)]: keys to fetch for each server
missing_keys = {}
for verify_request in verify_requests:
@@ -287,29 +298,29 @@ class Keyring(object):
for fn in key_fetch_fns:
results = yield fn(missing_keys.items())
- merged_results.update(results)
# We now need to figure out which verify requests we have keys
# for and which we don't
missing_keys = {}
requests_missing_keys = []
for verify_request in verify_requests:
- server_name = verify_request.server_name
- result_keys = merged_results[server_name]
-
if verify_request.deferred.called:
# We've already called this deferred, which probably
# means that we've already found a key for it.
continue
+ server_name = verify_request.server_name
+
+ # see if any of the keys we got this time are sufficient to
+ # complete this VerifyKeyRequest.
+ result_keys = results.get(server_name, {})
for key_id in verify_request.key_ids:
- if key_id in result_keys:
+ key = result_keys.get(key_id)
+ if key:
with PreserveLoggingContext():
- verify_request.deferred.callback((
- server_name,
- key_id,
- result_keys[key_id],
- ))
+ verify_request.deferred.callback(
+ (server_name, key_id, key)
+ )
break
else:
# The else block is only reached if the loop above
@@ -343,27 +354,24 @@ class Keyring(object):
@defer.inlineCallbacks
def get_keys_from_store(self, server_name_and_key_ids):
"""
-
Args:
- server_name_and_key_ids (list[(str, iterable[str])]):
+ server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]):
list of (server_name, iterable[key_id]) tuples to fetch keys for
Returns:
- Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from
+ Deferred: resolves to dict[str, dict[str, VerifyKey|None]]: map from
server_name -> key_id -> VerifyKey
"""
- res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
- [
- run_in_background(
- self.store.get_server_verify_keys,
- server_name, key_ids,
- ).addCallback(lambda ks, server: (server, ks), server_name)
- for server_name, key_ids in server_name_and_key_ids
- ],
- consumeErrors=True,
- ).addErrback(unwrapFirstError))
-
- defer.returnValue(dict(res))
+ keys_to_fetch = (
+ (server_name, key_id)
+ for server_name, key_ids in server_name_and_key_ids
+ for key_id in key_ids
+ )
+ res = yield self.store.get_server_verify_keys(keys_to_fetch)
+ keys = {}
+ for (server_name, key_id), key in res.items():
+ keys.setdefault(server_name, {})[key_id] = key
+ defer.returnValue(keys)
@defer.inlineCallbacks
def get_keys_from_perspectives(self, server_name_and_key_ids):
@@ -494,11 +502,11 @@ class Keyring(object):
)
processed_response = yield self.process_v2_response(
- perspective_name, response, only_from_server=False
+ perspective_name, response
)
+ server_name = response["server_name"]
- for server_name, response_keys in processed_response.items():
- keys.setdefault(server_name, {}).update(response_keys)
+ keys.setdefault(server_name, {}).update(processed_response)
yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
@@ -517,7 +525,7 @@ class Keyring(object):
@defer.inlineCallbacks
def get_server_verify_key_v2_direct(self, server_name, key_ids):
- keys = {}
+ keys = {} # type: dict[str, nacl.signing.VerifyKey]
for requested_key_id in key_ids:
if requested_key_id in keys:
@@ -542,6 +550,11 @@ class Keyring(object):
or server_name not in response[u"signatures"]):
raise KeyLookupError("Key response not signed by remote server")
+ if response["server_name"] != server_name:
+ raise KeyLookupError("Expected a response for server %r not %r" % (
+ server_name, response["server_name"]
+ ))
+
response_keys = yield self.process_v2_response(
from_server=server_name,
requested_ids=[requested_key_id],
@@ -550,24 +563,45 @@ class Keyring(object):
keys.update(response_keys)
- yield logcontext.make_deferred_yieldable(defer.gatherResults(
- [
- run_in_background(
- self.store_keys,
- server_name=key_server_name,
- from_server=server_name,
- verify_keys=verify_keys,
- )
- for key_server_name, verify_keys in keys.items()
- ],
- consumeErrors=True
- ).addErrback(unwrapFirstError))
-
- defer.returnValue(keys)
+ yield self.store_keys(
+ server_name=server_name,
+ from_server=server_name,
+ verify_keys=keys,
+ )
+ defer.returnValue({server_name: keys})
@defer.inlineCallbacks
- def process_v2_response(self, from_server, response_json,
- requested_ids=[], only_from_server=True):
+ def process_v2_response(
+ self, from_server, response_json, requested_ids=[],
+ ):
+ """Parse a 'Server Keys' structure from the result of a /key request
+
+ This is used to parse either the entirety of the response from
+ GET /_matrix/key/v2/server, or a single entry from the list returned by
+ POST /_matrix/key/v2/query.
+
+ Checks that each signature in the response that claims to come from the origin
+ server is valid. (Does not check that there actually is such a signature, for
+ some reason.)
+
+ Stores the json in server_keys_json so that it can be used for future responses
+ to /_matrix/key/v2/query.
+
+ Args:
+ from_server (str): the name of the server producing this result: either
+ the origin server for a /_matrix/key/v2/server request, or the notary
+ for a /_matrix/key/v2/query.
+
+ response_json (dict): the json-decoded Server Keys response object
+
+ requested_ids (iterable[str]): a list of the key IDs that were requested.
+ We will store the json for these key ids as well as any that are
+ actually in the response
+
+ Returns:
+ Deferred[dict[str, nacl.signing.VerifyKey]]:
+ map from key_id to key object
+ """
time_now_ms = self.clock.time_msec()
response_keys = {}
verify_keys = {}
@@ -589,15 +623,7 @@ class Keyring(object):
verify_key.time_added = time_now_ms
old_verify_keys[key_id] = verify_key
- results = {}
server_name = response_json["server_name"]
- if only_from_server:
- if server_name != from_server:
- raise KeyLookupError(
- "Expected a response for server %r not %r" % (
- from_server, server_name
- )
- )
for key_id in response_json["signatures"].get(server_name, {}):
if key_id not in response_json["verify_keys"]:
raise KeyLookupError(
@@ -633,7 +659,7 @@ class Keyring(object):
self.store.store_server_keys_json,
server_name=server_name,
key_id=key_id,
- from_server=server_name,
+ from_server=from_server,
ts_now_ms=time_now_ms,
ts_expires_ms=ts_valid_until_ms,
key_json_bytes=signed_key_json_bytes,
@@ -643,9 +669,7 @@ class Keyring(object):
consumeErrors=True,
).addErrback(unwrapFirstError))
- results[server_name] = response_keys
-
- defer.returnValue(results)
+ defer.returnValue(response_keys)
def store_keys(self, server_name, from_server, verify_keys):
"""Store a collection of verify keys for a given server
@@ -686,9 +710,9 @@ def _handle_key_deferred(verify_request):
try:
with PreserveLoggingContext():
_, key_id, verify_key = yield verify_request.deferred
- except (IOError, RequestSendFailed) as e:
+ except KeyLookupError as e:
logger.warn(
- "Got IOError when downloading keys for %s: %s %s",
+ "Failed to download keys for %s: %s %s",
server_name, type(e).__name__, str(e),
)
raise SynapseError(
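
The reworked get_keys_from_store above flattens its (server_name, key_ids) input into individual (server_name, key_id) pairs for a single storage query, then regroups the flat result into the nested mapping callers expect. That reshaping in plain Python, with stand-in data:

    def regroup_keys(server_name_and_key_ids, flat_result):
        keys_to_fetch = (
            (server_name, key_id)
            for server_name, key_ids in server_name_and_key_ids
            for key_id in key_ids
        )
        keys = {}
        for server_name, key_id in keys_to_fetch:
            # missing keys come back as None, matching the new return type
            keys.setdefault(server_name, {})[key_id] = flat_result.get(
                (server_name, key_id)
            )
        return keys

    flat = {("example.com", "ed25519:a"): "<VerifyKey>"}
    assert regroup_keys([("example.com", ["ed25519:a", "ed25519:b"])], flat) == {
        "example.com": {"ed25519:a": "<VerifyKey>", "ed25519:b": None}
    }
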
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 8f9e330..203490f 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -20,15 +20,9 @@ from signedjson.key import decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64
-from synapse.api.constants import (
- KNOWN_ROOM_VERSIONS,
- EventFormatVersions,
- EventTypes,
- JoinRules,
- Membership,
- RoomVersions,
-)
+from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, EventSizeError, SynapseError
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
from synapse.types import UserID, get_domain_from_id
logger = logging.getLogger(__name__)
@@ -452,16 +446,18 @@ def check_redaction(room_version, event, auth_events):
if user_level >= redact_level:
return False
- if room_version in (RoomVersions.V1, RoomVersions.V2,):
+ v = KNOWN_ROOM_VERSIONS.get(room_version)
+ if not v:
+ raise RuntimeError("Unrecognized room version %r" % (room_version,))
+
+ if v.event_format == EventFormatVersions.V1:
redacter_domain = get_domain_from_id(event.event_id)
redactee_domain = get_domain_from_id(event.redacts)
if redacter_domain == redactee_domain:
return True
- elif room_version == RoomVersions.V3:
+ else:
event.internal_metadata.recheck_redaction = True
return True
- else:
- raise RuntimeError("Unrecognized room version %r" % (room_version,))
raise AuthError(
403,
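
The format-V1 branch in check_redaction works because old-style event IDs embed their origin server, so the same-domain rule can be evaluated from the IDs alone. get_domain_from_id is essentially a split on the first colon (sketched here with ValueError in place of SynapseError):

    def get_domain_from_id(string):
        idx = string.find(":")
        if idx == -1:
            raise ValueError("Invalid ID: %r" % (string,))
        return string[idx + 1:]

    assert get_domain_from_id("$abc123:matrix.org") == "matrix.org"
    # Hash-based event IDs (format V2 and later) carry no domain, which is
    # why the other branch defers the decision via recheck_redaction.
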
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 20c1ab4..1edd19c 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -21,7 +21,8 @@ import six
from unpaddedbase64 import encode_base64
-from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventFormatVersions, RoomVersions
+from synapse.api.errors import UnsupportedRoomVersionError
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
@@ -77,6 +78,20 @@ class _EventInternalMetadata(object):
"""
return getattr(self, "recheck_redaction", False)
+ def is_soft_failed(self):
+ """Whether the event has been soft failed.
+
+ Soft failed events should be handled as usual, except:
+ 1. They should not go down sync or event streams, or generally
+ sent to clients.
+ 2. They should not be added to the forward extremities (and
+ therefore not to current state).
+
+ Returns:
+ bool
+ """
+ return getattr(self, "soft_failed", False)
+
def _event_dict_property(key):
# We want to be able to use hasattr with the event dict properties.
@@ -127,7 +142,6 @@ class EventBase(object):
origin = _event_dict_property("origin")
origin_server_ts = _event_dict_property("origin_server_ts")
prev_events = _event_dict_property("prev_events")
- prev_state = _event_dict_property("prev_state")
redacts = _event_dict_property("redacts")
room_id = _event_dict_property("room_id")
sender = _event_dict_property("sender")
@@ -322,13 +336,32 @@ class FrozenEventV2(EventBase):
return self.__repr__()
def __repr__(self):
- return "<FrozenEventV2 event_id='%s', type='%s', state_key='%s'>" % (
+ return "<%s event_id='%s', type='%s', state_key='%s'>" % (
+ self.__class__.__name__,
self.event_id,
self.get("type", None),
self.get("state_key", None),
)
+class FrozenEventV3(FrozenEventV2):
+ """FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""
+ format_version = EventFormatVersions.V3 # All events of this type are V3
+
+ @property
+ def event_id(self):
+ # We have to import this here as otherwise we get an import loop which
+ # is hard to break.
+ from synapse.crypto.event_signing import compute_event_reference_hash
+
+ if self._event_id:
+ return self._event_id
+ self._event_id = "$" + encode_base64(
+ compute_event_reference_hash(self)[1], urlsafe=True
+ )
+ return self._event_id
+
+
def room_version_to_event_format(room_version):
"""Converts a room version string to the event format
@@ -337,19 +370,17 @@ def room_version_to_event_format(room_version):
Returns:
int
+
+ Raises:
+ UnsupportedRoomVersionError if the room version is unknown
"""
- if room_version not in KNOWN_ROOM_VERSIONS:
- # We should have already checked version, so this should not happen
- raise RuntimeError("Unrecognized room version %s" % (room_version,))
-
- if room_version in (
- RoomVersions.V1, RoomVersions.V2, RoomVersions.STATE_V2_TEST,
- ):
- return EventFormatVersions.V1
- elif room_version in (RoomVersions.V3,):
- return EventFormatVersions.V2
- else:
- raise RuntimeError("Unrecognized room version %s" % (room_version,))
+ v = KNOWN_ROOM_VERSIONS.get(room_version)
+
+ if not v:
+ # this can happen if support is withdrawn for a room version
+ raise UnsupportedRoomVersionError()
+
+ return v.event_format
def event_type_from_format_version(format_version):
@@ -368,6 +399,8 @@ def event_type_from_format_version(format_version):
return FrozenEvent
elif format_version == EventFormatVersions.V2:
return FrozenEventV2
+ elif format_version == EventFormatVersions.V3:
+ return FrozenEventV3
else:
raise Exception(
"No event format %r" % (format_version,)
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 06e01be..1fe995f 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -17,21 +17,18 @@ import attr
from twisted.internet import defer
-from synapse.api.constants import (
+from synapse.api.constants import MAX_DEPTH
+from synapse.api.errors import UnsupportedRoomVersionError
+from synapse.api.room_versions import (
KNOWN_EVENT_FORMAT_VERSIONS,
KNOWN_ROOM_VERSIONS,
- MAX_DEPTH,
EventFormatVersions,
)
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.types import EventID
from synapse.util.stringutils import random_string
-from . import (
- _EventInternalMetadata,
- event_type_from_format_version,
- room_version_to_event_format,
-)
+from . import _EventInternalMetadata, event_type_from_format_version
@attr.s(slots=True, cmp=False, frozen=True)
@@ -170,21 +167,33 @@ class EventBuilderFactory(object):
def new(self, room_version, key_values):
"""Generate an event builder appropriate for the given room version
+ Deprecated: use for_room_version with a RoomVersion object instead
+
Args:
- room_version (str): Version of the room that we're creating an
- event builder for
+ room_version (str): Version of the room that we're creating an event builder
+ for
key_values (dict): Fields used as the basis of the new event
Returns:
EventBuilder
"""
+ v = KNOWN_ROOM_VERSIONS.get(room_version)
+ if not v:
+ # this can happen if support is withdrawn for a room version
+ raise UnsupportedRoomVersionError()
+ return self.for_room_version(v, key_values)
- # There's currently only the one event version defined
- if room_version not in KNOWN_ROOM_VERSIONS:
- raise Exception(
- "No event format defined for version %r" % (room_version,)
- )
+ def for_room_version(self, room_version, key_values):
+ """Generate an event builder appropriate for the given room version
+
+ Args:
+ room_version (synapse.api.room_versions.RoomVersion):
+ Version of the room that we're creating an event builder for
+ key_values (dict): Fields used as the basis of the new event
+ Returns:
+ EventBuilder
+ """
return EventBuilder(
store=self.store,
state=self.state,
@@ -192,7 +201,7 @@ class EventBuilderFactory(object):
clock=self.clock,
hostname=self.hostname,
signing_key=self.signing_key,
- format_version=room_version_to_event_format(room_version),
+ format_version=room_version.event_format,
type=key_values["type"],
state_key=key_values.get("state_key"),
room_id=key_values["room_id"],
@@ -222,7 +231,6 @@ def create_local_event_from_event_dict(clock, hostname, signing_key,
FrozenEvent
"""
- # There's currently only the one event version defined
if format_version not in KNOWN_EVENT_FORMAT_VERSIONS:
raise Exception(
"No event format defined for version %r" % (format_version,)
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 368b5f6..fa09c13 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -187,7 +187,9 @@ class EventContext(object):
Returns:
Deferred[dict[(str, str), str]|None]: Returns None if state_group
- is None, which happens when the associated event is an outlier.
+ is None, which happens when the associated event is an outlier.
+ Maps a (type, state_key) to the event ID of the state event matching
+ this tuple.
"""
if not self._fetching_state_deferred:
@@ -205,7 +207,9 @@ class EventContext(object):
Returns:
Deferred[dict[(str, str), str]|None]: Returns None if state_group
- is None, which happens when the associated event is an outlier.
+ is None, which happens when the associated event is an outlier.
+ Maps a (type, state_key) to the event ID of the state event matching
+ this tuple.
"""
if not self._fetching_state_deferred:
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 633e068..6058077 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2017 New Vector Ltd.
+# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 07fccdd..27a2a9e 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -19,7 +19,10 @@ from six import string_types
from frozendict import frozendict
-from synapse.api.constants import EventTypes
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, RelationTypes
+from synapse.util.async_helpers import yieldable_gather_results
from . import EventBase
@@ -311,3 +314,92 @@ def serialize_event(e, time_now_ms, as_client_event=True,
d = only_fields(d, only_event_fields)
return d
+
+
+class EventClientSerializer(object):
+ """Serializes events that are to be sent to clients.
+
+ This is used for bundling extra information with any events to be sent to
+ clients.
+ """
+
+ def __init__(self, hs):
+ self.store = hs.get_datastore()
+ self.experimental_msc1849_support_enabled = (
+ hs.config.experimental_msc1849_support_enabled
+ )
+
+ @defer.inlineCallbacks
+ def serialize_event(self, event, time_now, **kwargs):
+ """Serializes a single event.
+
+ Args:
+ event (EventBase)
+ time_now (int): The current time in milliseconds
+ **kwargs: Arguments to pass to `serialize_event`
+
+ Returns:
+ Deferred[dict]: The serialized event
+ """
+ # To handle the case of presence events and the like
+ if not isinstance(event, EventBase):
+ defer.returnValue(event)
+
+ event_id = event.event_id
+ serialized_event = serialize_event(event, time_now, **kwargs)
+
+ # If MSC1849 is enabled then we need to check if there are any relations
+ # we need to bundle in with the event
+ if self.experimental_msc1849_support_enabled:
+ annotations = yield self.store.get_aggregation_groups_for_event(
+ event_id,
+ )
+ references = yield self.store.get_relations_for_event(
+ event_id, RelationTypes.REFERENCE, direction="f",
+ )
+
+ if annotations.chunk:
+ r = serialized_event["unsigned"].setdefault("m.relations", {})
+ r[RelationTypes.ANNOTATION] = annotations.to_dict()
+
+ if references.chunk:
+ r = serialized_event["unsigned"].setdefault("m.relations", {})
+ r[RelationTypes.REFERENCE] = references.to_dict()
+
+ edit = None
+ if event.type == EventTypes.Message:
+ edit = yield self.store.get_applicable_edit(event_id)
+
+ if edit:
+ # If there is an edit replace the content, preserving existing
+ # relations.
+
+ relations = event.content.get("m.relates_to")
+ serialized_event["content"] = edit.content.get("m.new_content", {})
+ if relations:
+ serialized_event["content"]["m.relates_to"] = relations
+ else:
+ serialized_event["content"].pop("m.relates_to", None)
+
+ r = serialized_event["unsigned"].setdefault("m.relations", {})
+ r[RelationTypes.REPLACE] = {
+ "event_id": edit.event_id,
+ }
+
+ defer.returnValue(serialized_event)
+
+ def serialize_events(self, events, time_now, **kwargs):
+ """Serializes multiple events.
+
+ Args:
+ events (iter[EventBase])
+ time_now (int): The current time in milliseconds
+ **kwargs: Arguments to pass to `serialize_event`
+
+ Returns:
+ Deferred[list[dict]]: The list of serialized events
+ """
+ return yieldable_gather_results(
+ self.serialize_event, events,
+ time_now=time_now, **kwargs
+ )
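
The trickiest rule in EventClientSerializer.serialize_event is the edit handling: the edit's m.new_content replaces the original content, the original event's own m.relates_to is preserved, and the replacing event is advertised under unsigned["m.relations"]. The same logic in isolation, with plain dicts as stand-ins for events:

    def apply_edit(serialized_event, original_relations, edit_content, edit_event_id):
        serialized_event["content"] = dict(edit_content.get("m.new_content", {}))
        if original_relations:
            serialized_event["content"]["m.relates_to"] = original_relations
        else:
            serialized_event["content"].pop("m.relates_to", None)
        r = serialized_event["unsigned"].setdefault("m.relations", {})
        r["m.replace"] = {"event_id": edit_event_id}
        return serialized_event

    ev = {"content": {"body": "typo"}, "unsigned": {}}
    edited = apply_edit(ev, None, {"m.new_content": {"body": "fixed"}}, "$edit1")
    assert edited["content"] == {"body": "fixed"}
    assert edited["unsigned"]["m.relations"]["m.replace"] == {"event_id": "$edit1"}
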
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index a072674..711af51 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -15,8 +15,9 @@
from six import string_types
-from synapse.api.constants import EventFormatVersions, EventTypes, Membership
-from synapse.api.errors import SynapseError
+from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
+from synapse.api.errors import Codes, SynapseError
+from synapse.api.room_versions import EventFormatVersions
from synapse.types import EventID, RoomID, UserID
@@ -55,6 +56,17 @@ class EventValidator(object):
if not isinstance(getattr(event, s), string_types):
raise SynapseError(400, "'%s' not a string type" % (s,))
+ if event.type == EventTypes.Aliases:
+ if "aliases" in event.content:
+ for alias in event.content["aliases"]:
+ if len(alias) > MAX_ALIAS_LENGTH:
+ raise SynapseError(
+ 400,
+ ("Can't create aliases longer than"
+ " %d characters" % (MAX_ALIAS_LENGTH,)),
+ Codes.INVALID_PARAM,
+ )
+
def validate_builder(self, event):
"""Validates that the builder/event has roughly the right format. Only
checks values that we expect a proto event to have, rather than all the
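
The alias validation added above, as a standalone check (MAX_ALIAS_LENGTH is 255 in synapse.api.constants at this version; ValueError stands in for SynapseError):

    MAX_ALIAS_LENGTH = 255

    def validate_alias_event_content(content):
        for alias in content.get("aliases", []):
            if len(alias) > MAX_ALIAS_LENGTH:
                raise ValueError(
                    "Can't create aliases longer than %d characters"
                    % (MAX_ALIAS_LENGTH,)
                )

    validate_alias_event_content({"aliases": ["#room:example.com"]})  # passes
    try:
        validate_alias_event_content(
            {"aliases": ["#" + "a" * 255 + ":example.com"]}
        )
    except ValueError as e:
        print(e)  # Can't create aliases longer than 255 characters
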
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index a7a2ec4..cffa831 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -20,8 +20,9 @@ import six
from twisted.internet import defer
from twisted.internet.defer import DeferredList
-from synapse.api.constants import MAX_DEPTH, EventTypes, Membership, RoomVersions
+from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import event_type_from_format_version
from synapse.events.utils import prune_event
@@ -268,15 +269,29 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
for p in pdus_to_check_sender
])
+ def sender_err(e, pdu_to_check):
+ errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
+ pdu_to_check.pdu.event_id,
+ pdu_to_check.sender_domain,
+ e.getErrorMessage(),
+ )
+ # XX not really sure if these are the right codes, but they are what
+ # we've done for ages
+ raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
+
for p, d in zip(pdus_to_check_sender, more_deferreds):
+ d.addErrback(sender_err, p)
p.deferreds.append(d)
# now let's look for events where the sender's domain is different to the
# event id's domain (normally only the case for joins/leaves), and add additional
# checks. Only do this if the room version has a concept of event ID domain
- if room_version in (
- RoomVersions.V1, RoomVersions.V2, RoomVersions.STATE_V2_TEST,
- ):
+ # (ie, the room version uses old-style non-hash event IDs).
+ v = KNOWN_ROOM_VERSIONS.get(room_version)
+ if not v:
+ raise RuntimeError("Unrecognized room version %s" % (room_version,))
+
+ if v.event_format == EventFormatVersions.V1:
pdus_to_check_event_id = [
p for p in pdus_to_check
if p.sender_domain != get_domain_from_id(p.pdu.event_id)
@@ -287,12 +302,19 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
for p in pdus_to_check_event_id
])
+ def event_err(e, pdu_to_check):
+ errmsg = (
+ "event id %s: unable to verify signature for event id domain: %s" % (
+ pdu_to_check.pdu.event_id,
+ e.getErrorMessage(),
+ )
+ )
+ # XX as above: not really sure if these are the right codes
+ raise SynapseError(400, errmsg, Codes.UNAUTHORIZED)
+
for p, d in zip(pdus_to_check_event_id, more_deferreds):
+ d.addErrback(event_err, p)
p.deferreds.append(d)
- elif room_version in (RoomVersions.V3,):
- pass # No further checks needed, as event IDs are hashes here
- else:
- raise RuntimeError("Unrecognized room version %s" % (room_version,))
# replace lists of deferreds with single Deferreds
return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
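
The deferred wiring used for these signature checks, in miniature: an errback appended to each per-PDU deferred rewrites any failure into an error naming the offending event, and downstream error handlers see the rewritten failure. A Twisted sketch with stand-in exception types:

    from twisted.internet import defer

    def sender_err(failure, event_id):
        # re-raise with context, replacing the original failure
        raise RuntimeError(
            "event id %s: unable to verify signature: %s"
            % (event_id, failure.getErrorMessage())
        )

    d = defer.fail(ValueError("bad signature"))
    d.addErrback(sender_err, "$event1:example.com")
    d.addErrback(lambda f: print(f.getErrorMessage()))
    # prints: event id $event1:example.com: unable to verify signature: bad signature
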
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 58e04d8..f3fc897 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -25,12 +25,7 @@ from prometheus_client import Counter
from twisted.internet import defer
-from synapse.api.constants import (
- KNOWN_ROOM_VERSIONS,
- EventTypes,
- Membership,
- RoomVersions,
-)
+from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
CodeMessageException,
Codes,
@@ -38,6 +33,11 @@ from synapse.api.errors import (
HttpResponseException,
SynapseError,
)
+from synapse.api.room_versions import (
+ KNOWN_ROOM_VERSIONS,
+ EventFormatVersions,
+ RoomVersions,
+)
from synapse.events import builder, room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.util import logcontext, unwrapFirstError
@@ -570,7 +570,7 @@ class FederationClient(FederationBase):
Deferred[tuple[str, FrozenEvent, int]]: resolves to a tuple of
`(origin, event, event_format)` where origin is the remote
homeserver which generated the event, and event_format is one of
- `synapse.api.constants.EventFormatVersions`.
+ `synapse.api.room_versions.EventFormatVersions`.
Fails with a ``SynapseError`` if the chosen remote server
returns a 300/400 code.
@@ -592,7 +592,7 @@ class FederationClient(FederationBase):
# Note: If not supplied, the room version may be either v1 or v2,
# however either way the event format version will be v1.
- room_version = ret.get("room_version", RoomVersions.V1)
+ room_version = ret.get("room_version", RoomVersions.V1.identifier)
event_format = room_version_to_event_format(room_version)
pdu_dict = ret.get("event", None)
@@ -695,7 +695,9 @@ class FederationClient(FederationBase):
room_version = None
for e in state:
if (e.type, e.state_key) == (EventTypes.Create, ""):
- room_version = e.content.get("room_version", RoomVersions.V1)
+ room_version = e.content.get(
+ "room_version", RoomVersions.V1.identifier
+ )
break
if room_version is None:
@@ -802,11 +804,10 @@ class FederationClient(FederationBase):
raise err
# Otherwise, we assume that the remote server doesn't understand
- # the v2 invite API.
-
- if room_version in (RoomVersions.V1, RoomVersions.V2):
- pass # We'll fall through
- else:
+ # the v2 invite API. That's ok provided the room uses old-style event
+ # IDs.
+ v = KNOWN_ROOM_VERSIONS.get(room_version)
+ if v.event_format != EventFormatVersions.V1:
raise SynapseError(
400,
"User's homeserver does not support this room version",
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 569eb27..4c28c1d 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -25,7 +25,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
-from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
+from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
@@ -33,7 +33,9 @@ from synapse.api.errors import (
IncompatibleRoomVersionError,
NotFoundError,
SynapseError,
+ UnsupportedRoomVersionError,
)
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.crypto.event_signing import compute_event_signature
from synapse.events import room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
@@ -197,11 +199,22 @@ class FederationServer(FederationBase):
try:
room_version = yield self.store.get_room_version(room_id)
- format_ver = room_version_to_event_format(room_version)
except NotFoundError:
logger.info("Ignoring PDU for unknown room_id: %s", room_id)
continue
+ try:
+ format_ver = room_version_to_event_format(room_version)
+ except UnsupportedRoomVersionError:
+ # this can happen if support for a given room version is withdrawn,
+ # in which case we may still receive events for that room.
+ logger.info(
+ "Ignoring PDU for room %s with unknown version %s",
+ room_id,
+ room_version,
+ )
+ continue
+
event = event_from_pdu_json(p, format_ver)
pdus_by_room.setdefault(room_id, []).append(event)
@@ -886,6 +899,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
def on_edu(self, edu_type, origin, content):
"""Overrides FederationHandlerRegistry
"""
+ if not self.config.use_presence and edu_type == "m.presence":
+ return
+
handler = self.edu_handlers.get(edu_type)
if handler:
return super(ReplicationFederationHandlerRegistry, self).on_edu(
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 6f59957..0240b33 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -46,7 +46,7 @@ logger = logging.getLogger(__name__)
class FederationRemoteSendQueue(object):
- """A drop in replacement for TransactionQueue"""
+ """A drop in replacement for FederationSender"""
def __init__(self, hs):
self.server_name = hs.hostname
@@ -55,7 +55,12 @@ class FederationRemoteSendQueue(object):
self.is_mine_id = hs.is_mine_id
self.presence_map = {} # Pending presence map user_id -> UserPresenceState
- self.presence_changed = SortedDict() # Stream position -> user_id
+ self.presence_changed = SortedDict() # Stream position -> list[user_id]
+
+ # Stores the destinations we need to explicitly send presence to about a
+ # given user.
+ # Stream position -> (user_id, destinations)
+ self.presence_destinations = SortedDict()
self.keyed_edu = {} # (destination, key) -> EDU
self.keyed_edu_changed = SortedDict() # stream position -> (destination, key)
@@ -77,7 +82,7 @@ class FederationRemoteSendQueue(object):
for queue_name in [
"presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
- "edus", "device_messages", "pos_time",
+ "edus", "device_messages", "pos_time", "presence_destinations",
]:
register(queue_name, getattr(self, queue_name))
@@ -121,6 +126,15 @@ class FederationRemoteSendQueue(object):
for user_id in uids
)
+ keys = self.presence_destinations.keys()
+ i = self.presence_destinations.bisect_left(position_to_delete)
+ for key in keys[:i]:
+ del self.presence_destinations[key]
+
+ user_ids.update(
+ user_id for user_id, _ in self.presence_destinations.values()
+ )
+
to_del = [
user_id for user_id in self.presence_map if user_id not in user_ids
]
@@ -154,13 +168,17 @@ class FederationRemoteSendQueue(object):
del self.device_messages[key]
def notify_new_events(self, current_id):
- """As per TransactionQueue"""
+ """As per FederationSender"""
# We don't need to replicate this as it gets sent down a different
# stream.
pass
- def send_edu(self, destination, edu_type, content, key=None):
- """As per TransactionQueue"""
+ def build_and_send_edu(self, destination, edu_type, content, key=None):
+ """As per FederationSender"""
+ if destination == self.server_name:
+ logger.info("Not sending EDU to ourselves")
+ return
+
pos = self._next_pos()
edu = Edu(
@@ -179,8 +197,17 @@ class FederationRemoteSendQueue(object):
self.notifier.on_new_replication_data()
+ def send_read_receipt(self, receipt):
+ """As per FederationSender
+
+ Args:
+ receipt (synapse.types.ReadReceipt):
+ """
+ # nothing to do here: the replication listener will handle it.
+ pass
+
def send_presence(self, states):
- """As per TransactionQueue
+ """As per FederationSender
Args:
states (list(UserPresenceState))
@@ -196,8 +223,22 @@ class FederationRemoteSendQueue(object):
self.notifier.on_new_replication_data()
+ def send_presence_to_destinations(self, states, destinations):
+ """As per FederationSender
+
+ Args:
+ states (list[UserPresenceState])
+ destinations (list[str])
+ """
+ for state in states:
+ pos = self._next_pos()
+ self.presence_map.update({state.user_id: state for state in states})
+ self.presence_destinations[pos] = (state.user_id, destinations)
+
+ self.notifier.on_new_replication_data()
+
def send_device_messages(self, destination):
- """As per TransactionQueue"""
+ """As per FederationSender"""
pos = self._next_pos()
self.device_messages[pos] = destination
self.notifier.on_new_replication_data()
@@ -248,6 +289,16 @@ class FederationRemoteSendQueue(object):
state=self.presence_map[user_id],
)))
+ # Fetch presence to send to destinations
+ i = self.presence_destinations.bisect_right(from_token)
+ j = self.presence_destinations.bisect_right(to_token) + 1
+
+ for pos, (user_id, dests) in self.presence_destinations.items()[i:j]:
+ rows.append((pos, PresenceDestinationsRow(
+ state=self.presence_map[user_id],
+ destinations=list(dests),
+ )))
+
# Fetch changes keyed edus
i = self.keyed_edu_changed.bisect_right(from_token)
j = self.keyed_edu_changed.bisect_right(to_token) + 1
@@ -344,6 +395,29 @@ class PresenceRow(BaseFederationRow, namedtuple("PresenceRow", (
buff.presence.append(self.state)
+class PresenceDestinationsRow(BaseFederationRow, namedtuple("PresenceDestinationsRow", (
+ "state", # UserPresenceState
+ "destinations", # list[str]
+))):
+ TypeId = "pd"
+
+ @staticmethod
+ def from_data(data):
+ return PresenceDestinationsRow(
+ state=UserPresenceState.from_dict(data["state"]),
+ destinations=data["dests"],
+ )
+
+ def to_data(self):
+ return {
+ "state": self.state.as_dict(),
+ "dests": self.destinations,
+ }
+
+ def add_to_buffer(self, buff):
+ buff.presence_destinations.append((self.state, self.destinations))
+
+
class KeyedEduRow(BaseFederationRow, namedtuple("KeyedEduRow", (
"key", # tuple(str) - the edu key passed to send_edu
"edu", # Edu
@@ -415,6 +489,7 @@ TypeToRow = {
Row.TypeId: Row
for Row in (
PresenceRow,
+ PresenceDestinationsRow,
KeyedEduRow,
EduRow,
DeviceRow,
@@ -424,6 +499,7 @@ TypeToRow = {
ParsedFederationStreamData = namedtuple("ParsedFederationStreamData", (
"presence", # list(UserPresenceState)
+ "presence_destinations", # list of tuples of UserPresenceState and destinations
"keyed_edus", # dict of destination -> { key -> Edu }
"edus", # dict of destination -> [Edu]
"device_destinations", # set of destinations
@@ -435,7 +511,7 @@ def process_rows_for_federation(transaction_queue, rows):
transaction queue ready for sending to the relevant homeservers.
Args:
- transaction_queue (TransactionQueue)
+ transaction_queue (FederationSender)
rows (list(synapse.replication.tcp.streams.FederationStreamRow))
"""
@@ -445,6 +521,7 @@ def process_rows_for_federation(transaction_queue, rows):
buff = ParsedFederationStreamData(
presence=[],
+ presence_destinations=[],
keyed_edus={},
edus={},
device_destinations=set(),
@@ -463,17 +540,18 @@ def process_rows_for_federation(transaction_queue, rows):
if buff.presence:
transaction_queue.send_presence(buff.presence)
+ for state, destinations in buff.presence_destinations:
+ transaction_queue.send_presence_to_destinations(
+ states=[state], destinations=destinations,
+ )
+
for destination, edu_map in iteritems(buff.keyed_edus):
for key, edu in edu_map.items():
- transaction_queue.send_edu(
- edu.destination, edu.edu_type, edu.content, key=key,
- )
+ transaction_queue.send_edu(edu, key)
for destination, edu_list in iteritems(buff.edus):
for edu in edu_list:
- transaction_queue.send_edu(
- edu.destination, edu.edu_type, edu.content, key=None,
- )
+ transaction_queue.send_edu(edu, None)
for destination in buff.device_destinations:
transaction_queue.send_device_messages(destination)
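
The replication pattern behind presence_destinations (and the other queues in this file): rows live in a SortedDict keyed by stream position, and get_replication_rows extracts a window with bisect. A sketch using sortedcontainers, which Synapse already depends on:

    from sortedcontainers import SortedDict

    presence_destinations = SortedDict()
    presence_destinations[3] = ("@a:hs", ["one.example"])
    presence_destinations[7] = ("@b:hs", ["two.example"])
    presence_destinations[9] = ("@c:hs", ["three.example"])

    # rows strictly after from_token, up to and including to_token
    from_token, to_token = 3, 9
    i = presence_destinations.bisect_right(from_token)
    j = presence_destinations.bisect_right(to_token)

    rows = [
        (pos, user_id, dests)
        for pos, (user_id, dests) in presence_destinations.items()[i:j]
    ]
    assert [pos for pos, _, _ in rows] == [7, 9]
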
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
new file mode 100644
index 0000000..4f0f939
--- /dev/null
+++ b/synapse/federation/sender/__init__.py
@@ -0,0 +1,482 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from six import itervalues
+
+from prometheus_client import Counter
+
+from twisted.internet import defer
+
+import synapse.metrics
+from synapse.federation.sender.per_destination_queue import PerDestinationQueue
+from synapse.federation.sender.transaction_manager import TransactionManager
+from synapse.federation.units import Edu
+from synapse.handlers.presence import get_interested_remotes
+from synapse.metrics import (
+ LaterGauge,
+ event_processing_loop_counter,
+ event_processing_loop_room_count,
+ events_processed_counter,
+)
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import logcontext
+from synapse.util.metrics import measure_func
+
+logger = logging.getLogger(__name__)
+
+sent_pdus_destination_dist_count = Counter(
+ "synapse_federation_client_sent_pdu_destinations:count",
+ "Number of PDUs queued for sending to one or more destinations",
+)
+
+sent_pdus_destination_dist_total = Counter(
+ "synapse_federation_client_sent_pdu_destinations:total", ""
+ "Total number of PDUs queued for sending across all destinations",
+)
+
+
+class FederationSender(object):
+ def __init__(self, hs):
+ self.hs = hs
+ self.server_name = hs.hostname
+
+ self.store = hs.get_datastore()
+ self.state = hs.get_state_handler()
+
+ self.clock = hs.get_clock()
+ self.is_mine_id = hs.is_mine_id
+
+ self._transaction_manager = TransactionManager(hs)
+
+ # map from destination to PerDestinationQueue
+ self._per_destination_queues = {} # type: dict[str, PerDestinationQueue]
+
+ LaterGauge(
+ "synapse_federation_transaction_queue_pending_destinations",
+ "",
+ [],
+ lambda: sum(
+ 1 for d in self._per_destination_queues.values()
+ if d.transmission_loop_running
+ ),
+ )
+
+ # Map of user_id -> UserPresenceState for all the pending presence
+ # to be sent out by user_id. Entries here get processed and put in
+ # pending_presence_by_dest
+ self.pending_presence = {}
+
+ LaterGauge(
+ "synapse_federation_transaction_queue_pending_pdus",
+ "",
+ [],
+ lambda: sum(
+ d.pending_pdu_count() for d in self._per_destination_queues.values()
+ ),
+ )
+ LaterGauge(
+ "synapse_federation_transaction_queue_pending_edus",
+ "",
+ [],
+ lambda: sum(
+ d.pending_edu_count() for d in self._per_destination_queues.values()
+ ),
+ )
+
+ self._order = 1
+
+ self._is_processing = False
+ self._last_poked_id = -1
+
+ self._processing_pending_presence = False
+
+ # map from room_id to a set of PerDestinationQueues which we believe are
+ # awaiting a call to flush_read_receipts_for_room. The presence of an entry
+ # here for a given room means that we are rate-limiting RR flushes to that room,
+ # and that there is a pending call to _flush_rrs_for_room in the system.
+ self._queues_awaiting_rr_flush_by_room = {
+ } # type: dict[str, set[PerDestinationQueue]]
+
+ self._rr_txn_interval_per_room_ms = (
+ 1000.0 / hs.get_config().federation_rr_transactions_per_room_per_second
+ )
+
+ def _get_per_destination_queue(self, destination):
+ """Get or create a PerDestinationQueue for the given destination
+
+ Args:
+ destination (str): server_name of remote server
+
+ Returns:
+ PerDestinationQueue
+ """
+ queue = self._per_destination_queues.get(destination)
+ if not queue:
+ queue = PerDestinationQueue(self.hs, self._transaction_manager, destination)
+ self._per_destination_queues[destination] = queue
+ return queue
+
+ def notify_new_events(self, current_id):
+ """This gets called when we have some new events we might want to
+ send out to other servers.
+ """
+ self._last_poked_id = max(current_id, self._last_poked_id)
+
+ if self._is_processing:
+ return
+
+ # fire off a processing loop in the background
+ run_as_background_process(
+ "process_event_queue_for_federation",
+ self._process_event_queue_loop,
+ )
+
+ @defer.inlineCallbacks
+ def _process_event_queue_loop(self):
+ try:
+ self._is_processing = True
+ while True:
+ last_token = yield self.store.get_federation_out_pos("events")
+ next_token, events = yield self.store.get_all_new_events_stream(
+ last_token, self._last_poked_id, limit=100,
+ )
+
+ logger.debug("Handling %s -> %s", last_token, next_token)
+
+ if not events and next_token >= self._last_poked_id:
+ break
+
+ @defer.inlineCallbacks
+ def handle_event(event):
+ # Only send events for this server.
+ send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
+ is_mine = self.is_mine_id(event.sender)
+ if not is_mine and send_on_behalf_of is None:
+ return
+
+ try:
+ # Get the state from before the event.
+ # We need to make sure that this is the state from before
+ # the event and not from after it.
+ # Otherwise if the last member on a server in a room is
+ # banned then it won't receive the event because it won't
+ # be in the room after the ban.
+ destinations = yield self.state.get_current_hosts_in_room(
+ event.room_id, latest_event_ids=event.prev_event_ids(),
+ )
+ except Exception:
+ logger.exception(
+ "Failed to calculate hosts in room for event: %s",
+ event.event_id,
+ )
+ return
+
+ destinations = set(destinations)
+
+ if send_on_behalf_of is not None:
+ # If we are sending the event on behalf of another server
+ # then it already has the event and there is no reason to
+ # send the event to it.
+ destinations.discard(send_on_behalf_of)
+
+ logger.debug("Sending %s to %r", event, destinations)
+
+ self._send_pdu(event, destinations)
+
+ @defer.inlineCallbacks
+ def handle_room_events(events):
+ for event in events:
+ yield handle_event(event)
+
+ events_by_room = {}
+ for event in events:
+ events_by_room.setdefault(event.room_id, []).append(event)
+
+ yield logcontext.make_deferred_yieldable(defer.gatherResults(
+ [
+ logcontext.run_in_background(handle_room_events, evs)
+ for evs in itervalues(events_by_room)
+ ],
+ consumeErrors=True
+ ))
+
+ yield self.store.update_federation_out_pos(
+ "events", next_token
+ )
+
+ if events:
+ now = self.clock.time_msec()
+ ts = yield self.store.get_received_ts(events[-1].event_id)
+
+ synapse.metrics.event_processing_lag.labels(
+ "federation_sender").set(now - ts)
+ synapse.metrics.event_processing_last_ts.labels(
+ "federation_sender").set(ts)
+
+ events_processed_counter.inc(len(events))
+
+ event_processing_loop_room_count.labels(
+ "federation_sender"
+ ).inc(len(events_by_room))
+
+ event_processing_loop_counter.labels("federation_sender").inc()
+
+ synapse.metrics.event_processing_positions.labels(
+ "federation_sender").set(next_token)
+
+ finally:
+ self._is_processing = False
+
+ def _send_pdu(self, pdu, destinations):
+        # Queue the PDU on each destination's PerDestinationQueue: if a
+        # transaction to a destination is already in progress, the queue
+        # will pick this PDU up once the current transaction completes.
+
+ order = self._order
+ self._order += 1
+
+ destinations = set(destinations)
+ destinations.discard(self.server_name)
+ logger.debug("Sending to: %s", str(destinations))
+
+ if not destinations:
+ return
+
+ sent_pdus_destination_dist_total.inc(len(destinations))
+ sent_pdus_destination_dist_count.inc()
+
+ for destination in destinations:
+ self._get_per_destination_queue(destination).send_pdu(pdu, order)
+
+ @defer.inlineCallbacks
+ def send_read_receipt(self, receipt):
+ """Send a RR to any other servers in the room
+
+ Args:
+ receipt (synapse.types.ReadReceipt): receipt to be sent
+ """
+
+ # Some background on the rate-limiting going on here.
+ #
+ # It turns out that if we attempt to send out RRs as soon as we get them from
+ # a client, then we end up trying to do several hundred Hz of federation
+ # transactions. (The number of transactions scales as O(N^2) on the size of a
+ # room, since in a large room we have both more RRs coming in, and more servers
+ # to send them to.)
+ #
+ # This leads to a lot of CPU load, and we end up getting behind. The solution
+ # currently adopted is as follows:
+ #
+ # The first receipt in a given room is sent out immediately, at time T0. Any
+ # further receipts are, in theory, batched up for N seconds, where N is calculated
+ # based on the number of servers in the room to achieve a transaction frequency
+ # of around 50Hz. So, for example, if there were 100 servers in the room, then
+ # N would be 100 / 50Hz = 2 seconds.
+ #
+ # Then, after T+N, we flush out any receipts that have accumulated, and restart
+ # the timer to flush out more receipts at T+2N, etc. If no receipts accumulate,
+ # we stop the cycle and go back to the start.
+ #
+ # However, in practice, it is often possible to flush out receipts earlier: in
+ # particular, if we are sending a transaction to a given server anyway (for
+ # example, because we have a PDU or a RR in another room to send), then we may
+ # as well send out all of the pending RRs for that server. So it may be that
+ # by the time we get to T+N, we don't actually have any RRs left to send out.
+ # Nevertheless we continue to buffer up RRs for the room in question until we
+ # reach the point that no RRs arrive between timer ticks.
+ #
+ # For even more background, see https://github.com/matrix-org/synapse/issues/4730.
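+        #
+        # A worked example of the arithmetic (illustrative; it assumes the
+        # 50Hz target above, i.e.
+        # federation_rr_transactions_per_room_per_second = 50):
+        #
+        #   interval_per_room_ms = 1000.0 / 50   # 20 ms per destination
+        #   n_domains = 100                      # servers in the room
+        #   backoff_ms = 20 * 100                # 2000 ms, i.e. N = 2 seconds
+        #
+        # which matches the "100 servers => N = 2 seconds" figure above.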
+
+ room_id = receipt.room_id
+
+ # Work out which remote servers should be poked and poke them.
+ domains = yield self.state.get_current_hosts_in_room(room_id)
+ domains = [d for d in domains if d != self.server_name]
+ if not domains:
+ return
+
+ queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(
+ room_id
+ )
+
+ # if there is no flush yet scheduled, we will send out these receipts with
+ # immediate flushes, and schedule the next flush for this room.
+ if queues_pending_flush is not None:
+ logger.debug("Queuing receipt for: %r", domains)
+ else:
+ logger.debug("Sending receipt to: %r", domains)
+ self._schedule_rr_flush_for_room(room_id, len(domains))
+
+ for domain in domains:
+ queue = self._get_per_destination_queue(domain)
+ queue.queue_read_receipt(receipt)
+
+ # if there is already a RR flush pending for this room, then make sure this
+ # destination is registered for the flush
+ if queues_pending_flush is not None:
+ queues_pending_flush.add(queue)
+ else:
+ queue.flush_read_receipts_for_room(room_id)
+
+ def _schedule_rr_flush_for_room(self, room_id, n_domains):
+        # sending to n_domains servers will cause approximately n_domains
+        # transactions, so back off for that multiplied by the per-room RR
+        # transaction interval
+ backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
+
+ logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms)
+        # clock.call_later takes its delay in seconds, not milliseconds
+        self.clock.call_later(backoff_ms / 1000, self._flush_rrs_for_room, room_id)
+ self._queues_awaiting_rr_flush_by_room[room_id] = set()
+
+ def _flush_rrs_for_room(self, room_id):
+ queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
+ logger.debug("Flushing RRs in %s to %s", room_id, queues)
+
+ if not queues:
+ # no more RRs arrived for this room; we are done.
+ return
+
+ # schedule the next flush
+ self._schedule_rr_flush_for_room(room_id, len(queues))
+
+ for queue in queues:
+ queue.flush_read_receipts_for_room(room_id)
+
+ @logcontext.preserve_fn # the caller should not yield on this
+ @defer.inlineCallbacks
+ def send_presence(self, states):
+ """Send the new presence states to the appropriate destinations.
+
+ This actually queues up the presence states ready for sending and
+ triggers a background task to process them and send out the transactions.
+
+ Args:
+ states (list(UserPresenceState))
+ """
+ if not self.hs.config.use_presence:
+ # No-op if presence is disabled.
+ return
+
+ # First we queue up the new presence by user ID, so multiple presence
+ # updates in quick succession are correctly handled.
+        # We only want to send presence for our own users, so let's always
+        # filter here, just in case.
+ self.pending_presence.update({
+ state.user_id: state for state in states
+ if self.is_mine_id(state.user_id)
+ })
+
+ # We then handle the new pending presence in batches, first figuring
+ # out the destinations we need to send each state to and then poking it
+ # to attempt a new transaction. We linearize this so that we don't
+ # accidentally mess up the ordering and send multiple presence updates
+ # in the wrong order
+ if self._processing_pending_presence:
+ return
+
+ self._processing_pending_presence = True
+ try:
+ while True:
+ states_map = self.pending_presence
+ self.pending_presence = {}
+
+ if not states_map:
+ break
+
+ yield self._process_presence_inner(list(states_map.values()))
+ except Exception:
+ logger.exception("Error sending presence states to servers")
+ finally:
+ self._processing_pending_presence = False
+
+ def send_presence_to_destinations(self, states, destinations):
+ """Send the given presence states to the given destinations.
+
+ Args:
+ states (list[UserPresenceState])
+ destinations (list[str])
+ """
+
+ if not states or not self.hs.config.use_presence:
+ # No-op if presence is disabled.
+ return
+
+ for destination in destinations:
+ if destination == self.server_name:
+ continue
+ self._get_per_destination_queue(destination).send_presence(states)
+
+ @measure_func("txnqueue._process_presence")
+ @defer.inlineCallbacks
+ def _process_presence_inner(self, states):
+ """Given a list of states populate self.pending_presence_by_dest and
+ poke to send a new transaction to each destination
+
+ Args:
+ states (list(UserPresenceState))
+ """
+ hosts_and_states = yield get_interested_remotes(self.store, states, self.state)
+
+ for destinations, states in hosts_and_states:
+ for destination in destinations:
+ if destination == self.server_name:
+ continue
+ self._get_per_destination_queue(destination).send_presence(states)
+
+ def build_and_send_edu(self, destination, edu_type, content, key=None):
+ """Construct an Edu object, and queue it for sending
+
+ Args:
+ destination (str): name of server to send to
+ edu_type (str): type of EDU to send
+ content (dict): content of EDU
+ key (Any|None): clobbering key for this edu
+ """
+ if destination == self.server_name:
+ logger.info("Not sending EDU to ourselves")
+ return
+
+ edu = Edu(
+ origin=self.server_name,
+ destination=destination,
+ edu_type=edu_type,
+ content=content,
+ )
+
+ self.send_edu(edu, key)
+
+ def send_edu(self, edu, key):
+ """Queue an EDU for sending
+
+ Args:
+ edu (Edu): edu to send
+ key (Any|None): clobbering key for this edu
+ """
+ queue = self._get_per_destination_queue(edu.destination)
+ if key:
+ queue.send_keyed_edu(edu, key)
+ else:
+ queue.send_edu(edu)
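+
+    # A usage sketch (illustrative only: the destination, room_id and user_id
+    # are made-up values). A typing notification could be queued as a keyed
+    # EDU so that later updates clobber earlier ones:
+    #
+    #   sender.build_and_send_edu(
+    #       destination="remote.example.com",
+    #       edu_type="m.typing",
+    #       content={"room_id": room_id, "user_id": user_id, "typing": True},
+    #       key=room_id,
+    #   )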
+
+ def send_device_messages(self, destination):
+ if destination == self.server_name:
+ logger.info("Not sending device update to ourselves")
+ return
+
+ self._get_per_destination_queue(destination).attempt_new_transaction()
+
+ def get_current_token(self):
+ return 0
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
new file mode 100644
index 0000000..fae8bea
--- /dev/null
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -0,0 +1,386 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import datetime
+import logging
+
+from prometheus_client import Counter
+
+from twisted.internet import defer
+
+from synapse.api.errors import (
+ FederationDeniedError,
+ HttpResponseException,
+ RequestSendFailed,
+)
+from synapse.events import EventBase
+from synapse.federation.units import Edu
+from synapse.handlers.presence import format_user_presence_state
+from synapse.metrics import sent_transactions_counter
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage import UserPresenceState
+from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
+
+# This is defined in the Matrix spec and enforced by the receiver.
+MAX_EDUS_PER_TRANSACTION = 100
+
+logger = logging.getLogger(__name__)
+
+
+sent_edus_counter = Counter(
+ "synapse_federation_client_sent_edus", "Total number of EDUs successfully sent"
+)
+
+sent_edus_by_type = Counter(
+ "synapse_federation_client_sent_edus_by_type",
+ "Number of sent EDUs successfully sent, by event type",
+ ["type"],
+)
+
+
+class PerDestinationQueue(object):
+ """
+ Manages the per-destination transmission queues.
+
+ Args:
+ hs (synapse.HomeServer):
+        transaction_manager (TransactionManager):
+ destination (str): the server_name of the destination that we are managing
+ transmission for.
+ """
+
+ def __init__(self, hs, transaction_manager, destination):
+ self._server_name = hs.hostname
+ self._clock = hs.get_clock()
+ self._store = hs.get_datastore()
+ self._transaction_manager = transaction_manager
+
+ self._destination = destination
+ self.transmission_loop_running = False
+
+ # a list of tuples of (pending pdu, order)
+ self._pending_pdus = [] # type: list[tuple[EventBase, int]]
+ self._pending_edus = [] # type: list[Edu]
+
+ # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
+ # based on their key (e.g. typing events by room_id)
+ # Map of (edu_type, key) -> Edu
+ self._pending_edus_keyed = {} # type: dict[tuple[str, str], Edu]
+
+ # Map of user_id -> UserPresenceState of pending presence to be sent to this
+ # destination
+ self._pending_presence = {} # type: dict[str, UserPresenceState]
+
+ # room_id -> receipt_type -> user_id -> receipt_dict
+ self._pending_rrs = {}
+ self._rrs_pending_flush = False
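+        # e.g. (illustrative values):
+        #   self._pending_rrs = {
+        #       "!room:example.com": {
+        #           "m.read": {
+        #               "@user:example.com": {
+        #                   "event_ids": ["$event_id:example.com"],
+        #                   "data": {"ts": 1234567890},
+        #               },
+        #           },
+        #       },
+        #   }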
+
+ # stream_id of last successfully sent to-device message.
+ # NB: may be a long or an int.
+ self._last_device_stream_id = 0
+
+ # stream_id of last successfully sent device list update.
+ self._last_device_list_stream_id = 0
+
+ def __str__(self):
+ return "PerDestinationQueue[%s]" % self._destination
+
+ def pending_pdu_count(self):
+ return len(self._pending_pdus)
+
+ def pending_edu_count(self):
+ return (
+ len(self._pending_edus)
+ + len(self._pending_presence)
+ + len(self._pending_edus_keyed)
+ )
+
+ def send_pdu(self, pdu, order):
+ """Add a PDU to the queue, and start the transmission loop if neccessary
+
+ Args:
+ pdu (EventBase): pdu to send
+ order (int):
+ """
+ self._pending_pdus.append((pdu, order))
+ self.attempt_new_transaction()
+
+ def send_presence(self, states):
+ """Add presence updates to the queue. Start the transmission loop if neccessary.
+
+ Args:
+ states (iterable[UserPresenceState]): presence to send
+ """
+ self._pending_presence.update({state.user_id: state for state in states})
+ self.attempt_new_transaction()
+
+ def queue_read_receipt(self, receipt):
+ """Add a RR to the list to be sent. Doesn't start the transmission loop yet
+ (see flush_read_receipts_for_room)
+
+ Args:
+            receipt (synapse.types.ReadReceipt): receipt to be queued
+ """
+ self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
+ receipt.receipt_type, {}
+ )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}
+
+ def flush_read_receipts_for_room(self, room_id):
+ # if we don't have any read-receipts for this room, it may be that we've already
+ # sent them out, so we don't need to flush.
+ if room_id not in self._pending_rrs:
+ return
+ self._rrs_pending_flush = True
+ self.attempt_new_transaction()
+
+ def send_keyed_edu(self, edu, key):
+ self._pending_edus_keyed[(edu.edu_type, key)] = edu
+ self.attempt_new_transaction()
+
+ def send_edu(self, edu):
+ self._pending_edus.append(edu)
+ self.attempt_new_transaction()
+
+ def attempt_new_transaction(self):
+ """Try to start a new transaction to this destination
+
+ If there is already a transaction in progress to this destination,
+ returns immediately. Otherwise kicks off the process of sending a
+ transaction in the background.
+ """
+ if self.transmission_loop_running:
+            # XXX: this flag can get stuck on, e.g. by a never-ending
+            # request, at which point _pending_pdus just keeps growing.
+            # We need application-layer timeouts of some flavour for these
+            # requests.
+ logger.debug("TX [%s] Transaction already in progress", self._destination)
+ return
+
+ logger.debug("TX [%s] Starting transaction loop", self._destination)
+
+ run_as_background_process(
+ "federation_transaction_transmission_loop",
+ self._transaction_transmission_loop,
+ )
+
+ @defer.inlineCallbacks
+ def _transaction_transmission_loop(self):
+ pending_pdus = []
+ try:
+ self.transmission_loop_running = True
+
+            # This will throw if we wouldn't retry. We do this here so we fail
+            # quickly, but we will check it again later in the http client,
+            # which is why we discard the result here.
+ yield get_retry_limiter(self._destination, self._clock, self._store)
+
+ pending_pdus = []
+ while True:
+ device_message_edus, device_stream_id, dev_list_id = (
+ # We have to keep 2 free slots for presence and rr_edus
+ yield self._get_new_device_messages(MAX_EDUS_PER_TRANSACTION - 2)
+ )
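+                # Slot accounting sketch (illustrative): with
+                # MAX_EDUS_PER_TRANSACTION = 100, at most 98 EDUs come back as
+                # device messages above; one slot is reserved for an
+                # m.presence EDU and one for an m.receipt EDU, and
+                # _pop_pending_edus plus the keyed-EDU loop below fill
+                # whatever remains.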
+
+ # BEGIN CRITICAL SECTION
+ #
+ # In order to avoid a race condition, we need to make sure that
+ # the following code (from popping the queues up to the point
+ # where we decide if we actually have any pending messages) is
+ # atomic - otherwise new PDUs or EDUs might arrive in the
+ # meantime, but not get sent because we hold the
+ # transmission_loop_running flag.
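+                #
+                # (The section is atomic in practice because there are no
+                # yield points between here and the END CRITICAL SECTION
+                # marker below, so the reactor cannot interleave other
+                # callbacks in the meantime.)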
+
+ pending_pdus = self._pending_pdus
+
+                # We can only include at most 50 PDUs per transaction
+ pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:]
+
+ pending_edus = []
+
+                # We can only include at most 100 EDUs per transaction;
+                # rr_edus and pending_presence take at most one slot each.
+ pending_edus.extend(self._get_rr_edus(force_flush=False))
+ pending_presence = self._pending_presence
+ self._pending_presence = {}
+ if pending_presence:
+ pending_edus.append(
+ Edu(
+ origin=self._server_name,
+ destination=self._destination,
+ edu_type="m.presence",
+ content={
+ "push": [
+ format_user_presence_state(
+ presence, self._clock.time_msec()
+ )
+ for presence in pending_presence.values()
+ ]
+ },
+ )
+ )
+
+ pending_edus.extend(device_message_edus)
+ pending_edus.extend(
+ self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
+ )
+ while (
+ len(pending_edus) < MAX_EDUS_PER_TRANSACTION
+ and self._pending_edus_keyed
+ ):
+ _, val = self._pending_edus_keyed.popitem()
+ pending_edus.append(val)
+
+ if pending_pdus:
+ logger.debug(
+ "TX [%s] len(pending_pdus_by_dest[dest]) = %d",
+ self._destination,
+ len(pending_pdus),
+ )
+
+ if not pending_pdus and not pending_edus:
+ logger.debug("TX [%s] Nothing to send", self._destination)
+ self._last_device_stream_id = device_stream_id
+ return
+
+ # if we've decided to send a transaction anyway, and we have room, we
+ # may as well send any pending RRs
+ if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
+ pending_edus.extend(self._get_rr_edus(force_flush=True))
+
+ # END CRITICAL SECTION
+
+ success = yield self._transaction_manager.send_new_transaction(
+ self._destination, pending_pdus, pending_edus
+ )
+ if success:
+ sent_transactions_counter.inc()
+ sent_edus_counter.inc(len(pending_edus))
+ for edu in pending_edus:
+ sent_edus_by_type.labels(edu.edu_type).inc()
+ # Remove the acknowledged device messages from the database
+ # Only bother if we actually sent some device messages
+ if device_message_edus:
+ yield self._store.delete_device_msgs_for_remote(
+ self._destination, device_stream_id
+ )
+ logger.info(
+ "Marking as sent %r %r", self._destination, dev_list_id
+ )
+ yield self._store.mark_as_sent_devices_by_remote(
+ self._destination, dev_list_id
+ )
+
+ self._last_device_stream_id = device_stream_id
+ self._last_device_list_stream_id = dev_list_id
+ else:
+ break
+ except NotRetryingDestination as e:
+ logger.debug(
+ "TX [%s] not ready for retry yet (next retry at %s) - "
+ "dropping transaction for now",
+ self._destination,
+ datetime.datetime.fromtimestamp(
+ (e.retry_last_ts + e.retry_interval) / 1000.0
+ ),
+ )
+ except FederationDeniedError as e:
+ logger.info(e)
+ except HttpResponseException as e:
+ logger.warning(
+ "TX [%s] Received %d response to transaction: %s",
+ self._destination,
+ e.code,
+ e,
+ )
+ except RequestSendFailed as e:
+ logger.warning(
+ "TX [%s] Failed to send transaction: %s", self._destination, e
+ )
+
+ for p, _ in pending_pdus:
+ logger.info(
+ "Failed to send event %s to %s", p.event_id, self._destination
+ )
+ except Exception:
+ logger.exception("TX [%s] Failed to send transaction", self._destination)
+ for p, _ in pending_pdus:
+ logger.info(
+ "Failed to send event %s to %s", p.event_id, self._destination
+ )
+ finally:
+ # We want to be *very* sure we clear this after we stop processing
+ self.transmission_loop_running = False
+
+ def _get_rr_edus(self, force_flush):
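+        # NB: this is a generator which yields at most one m.receipt EDU;
+        # the bare `return`s below simply end the iteration early.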
+ if not self._pending_rrs:
+ return
+ if not force_flush and not self._rrs_pending_flush:
+ # not yet time for this lot
+ return
+
+ edu = Edu(
+ origin=self._server_name,
+ destination=self._destination,
+ edu_type="m.receipt",
+ content=self._pending_rrs,
+ )
+ self._pending_rrs = {}
+ self._rrs_pending_flush = False
+ yield edu
+
+ def _pop_pending_edus(self, limit):
+ pending_edus = self._pending_edus
+ pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:]
+ return pending_edus
+
+ @defer.inlineCallbacks
+ def _get_new_device_messages(self, limit):
+ last_device_list = self._last_device_list_stream_id
+ # Will return at most 20 entries
+ now_stream_id, results = yield self._store.get_devices_by_remote(
+ self._destination, last_device_list
+ )
+ edus = [
+ Edu(
+ origin=self._server_name,
+ destination=self._destination,
+ edu_type="m.device_list_update",
+ content=content,
+ )
+ for content in results
+ ]
+
+ assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
+
+ last_device_stream_id = self._last_device_stream_id
+ to_device_stream_id = self._store.get_to_device_stream_token()
+ contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
+ self._destination,
+ last_device_stream_id,
+ to_device_stream_id,
+ limit - len(edus),
+ )
+ edus.extend(
+ Edu(
+ origin=self._server_name,
+ destination=self._destination,
+ edu_type="m.direct_to_device",
+ content=content,
+ )
+ for content in contents
+ )
+
+ defer.returnValue((edus, stream_id, now_stream_id))
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
new file mode 100644
index 0000000..35e6b8f
--- /dev/null
+++ b/synapse/federation/sender/transaction_manager.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from twisted.internet import defer
+
+from synapse.api.errors import HttpResponseException
+from synapse.federation.persistence import TransactionActions
+from synapse.federation.units import Transaction
+from synapse.util.metrics import measure_func
+
+logger = logging.getLogger(__name__)
+
+
+class TransactionManager(object):
+ """Helper class which handles building and sending transactions
+
+ shared between PerDestinationQueue objects
+ """
+ def __init__(self, hs):
+ self._server_name = hs.hostname
+ self.clock = hs.get_clock() # nb must be called this for @measure_func
+ self._store = hs.get_datastore()
+ self._transaction_actions = TransactionActions(self._store)
+ self._transport_layer = hs.get_federation_transport_client()
+
+ # HACK to get unique tx id
+ self._next_txn_id = int(self.clock.time_msec())
+
+ @measure_func("_send_new_transaction")
+ @defer.inlineCallbacks
+ def send_new_transaction(self, destination, pending_pdus, pending_edus):
+
+ # Sort based on the order field
+ pending_pdus.sort(key=lambda t: t[1])
+ pdus = [x[0] for x in pending_pdus]
+ edus = pending_edus
+
+ success = True
+
+ logger.debug("TX [%s] _attempt_new_transaction", destination)
+
+ txn_id = str(self._next_txn_id)
+
+ logger.debug(
+ "TX [%s] {%s} Attempting new transaction"
+ " (pdus: %d, edus: %d)",
+ destination, txn_id,
+ len(pdus),
+ len(edus),
+ )
+
+ logger.debug("TX [%s] Persisting transaction...", destination)
+
+ transaction = Transaction.create_new(
+ origin_server_ts=int(self.clock.time_msec()),
+ transaction_id=txn_id,
+ origin=self._server_name,
+ destination=destination,
+ pdus=pdus,
+ edus=edus,
+ )
+
+ self._next_txn_id += 1
+
+ yield self._transaction_actions.prepare_to_send(transaction)
+
+ logger.debug("TX [%s] Persisted transaction", destination)
+ logger.info(
+ "TX [%s] {%s} Sending transaction [%s],"
+ " (PDUs: %d, EDUs: %d)",
+ destination, txn_id,
+ transaction.transaction_id,
+ len(pdus),
+ len(edus),
+ )
+
+ # Actually send the transaction
+
+ # FIXME (erikj): This is a bit of a hack to make the Pdu age
+ # keys work
+ def json_data_cb():
+ data = transaction.get_dict()
+ now = int(self.clock.time_msec())
+ if "pdus" in data:
+ for p in data["pdus"]:
+ if "age_ts" in p:
+ unsigned = p.setdefault("unsigned", {})
+ unsigned["age"] = now - int(p["age_ts"])
+ del p["age_ts"]
+ return data
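+        # i.e. a PDU queued with "age_ts": T is sent with unsigned.age set to
+        # (now - T), converting the absolute timestamp into an age in
+        # milliseconds at the moment of transmission.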
+
+ try:
+ response = yield self._transport_layer.send_transaction(
+ transaction, json_data_cb
+ )
+ code = 200
+ except HttpResponseException as e:
+ code = e.code
+ response = e.response
+
+ if e.code in (401, 404, 429) or 500 <= e.code:
+ logger.info(
+ "TX [%s] {%s} got %d response",
+ destination, txn_id, code
+ )
+ raise e
+
+ logger.info(
+ "TX [%s] {%s} got %d response",
+ destination, txn_id, code
+ )
+
+ yield self._transaction_actions.delivered(
+ transaction, code, response
+ )
+
+ logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
+
+ if code == 200:
+ for e_id, r in response.get("pdus", {}).items():
+ if "error" in r:
+                    logger.warning(
+ "TX [%s] {%s} Remote returned error for %s: %s",
+ destination, txn_id, e_id, r,
+ )
+ else:
+ for p in pdus:
+                    logger.warning(
+ "TX [%s] {%s} Failed to send event %s",
+ destination, txn_id, p.event_id,
+ )
+ success = False
+
+ defer.returnValue(success)
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
deleted file mode 100644
index 30941f5..0000000
--- a/synapse/federation/transaction_queue.py
+++ /dev/null
@@ -1,699 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import datetime
-import logging
-
-from six import itervalues
-
-from prometheus_client import Counter
-
-from twisted.internet import defer
-
-import synapse.metrics
-from synapse.api.errors import (
- FederationDeniedError,
- HttpResponseException,
- RequestSendFailed,
-)
-from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
-from synapse.metrics import (
- LaterGauge,
- event_processing_loop_counter,
- event_processing_loop_room_count,
- events_processed_counter,
- sent_transactions_counter,
-)
-from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.util import logcontext
-from synapse.util.metrics import measure_func
-from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
-
-from .persistence import TransactionActions
-from .units import Edu, Transaction
-
-logger = logging.getLogger(__name__)
-
-sent_pdus_destination_dist_count = Counter(
- "synapse_federation_client_sent_pdu_destinations:count",
- "Number of PDUs queued for sending to one or more destinations",
-)
-
-sent_pdus_destination_dist_total = Counter(
- "synapse_federation_client_sent_pdu_destinations:total", ""
- "Total number of PDUs queued for sending across all destinations",
-)
-
-sent_edus_counter = Counter(
- "synapse_federation_client_sent_edus",
- "Total number of EDUs successfully sent",
-)
-
-sent_edus_by_type = Counter(
- "synapse_federation_client_sent_edus_by_type",
- "Number of sent EDUs successfully sent, by event type",
- ["type"],
-)
-
-
-class TransactionQueue(object):
- """This class makes sure we only have one transaction in flight at
- a time for a given destination.
-
- It batches pending PDUs into single transactions.
- """
-
- def __init__(self, hs):
- self.hs = hs
- self.server_name = hs.hostname
-
- self.store = hs.get_datastore()
- self.state = hs.get_state_handler()
- self.transaction_actions = TransactionActions(self.store)
-
- self.transport_layer = hs.get_federation_transport_client()
-
- self.clock = hs.get_clock()
- self.is_mine_id = hs.is_mine_id
-
- # Is a mapping from destinations -> deferreds. Used to keep track
- # of which destinations have transactions in flight and when they are
- # done
- self.pending_transactions = {}
-
- LaterGauge(
- "synapse_federation_transaction_queue_pending_destinations",
- "",
- [],
- lambda: len(self.pending_transactions),
- )
-
- # Is a mapping from destination -> list of
- # tuple(pending pdus, deferred, order)
- self.pending_pdus_by_dest = pdus = {}
- # destination -> list of tuple(edu, deferred)
- self.pending_edus_by_dest = edus = {}
-
- # Map of user_id -> UserPresenceState for all the pending presence
- # to be sent out by user_id. Entries here get processed and put in
- # pending_presence_by_dest
- self.pending_presence = {}
-
- # Map of destination -> user_id -> UserPresenceState of pending presence
- # to be sent to each destinations
- self.pending_presence_by_dest = presence = {}
-
- # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
- # based on their key (e.g. typing events by room_id)
- # Map of destination -> (edu_type, key) -> Edu
- self.pending_edus_keyed_by_dest = edus_keyed = {}
-
- LaterGauge(
- "synapse_federation_transaction_queue_pending_pdus",
- "",
- [],
- lambda: sum(map(len, pdus.values())),
- )
- LaterGauge(
- "synapse_federation_transaction_queue_pending_edus",
- "",
- [],
- lambda: (
- sum(map(len, edus.values()))
- + sum(map(len, presence.values()))
- + sum(map(len, edus_keyed.values()))
- ),
- )
-
- # destination -> stream_id of last successfully sent to-device message.
- # NB: may be a long or an int.
- self.last_device_stream_id_by_dest = {}
-
- # destination -> stream_id of last successfully sent device list
- # update.
- self.last_device_list_stream_id_by_dest = {}
-
- # HACK to get unique tx id
- self._next_txn_id = int(self.clock.time_msec())
-
- self._order = 1
-
- self._is_processing = False
- self._last_poked_id = -1
-
- self._processing_pending_presence = False
-
- def notify_new_events(self, current_id):
- """This gets called when we have some new events we might want to
- send out to other servers.
- """
- self._last_poked_id = max(current_id, self._last_poked_id)
-
- if self._is_processing:
- return
-
- # fire off a processing loop in the background
- run_as_background_process(
- "process_event_queue_for_federation",
- self._process_event_queue_loop,
- )
-
- @defer.inlineCallbacks
- def _process_event_queue_loop(self):
- try:
- self._is_processing = True
- while True:
- last_token = yield self.store.get_federation_out_pos("events")
- next_token, events = yield self.store.get_all_new_events_stream(
- last_token, self._last_poked_id, limit=100,
- )
-
- logger.debug("Handling %s -> %s", last_token, next_token)
-
- if not events and next_token >= self._last_poked_id:
- break
-
- @defer.inlineCallbacks
- def handle_event(event):
- # Only send events for this server.
- send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
- is_mine = self.is_mine_id(event.sender)
- if not is_mine and send_on_behalf_of is None:
- return
-
- try:
- # Get the state from before the event.
- # We need to make sure that this is the state from before
- # the event and not from after it.
- # Otherwise if the last member on a server in a room is
- # banned then it won't receive the event because it won't
- # be in the room after the ban.
- destinations = yield self.state.get_current_hosts_in_room(
- event.room_id, latest_event_ids=event.prev_event_ids(),
- )
- except Exception:
- logger.exception(
- "Failed to calculate hosts in room for event: %s",
- event.event_id,
- )
- return
-
- destinations = set(destinations)
-
- if send_on_behalf_of is not None:
- # If we are sending the event on behalf of another server
- # then it already has the event and there is no reason to
- # send the event to it.
- destinations.discard(send_on_behalf_of)
-
- logger.debug("Sending %s to %r", event, destinations)
-
- self._send_pdu(event, destinations)
-
- @defer.inlineCallbacks
- def handle_room_events(events):
- for event in events:
- yield handle_event(event)
-
- events_by_room = {}
- for event in events:
- events_by_room.setdefault(event.room_id, []).append(event)
-
- yield logcontext.make_deferred_yieldable(defer.gatherResults(
- [
- logcontext.run_in_background(handle_room_events, evs)
- for evs in itervalues(events_by_room)
- ],
- consumeErrors=True
- ))
-
- yield self.store.update_federation_out_pos(
- "events", next_token
- )
-
- if events:
- now = self.clock.time_msec()
- ts = yield self.store.get_received_ts(events[-1].event_id)
-
- synapse.metrics.event_processing_lag.labels(
- "federation_sender").set(now - ts)
- synapse.metrics.event_processing_last_ts.labels(
- "federation_sender").set(ts)
-
- events_processed_counter.inc(len(events))
-
- event_processing_loop_room_count.labels(
- "federation_sender"
- ).inc(len(events_by_room))
-
- event_processing_loop_counter.labels("federation_sender").inc()
-
- synapse.metrics.event_processing_positions.labels(
- "federation_sender").set(next_token)
-
- finally:
- self._is_processing = False
-
- def _send_pdu(self, pdu, destinations):
- # We loop through all destinations to see whether we already have
- # a transaction in progress. If we do, stick it in the pending_pdus
- # table and we'll get back to it later.
-
- order = self._order
- self._order += 1
-
- destinations = set(destinations)
- destinations.discard(self.server_name)
- logger.debug("Sending to: %s", str(destinations))
-
- if not destinations:
- return
-
- sent_pdus_destination_dist_total.inc(len(destinations))
- sent_pdus_destination_dist_count.inc()
-
- for destination in destinations:
- self.pending_pdus_by_dest.setdefault(destination, []).append(
- (pdu, order)
- )
-
- self._attempt_new_transaction(destination)
-
- @logcontext.preserve_fn # the caller should not yield on this
- @defer.inlineCallbacks
- def send_presence(self, states):
- """Send the new presence states to the appropriate destinations.
-
- This actually queues up the presence states ready for sending and
- triggers a background task to process them and send out the transactions.
-
- Args:
- states (list(UserPresenceState))
- """
- if not self.hs.config.use_presence:
- # No-op if presence is disabled.
- return
-
- # First we queue up the new presence by user ID, so multiple presence
- # updates in quick successtion are correctly handled
- # We only want to send presence for our own users, so lets always just
- # filter here just in case.
- self.pending_presence.update({
- state.user_id: state for state in states
- if self.is_mine_id(state.user_id)
- })
-
- # We then handle the new pending presence in batches, first figuring
- # out the destinations we need to send each state to and then poking it
- # to attempt a new transaction. We linearize this so that we don't
- # accidentally mess up the ordering and send multiple presence updates
- # in the wrong order
- if self._processing_pending_presence:
- return
-
- self._processing_pending_presence = True
- try:
- while True:
- states_map = self.pending_presence
- self.pending_presence = {}
-
- if not states_map:
- break
-
- yield self._process_presence_inner(list(states_map.values()))
- except Exception:
- logger.exception("Error sending presence states to servers")
- finally:
- self._processing_pending_presence = False
-
- @measure_func("txnqueue._process_presence")
- @defer.inlineCallbacks
- def _process_presence_inner(self, states):
- """Given a list of states populate self.pending_presence_by_dest and
- poke to send a new transaction to each destination
-
- Args:
- states (list(UserPresenceState))
- """
- hosts_and_states = yield get_interested_remotes(self.store, states, self.state)
-
- for destinations, states in hosts_and_states:
- for destination in destinations:
- if destination == self.server_name:
- continue
-
- self.pending_presence_by_dest.setdefault(
- destination, {}
- ).update({
- state.user_id: state for state in states
- })
-
- self._attempt_new_transaction(destination)
-
- def send_edu(self, destination, edu_type, content, key=None):
- edu = Edu(
- origin=self.server_name,
- destination=destination,
- edu_type=edu_type,
- content=content,
- )
-
- if destination == self.server_name:
- logger.info("Not sending EDU to ourselves")
- return
-
- if key:
- self.pending_edus_keyed_by_dest.setdefault(
- destination, {}
- )[(edu.edu_type, key)] = edu
- else:
- self.pending_edus_by_dest.setdefault(destination, []).append(edu)
-
- self._attempt_new_transaction(destination)
-
- def send_device_messages(self, destination):
- if destination == self.server_name:
- logger.info("Not sending device update to ourselves")
- return
-
- self._attempt_new_transaction(destination)
-
- def get_current_token(self):
- return 0
-
- def _attempt_new_transaction(self, destination):
- """Try to start a new transaction to this destination
-
- If there is already a transaction in progress to this destination,
- returns immediately. Otherwise kicks off the process of sending a
- transaction in the background.
-
- Args:
- destination (str):
-
- Returns:
- None
- """
- # list of (pending_pdu, deferred, order)
- if destination in self.pending_transactions:
- # XXX: pending_transactions can get stuck on by a never-ending
- # request at which point pending_pdus_by_dest just keeps growing.
- # we need application-layer timeouts of some flavour of these
- # requests
- logger.debug(
- "TX [%s] Transaction already in progress",
- destination
- )
- return
-
- logger.debug("TX [%s] Starting transaction loop", destination)
-
- run_as_background_process(
- "federation_transaction_transmission_loop",
- self._transaction_transmission_loop,
- destination,
- )
-
- @defer.inlineCallbacks
- def _transaction_transmission_loop(self, destination):
- pending_pdus = []
- try:
- self.pending_transactions[destination] = 1
-
- # This will throw if we wouldn't retry. We do this here so we fail
- # quickly, but we will later check this again in the http client,
- # hence why we throw the result away.
- yield get_retry_limiter(destination, self.clock, self.store)
-
- pending_pdus = []
- while True:
- device_message_edus, device_stream_id, dev_list_id = (
- yield self._get_new_device_messages(destination)
- )
-
- # BEGIN CRITICAL SECTION
- #
- # In order to avoid a race condition, we need to make sure that
- # the following code (from popping the queues up to the point
- # where we decide if we actually have any pending messages) is
- # atomic - otherwise new PDUs or EDUs might arrive in the
- # meantime, but not get sent because we hold the
- # pending_transactions flag.
-
- pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
-
- # We can only include at most 50 PDUs per transactions
- pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:]
- if leftover_pdus:
- self.pending_pdus_by_dest[destination] = leftover_pdus
-
- pending_edus = self.pending_edus_by_dest.pop(destination, [])
-
- # We can only include at most 100 EDUs per transactions
- pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:]
- if leftover_edus:
- self.pending_edus_by_dest[destination] = leftover_edus
-
- pending_presence = self.pending_presence_by_dest.pop(destination, {})
-
- pending_edus.extend(
- self.pending_edus_keyed_by_dest.pop(destination, {}).values()
- )
-
- pending_edus.extend(device_message_edus)
- if pending_presence:
- pending_edus.append(
- Edu(
- origin=self.server_name,
- destination=destination,
- edu_type="m.presence",
- content={
- "push": [
- format_user_presence_state(
- presence, self.clock.time_msec()
- )
- for presence in pending_presence.values()
- ]
- },
- )
- )
-
- if pending_pdus:
- logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
- destination, len(pending_pdus))
-
- if not pending_pdus and not pending_edus:
- logger.debug("TX [%s] Nothing to send", destination)
- self.last_device_stream_id_by_dest[destination] = (
- device_stream_id
- )
- return
-
- # END CRITICAL SECTION
-
- success = yield self._send_new_transaction(
- destination, pending_pdus, pending_edus,
- )
- if success:
- sent_transactions_counter.inc()
- sent_edus_counter.inc(len(pending_edus))
- for edu in pending_edus:
- sent_edus_by_type.labels(edu.edu_type).inc()
- # Remove the acknowledged device messages from the database
- # Only bother if we actually sent some device messages
- if device_message_edus:
- yield self.store.delete_device_msgs_for_remote(
- destination, device_stream_id
- )
- logger.info("Marking as sent %r %r", destination, dev_list_id)
- yield self.store.mark_as_sent_devices_by_remote(
- destination, dev_list_id
- )
-
- self.last_device_stream_id_by_dest[destination] = device_stream_id
- self.last_device_list_stream_id_by_dest[destination] = dev_list_id
- else:
- break
- except NotRetryingDestination as e:
- logger.debug(
- "TX [%s] not ready for retry yet (next retry at %s) - "
- "dropping transaction for now",
- destination,
- datetime.datetime.fromtimestamp(
- (e.retry_last_ts + e.retry_interval) / 1000.0
- ),
- )
- except FederationDeniedError as e:
- logger.info(e)
- except HttpResponseException as e:
- logger.warning(
- "TX [%s] Received %d response to transaction: %s",
- destination, e.code, e,
- )
- except RequestSendFailed as e:
- logger.warning("TX [%s] Failed to send transaction: %s", destination, e)
-
- for p, _ in pending_pdus:
- logger.info("Failed to send event %s to %s", p.event_id,
- destination)
- except Exception:
- logger.exception(
- "TX [%s] Failed to send transaction",
- destination,
- )
- for p, _ in pending_pdus:
- logger.info("Failed to send event %s to %s", p.event_id,
- destination)
- finally:
- # We want to be *very* sure we delete this after we stop processing
- self.pending_transactions.pop(destination, None)
-
- @defer.inlineCallbacks
- def _get_new_device_messages(self, destination):
- last_device_stream_id = self.last_device_stream_id_by_dest.get(destination, 0)
- to_device_stream_id = self.store.get_to_device_stream_token()
- contents, stream_id = yield self.store.get_new_device_msgs_for_remote(
- destination, last_device_stream_id, to_device_stream_id
- )
- edus = [
- Edu(
- origin=self.server_name,
- destination=destination,
- edu_type="m.direct_to_device",
- content=content,
- )
- for content in contents
- ]
-
- last_device_list = self.last_device_list_stream_id_by_dest.get(destination, 0)
- now_stream_id, results = yield self.store.get_devices_by_remote(
- destination, last_device_list
- )
- edus.extend(
- Edu(
- origin=self.server_name,
- destination=destination,
- edu_type="m.device_list_update",
- content=content,
- )
- for content in results
- )
- defer.returnValue((edus, stream_id, now_stream_id))
-
- @measure_func("_send_new_transaction")
- @defer.inlineCallbacks
- def _send_new_transaction(self, destination, pending_pdus, pending_edus):
-
- # Sort based on the order field
- pending_pdus.sort(key=lambda t: t[1])
- pdus = [x[0] for x in pending_pdus]
- edus = pending_edus
-
- success = True
-
- logger.debug("TX [%s] _attempt_new_transaction", destination)
-
- txn_id = str(self._next_txn_id)
-
- logger.debug(
- "TX [%s] {%s} Attempting new transaction"
- " (pdus: %d, edus: %d)",
- destination, txn_id,
- len(pdus),
- len(edus),
- )
-
- logger.debug("TX [%s] Persisting transaction...", destination)
-
- transaction = Transaction.create_new(
- origin_server_ts=int(self.clock.time_msec()),
- transaction_id=txn_id,
- origin=self.server_name,
- destination=destination,
- pdus=pdus,
- edus=edus,
- )
-
- self._next_txn_id += 1
-
- yield self.transaction_actions.prepare_to_send(transaction)
-
- logger.debug("TX [%s] Persisted transaction", destination)
- logger.info(
- "TX [%s] {%s} Sending transaction [%s],"
- " (PDUs: %d, EDUs: %d)",
- destination, txn_id,
- transaction.transaction_id,
- len(pdus),
- len(edus),
- )
-
- # Actually send the transaction
-
- # FIXME (erikj): This is a bit of a hack to make the Pdu age
- # keys work
- def json_data_cb():
- data = transaction.get_dict()
- now = int(self.clock.time_msec())
- if "pdus" in data:
- for p in data["pdus"]:
- if "age_ts" in p:
- unsigned = p.setdefault("unsigned", {})
- unsigned["age"] = now - int(p["age_ts"])
- del p["age_ts"]
- return data
-
- try:
- response = yield self.transport_layer.send_transaction(
- transaction, json_data_cb
- )
- code = 200
- except HttpResponseException as e:
- code = e.code
- response = e.response
-
- if e.code in (401, 404, 429) or 500 <= e.code:
- logger.info(
- "TX [%s] {%s} got %d response",
- destination, txn_id, code
- )
- raise e
-
- logger.info(
- "TX [%s] {%s} got %d response",
- destination, txn_id, code
- )
-
- yield self.transaction_actions.delivered(
- transaction, code, response
- )
-
- logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
-
- if code == 200:
- for e_id, r in response.get("pdus", {}).items():
- if "error" in r:
- logger.warn(
- "TX [%s] {%s} Remote returned error for %s: %s",
- destination, txn_id, e_id, r,
- )
- else:
- for p in pdus:
- logger.warn(
- "TX [%s] {%s} Failed to send event %s",
- destination, txn_id, p.event_id,
- )
- success = False
-
- defer.returnValue(success)
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 8e2be21..e424c40 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -51,9 +51,10 @@ class TransportLayerClient(object):
logger.debug("get_room_state dest=%s, room=%s",
destination, room_id)
- path = _create_v1_path("/state/%s/", room_id)
+ path = _create_v1_path("/state/%s", room_id)
return self.client.get_json(
destination, path=path, args={"event_id": event_id},
+ try_trailing_slash_on_400=True,
)
@log_function
@@ -73,9 +74,10 @@ class TransportLayerClient(object):
logger.debug("get_room_state_ids dest=%s, room=%s",
destination, room_id)
- path = _create_v1_path("/state_ids/%s/", room_id)
+ path = _create_v1_path("/state_ids/%s", room_id)
return self.client.get_json(
destination, path=path, args={"event_id": event_id},
+ try_trailing_slash_on_400=True,
)
@log_function
@@ -95,8 +97,11 @@ class TransportLayerClient(object):
logger.debug("get_pdu dest=%s, event_id=%s",
destination, event_id)
- path = _create_v1_path("/event/%s/", event_id)
- return self.client.get_json(destination, path=path, timeout=timeout)
+ path = _create_v1_path("/event/%s", event_id)
+ return self.client.get_json(
+ destination, path=path, timeout=timeout,
+ try_trailing_slash_on_400=True,
+ )
@log_function
def backfill(self, destination, room_id, event_tuples, limit):
@@ -121,7 +126,7 @@ class TransportLayerClient(object):
# TODO: raise?
return
- path = _create_v1_path("/backfill/%s/", room_id)
+ path = _create_v1_path("/backfill/%s", room_id)
args = {
"v": event_tuples,
@@ -132,6 +137,7 @@ class TransportLayerClient(object):
destination,
path=path,
args=args,
+ try_trailing_slash_on_400=True,
)
@defer.inlineCallbacks
@@ -167,7 +173,7 @@ class TransportLayerClient(object):
# generated by the json_data_callback.
json_data = transaction.get_dict()
- path = _create_v1_path("/send/%s/", transaction.transaction_id)
+ path = _create_v1_path("/send/%s", transaction.transaction_id)
response = yield self.client.put_json(
transaction.destination,
@@ -176,6 +182,7 @@ class TransportLayerClient(object):
json_data_callback=json_data_callback,
long_retries=True,
backoff_on_404=True, # If we get a 404 the other side has gone
+ try_trailing_slash_on_400=True,
)
defer.returnValue(response)
@@ -959,7 +966,7 @@ def _create_v1_path(path, *args):
Example:
- _create_v1_path("/event/%s/", event_id)
+ _create_v1_path("/event/%s", event_id)
Args:
path (str): String template for the path
@@ -980,7 +987,7 @@ def _create_v2_path(path, *args):
Example:
- _create_v2_path("/event/%s/", event_id)
+ _create_v2_path("/event/%s", event_id)
Args:
path (str): String template for the path
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 5ba94be..385eda2 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -21,8 +21,8 @@ import re
from twisted.internet import defer
import synapse
-from synapse.api.constants import RoomVersions
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
+from synapse.api.room_versions import RoomVersions
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.http.server import JsonResource
@@ -63,11 +63,7 @@ class TransportLayerServer(JsonResource):
self.authenticator = Authenticator(hs)
self.ratelimiter = FederationRateLimiter(
self.clock,
- window_size=hs.config.federation_rc_window_size,
- sleep_limit=hs.config.federation_rc_sleep_limit,
- sleep_msec=hs.config.federation_rc_sleep_delay,
- reject_limit=hs.config.federation_rc_reject_limit,
- concurrent_requests=hs.config.federation_rc_concurrent,
+ config=hs.config.rc_federation,
)
self.register_servlets()
@@ -312,7 +308,7 @@ class BaseFederationServlet(object):
class FederationSendServlet(BaseFederationServlet):
- PATH = "/send/(?P<transaction_id>[^/]*)/"
+ PATH = "/send/(?P<transaction_id>[^/]*)/?"
def __init__(self, handler, server_name, **kwargs):
super(FederationSendServlet, self).__init__(
@@ -378,7 +374,7 @@ class FederationSendServlet(BaseFederationServlet):
class FederationEventServlet(BaseFederationServlet):
- PATH = "/event/(?P<event_id>[^/]*)/"
+ PATH = "/event/(?P<event_id>[^/]*)/?"
# This is when someone asks for a data item for a given server data_id pair.
def on_GET(self, origin, content, query, event_id):
@@ -386,30 +382,30 @@ class FederationEventServlet(BaseFederationServlet):
class FederationStateServlet(BaseFederationServlet):
- PATH = "/state/(?P<context>[^/]*)/"
+ PATH = "/state/(?P<context>[^/]*)/?"
# This is when someone asks for all data for a given context.
def on_GET(self, origin, content, query, context):
return self.handler.on_context_state_request(
origin,
context,
- parse_string_from_args(query, "event_id", None),
+ parse_string_from_args(query, "event_id", None, required=True),
)
class FederationStateIdsServlet(BaseFederationServlet):
- PATH = "/state_ids/(?P<room_id>[^/]*)/"
+ PATH = "/state_ids/(?P<room_id>[^/]*)/?"
def on_GET(self, origin, content, query, room_id):
return self.handler.on_state_ids_request(
origin,
room_id,
- parse_string_from_args(query, "event_id", None),
+ parse_string_from_args(query, "event_id", None, required=True),
)
class FederationBackfillServlet(BaseFederationServlet):
- PATH = "/backfill/(?P<context>[^/]*)/"
+ PATH = "/backfill/(?P<context>[^/]*)/?"
def on_GET(self, origin, content, query, context):
versions = [x.decode('ascii') for x in query[b"v"]]
@@ -513,7 +509,7 @@ class FederationV1InviteServlet(BaseFederationServlet):
# state resolution algorithm, and we don't use that for processing
# invites
content = yield self.handler.on_invite_request(
- origin, content, room_version=RoomVersions.V1,
+ origin, content, room_version=RoomVersions.V1.identifier,
)
# V1 federation API is defined to return a content of `[200, {...}]`
@@ -716,8 +712,17 @@ class PublicRoomList(BaseFederationServlet):
PATH = "/publicRooms"
+ def __init__(self, handler, authenticator, ratelimiter, server_name, deny_access):
+ super(PublicRoomList, self).__init__(
+ handler, authenticator, ratelimiter, server_name,
+ )
+ self.deny_access = deny_access
+
@defer.inlineCallbacks
def on_GET(self, origin, content, query):
+ if self.deny_access:
+ raise FederationDeniedError(origin)
+
limit = parse_integer_from_args(query, "limit", 0)
since_token = parse_string_from_args(query, "since", None)
include_all_networks = parse_boolean_from_args(
@@ -759,7 +764,7 @@ class FederationVersionServlet(BaseFederationServlet):
class FederationGroupsProfileServlet(BaseFederationServlet):
"""Get/set the basic profile of a group on behalf of a user
"""
- PATH = "/groups/(?P<group_id>[^/]*)/profile$"
+ PATH = "/groups/(?P<group_id>[^/]*)/profile"
@defer.inlineCallbacks
def on_GET(self, origin, content, query, group_id):
@@ -787,7 +792,7 @@ class FederationGroupsProfileServlet(BaseFederationServlet):
class FederationGroupsSummaryServlet(BaseFederationServlet):
- PATH = "/groups/(?P<group_id>[^/]*)/summary$"
+ PATH = "/groups/(?P<group_id>[^/]*)/summary"
@defer.inlineCallbacks
def on_GET(self, origin, content, query, group_id):
@@ -805,7 +810,7 @@ class FederationGroupsSummaryServlet(BaseFederationServlet):
class FederationGroupsRoomsServlet(BaseFederationServlet):
"""Get the rooms in a group on behalf of a user
"""
- PATH = "/groups/(?P<group_id>[^/]*)/rooms$"
+ PATH = "/groups/(?P<group_id>[^/]*)/rooms"
@defer.inlineCallbacks
def on_GET(self, origin, content, query, group_id):
@@ -823,7 +828,7 @@ class FederationGroupsRoomsServlet(BaseFederationServlet):
class FederationGroupsAddRoomsServlet(BaseFederationServlet):
"""Add/remove room from group
"""
- PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)$"
+ PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, group_id, room_id):
@@ -855,7 +860,7 @@ class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet):
"""
PATH = (
"/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
- "/config/(?P<config_key>[^/]*)$"
+ "/config/(?P<config_key>[^/]*)"
)
@defer.inlineCallbacks
@@ -874,7 +879,7 @@ class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet):
class FederationGroupsUsersServlet(BaseFederationServlet):
"""Get the users in a group on behalf of a user
"""
- PATH = "/groups/(?P<group_id>[^/]*)/users$"
+ PATH = "/groups/(?P<group_id>[^/]*)/users"
@defer.inlineCallbacks
def on_GET(self, origin, content, query, group_id):
@@ -892,7 +897,7 @@ class FederationGroupsUsersServlet(BaseFederationServlet):
class FederationGroupsInvitedUsersServlet(BaseFederationServlet):
"""Get the users that have been invited to a group
"""
- PATH = "/groups/(?P<group_id>[^/]*)/invited_users$"
+ PATH = "/groups/(?P<group_id>[^/]*)/invited_users"
@defer.inlineCallbacks
def on_GET(self, origin, content, query, group_id):
@@ -910,7 +915,7 @@ class FederationGroupsInvitedUsersServlet(BaseFederationServlet):
class FederationGroupsInviteServlet(BaseFederationServlet):
"""Ask a group server to invite someone to the group
"""
- PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite$"
+ PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, group_id, user_id):
@@ -928,7 +933,7 @@ class FederationGroupsInviteServlet(BaseFederationServlet):
class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
"""Accept an invitation from the group server
"""
- PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite$"
+ PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, group_id, user_id):
@@ -945,7 +950,7 @@ class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
class FederationGroupsJoinServlet(BaseFederationServlet):
"""Attempt to join a group
"""
- PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join$"
+ PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, group_id, user_id):
@@ -962,7 +967,7 @@ class FederationGroupsJoinServlet(BaseFederationServlet):
class FederationGroupsRemoveUserServlet(BaseFederationServlet):
"""Leave or kick a user from the group
"""
- PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove$"
+ PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, group_id, user_id):
@@ -980,7 +985,7 @@ class FederationGroupsRemoveUserServlet(BaseFederationServlet):
class FederationGroupsLocalInviteServlet(BaseFederationServlet):
"""A group server has invited a local user
"""
- PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite$"
+ PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, group_id, user_id):
@@ -997,7 +1002,7 @@ class FederationGroupsLocalInviteServlet(BaseFederationServlet):
class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet):
"""A group server has removed a local user
"""
- PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove$"
+ PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, group_id, user_id):
@@ -1014,7 +1019,7 @@ class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet):
class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
"""A group or user's server renews their attestation
"""
- PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)$"
+ PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, group_id, user_id):
@@ -1037,7 +1042,7 @@ class FederationGroupsSummaryRoomsServlet(BaseFederationServlet):
PATH = (
"/groups/(?P<group_id>[^/]*)/summary"
"(/categories/(?P<category_id>[^/]+))?"
- "/rooms/(?P<room_id>[^/]*)$"
+ "/rooms/(?P<room_id>[^/]*)"
)
@defer.inlineCallbacks
@@ -1080,7 +1085,7 @@ class FederationGroupsCategoriesServlet(BaseFederationServlet):
"""Get all categories for a group
"""
PATH = (
- "/groups/(?P<group_id>[^/]*)/categories/$"
+ "/groups/(?P<group_id>[^/]*)/categories/?"
)
@defer.inlineCallbacks
@@ -1100,7 +1105,7 @@ class FederationGroupsCategoryServlet(BaseFederationServlet):
"""Add/remove/get a category in a group
"""
PATH = (
- "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)$"
+ "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"
)
@defer.inlineCallbacks
@@ -1150,7 +1155,7 @@ class FederationGroupsRolesServlet(BaseFederationServlet):
"""Get roles in a group
"""
PATH = (
- "/groups/(?P<group_id>[^/]*)/roles/$"
+ "/groups/(?P<group_id>[^/]*)/roles/?"
)
@defer.inlineCallbacks
@@ -1170,7 +1175,7 @@ class FederationGroupsRoleServlet(BaseFederationServlet):
"""Add/remove/get a role in a group
"""
PATH = (
- "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)$"
+ "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"
)
@defer.inlineCallbacks
@@ -1226,7 +1231,7 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet):
PATH = (
"/groups/(?P<group_id>[^/]*)/summary"
"(/roles/(?P<role_id>[^/]+))?"
- "/users/(?P<user_id>[^/]*)$"
+ "/users/(?P<user_id>[^/]*)"
)
@defer.inlineCallbacks
@@ -1269,7 +1274,7 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):
"""Get roles in a group
"""
PATH = (
- "/get_groups_publicised$"
+ "/get_groups_publicised"
)
@defer.inlineCallbacks
@@ -1284,7 +1289,7 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):
class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
"""Sets whether a group is joinable without an invite or knock
"""
- PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy$"
+ PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, group_id):
@@ -1417,6 +1422,7 @@ def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=N
authenticator=authenticator,
ratelimiter=ratelimiter,
server_name=hs.hostname,
+ deny_access=hs.config.restrict_public_rooms_to_local_users,
).register(resource)
if "group_server" in servlet_groups:
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index a7eaead..817be40 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -22,6 +22,7 @@ from twisted.internet import defer
from synapse.api.errors import SynapseError
from synapse.types import GroupID, RoomID, UserID, get_domain_from_id
+from synapse.util.async_helpers import concurrently_execute
logger = logging.getLogger(__name__)
@@ -896,6 +897,78 @@ class GroupsServerHandler(object):
"group_id": group_id,
})
+ @defer.inlineCallbacks
+ def delete_group(self, group_id, requester_user_id):
+ """Deletes a group, kicking out all current members.
+
+ Only group admins or server admins can make this request
+
+ Args:
+ group_id (str)
+ requester_user_id (str)
+
+ Returns:
+ Deferred
+ """
+
+ yield self.check_group_is_ours(
+ group_id, requester_user_id,
+ and_exists=True,
+ )
+
+ # Only server admins or group admins can delete groups.
+
+ is_admin = yield self.store.is_user_admin_in_group(
+ group_id, requester_user_id
+ )
+
+ if not is_admin:
+ is_admin = yield self.auth.is_server_admin(
+ UserID.from_string(requester_user_id),
+ )
+
+ if not is_admin:
+ raise SynapseError(403, "User is not an admin")
+
+ # Before deleting the group, let's kick everyone out of it
+ users = yield self.store.get_users_in_group(
+ group_id, include_private=True,
+ )
+
+ @defer.inlineCallbacks
+ def _kick_user_from_group(user_id):
+ if self.hs.is_mine_id(user_id):
+ groups_local = self.hs.get_groups_local_handler()
+ yield groups_local.user_removed_from_group(group_id, user_id, {})
+ else:
+ yield self.transport_client.remove_user_from_group_notification(
+ get_domain_from_id(user_id), group_id, user_id, {}
+ )
+ yield self.store.maybe_delete_remote_profile_cache(user_id)
+
+ # We kick users out in the order of:
+ # 1. Non-admins
+ # 2. Other admins
+ # 3. The requester
+ #
+ # This is so that if the deletion fails for some reason, other admins or
+ # the requester still have auth to retry.
+ non_admins = []
+ admins = []
+ for u in users:
+ if u["user_id"] == requester_user_id:
+ continue
+ if u["is_admin"]:
+ admins.append(u["user_id"])
+ else:
+ non_admins.append(u["user_id"])
+
+ yield concurrently_execute(_kick_user_from_group, non_admins, 10)
+ yield concurrently_execute(_kick_user_from_group, admins, 10)
+ yield _kick_user_from_group(requester_user_id)
+
+ yield self.store.delete_group(group_id)
+
def _parse_join_policy_from_contents(content):
"""Given a content for a request, return the specified join policy or None
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index 594754c..dca337e 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -90,12 +90,12 @@ class BaseHandler(object):
messages_per_second = override.messages_per_second
burst_count = override.burst_count
else:
- messages_per_second = self.hs.config.rc_messages_per_second
- burst_count = self.hs.config.rc_message_burst_count
+ messages_per_second = self.hs.config.rc_message.per_second
+ burst_count = self.hs.config.rc_message.burst_count
- allowed, time_allowed = self.ratelimiter.send_message(
+ allowed, time_allowed = self.ratelimiter.can_do_action(
user_id, time_now,
- msg_rate_hz=messages_per_second,
+ rate_hz=messages_per_second,
burst_count=burst_count,
update=update,
)
@@ -165,6 +165,7 @@ class BaseHandler(object):
member_event.room_id,
"leave",
ratelimit=False,
+ require_consent=False,
)
except Exception as e:
logger.exception("Error kicking guest user: %s" % (e,))
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
new file mode 100644
index 0000000..2614465
--- /dev/null
+++ b/synapse/handlers/account_validity.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import email.mime.multipart
+import email.utils
+import logging
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+
+from twisted.internet import defer
+
+from synapse.api.errors import StoreError
+from synapse.types import UserID
+from synapse.util import stringutils
+from synapse.util.logcontext import make_deferred_yieldable
+
+try:
+ from synapse.push.mailer import load_jinja2_templates
+except ImportError:
+ load_jinja2_templates = None
+
+logger = logging.getLogger(__name__)
+
+
+class AccountValidityHandler(object):
+ def __init__(self, hs):
+ self.hs = hs
+ self.store = self.hs.get_datastore()
+ self.sendmail = self.hs.get_sendmail()
+ self.clock = self.hs.get_clock()
+
+ self._account_validity = self.hs.config.account_validity
+
+ if self._account_validity.renew_by_email_enabled and load_jinja2_templates:
+ # Don't do email-specific configuration if renewal by email is disabled.
+ try:
+ app_name = self.hs.config.email_app_name
+
+ self._subject = self._account_validity.renew_email_subject % {
+ "app": app_name,
+ }
+
+ self._from_string = self.hs.config.email_notif_from % {
+ "app": app_name,
+ }
+ except Exception:
+ # If substitution failed, fall back to the bare strings.
+ self._subject = self._account_validity.renew_email_subject
+ self._from_string = self.hs.config.email_notif_from
+
+ self._raw_from = email.utils.parseaddr(self._from_string)[1]
+
+ self._template_html, self._template_text = load_jinja2_templates(
+ config=self.hs.config,
+ template_html_name=self.hs.config.email_expiry_template_html,
+ template_text_name=self.hs.config.email_expiry_template_text,
+ )
+
+ # Check the renewal emails to send and send them every 30min.
+ self.clock.looping_call(
+ self.send_renewal_emails,
+ 30 * 60 * 1000,
+ )
+
+ @defer.inlineCallbacks
+ def send_renewal_emails(self):
+ """Gets the list of users whose account is expiring in the amount of time
+ configured in the ``renew_at`` parameter from the ``account_validity``
+ configuration, and sends renewal emails to all of these users as long as they
+ have an email 3PID attached to their account.
+ """
+ expiring_users = yield self.store.get_users_expiring_soon()
+
+ if expiring_users:
+ for user in expiring_users:
+ yield self._send_renewal_email(
+ user_id=user["user_id"],
+ expiration_ts=user["expiration_ts_ms"],
+ )
+
+ @defer.inlineCallbacks
+ def send_renewal_email_to_user(self, user_id):
+ expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
+ yield self._send_renewal_email(user_id, expiration_ts)
+
+ @defer.inlineCallbacks
+ def _send_renewal_email(self, user_id, expiration_ts):
+ """Sends out a renewal email to every email address attached to the given user
+ with a unique link allowing them to renew their account.
+
+ Args:
+ user_id (str): ID of the user to send email(s) to.
+ expiration_ts (int): Timestamp in milliseconds for the expiration date of
+ this user's account (used in the email templates).
+ """
+ addresses = yield self._get_email_addresses_for_user(user_id)
+
+ # Stop right here if the user doesn't have at least one email address.
+ # In this case, they will have to ask their server admin to renew their
+ # account manually.
+ if not addresses:
+ return
+
+ try:
+ user_display_name = yield self.store.get_profile_displayname(
+ UserID.from_string(user_id).localpart
+ )
+ if user_display_name is None:
+ user_display_name = user_id
+ except StoreError:
+ user_display_name = user_id
+
+ renewal_token = yield self._get_renewal_token(user_id)
+ url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % (
+ self.hs.config.public_baseurl,
+ renewal_token,
+ )
+
+ template_vars = {
+ "display_name": user_display_name,
+ "expiration_ts": expiration_ts,
+ "url": url,
+ }
+
+ html_text = self._template_html.render(**template_vars)
+ html_part = MIMEText(html_text, "html", "utf8")
+
+ plain_text = self._template_text.render(**template_vars)
+ text_part = MIMEText(plain_text, "plain", "utf8")
+
+ for address in addresses:
+ raw_to = email.utils.parseaddr(address)[1]
+
+ multipart_msg = MIMEMultipart('alternative')
+ multipart_msg['Subject'] = self._subject
+ multipart_msg['From'] = self._from_string
+ multipart_msg['To'] = address
+ multipart_msg['Date'] = email.utils.formatdate()
+ multipart_msg['Message-ID'] = email.utils.make_msgid()
+ multipart_msg.attach(text_part)
+ multipart_msg.attach(html_part)
+
+ logger.info("Sending renewal email to %s", address)
+
+ yield make_deferred_yieldable(self.sendmail(
+ self.hs.config.email_smtp_host,
+ self._raw_from, raw_to, multipart_msg.as_string().encode('utf8'),
+ reactor=self.hs.get_reactor(),
+ port=self.hs.config.email_smtp_port,
+ requireAuthentication=self.hs.config.email_smtp_user is not None,
+ username=self.hs.config.email_smtp_user,
+ password=self.hs.config.email_smtp_pass,
+ requireTransportSecurity=self.hs.config.require_transport_security
+ ))
+
+ yield self.store.set_renewal_mail_status(
+ user_id=user_id,
+ email_sent=True,
+ )
+
+ @defer.inlineCallbacks
+ def _get_email_addresses_for_user(self, user_id):
+ """Retrieve the list of email addresses attached to a user's account.
+
+ Args:
+ user_id (str): ID of the user to lookup email addresses for.
+
+ Returns:
+ defer.Deferred[list[str]]: Email addresses for this account.
+ """
+ threepids = yield self.store.user_get_threepids(user_id)
+
+ addresses = []
+ for threepid in threepids:
+ if threepid["medium"] == "email":
+ addresses.append(threepid["address"])
+
+ defer.returnValue(addresses)
+
+ @defer.inlineCallbacks
+ def _get_renewal_token(self, user_id):
+ """Generates a 32-byte long random string that will be inserted into the
+ user's renewal email's unique link, then saves it into the database.
+
+ Args:
+ user_id (str): ID of the user to generate a string for.
+
+ Returns:
+ defer.Deferred[str]: The generated string.
+
+ Raises:
+ StoreError(500): Couldn't generate a unique string after 5 attempts.
+ """
+ attempts = 0
+ while attempts < 5:
+ try:
+ renewal_token = stringutils.random_string(32)
+ yield self.store.set_renewal_token_for_user(user_id, renewal_token)
+ defer.returnValue(renewal_token)
+ except StoreError:
+ attempts += 1
+ raise StoreError(500, "Couldn't generate a unique string as refresh string.")
+
+ @defer.inlineCallbacks
+ def renew_account(self, renewal_token):
+ """Renews the account attached to a given renewal token by pushing back the
+ expiration date by the current validity period in the server's configuration.
+
+ Args:
+ renewal_token (str): Token sent with the renewal request.
+ """
+ user_id = yield self.store.get_user_from_renewal_token(renewal_token)
+ logger.debug("Renewing an account for user %s", user_id)
+ yield self.renew_account_for_user(user_id)
+
+ @defer.inlineCallbacks
+ def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False):
+ """Renews the account attached to a given user by pushing back the
+ expiration date by the current validity period in the server's
+ configuration.
+
+ Args:
+ user_id (str): ID of the user whose account to renew.
+ expiration_ts (int): New expiration date. Defaults to now + validity period.
+ email_sent (bool): Whether an email has been sent for this validity period.
+ Defaults to False.
+
+ Returns:
+ defer.Deferred[int]: New expiration date for this account, as a timestamp
+ in milliseconds since epoch.
+ """
+ if expiration_ts is None:
+ expiration_ts = self.clock.time_msec() + self._account_validity.period
+
+ yield self.store.set_account_validity_for_user(
+ user_id=user_id,
+ expiration_ts=expiration_ts,
+ email_sent=email_sent,
+ )
+
+ defer.returnValue(expiration_ts)
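
To make the renewal flow above concrete, a sketch with illustrative values. Note that _send_renewal_email concatenates public_baseurl directly with the path, so the configured base URL is assumed to end in a slash:

    public_baseurl = "https://matrix.example.com/"  # assumed config value
    renewal_token = "a" * 32  # 32 characters, as produced by _get_renewal_token
    url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % (
        public_baseurl, renewal_token,
    )

    period_ms = 30 * 24 * 60 * 60 * 1000  # e.g. an account_validity.period of 30 days
    now_ms = 1556668800000                # clock.time_msec() at renewal time
    expiration_ts = now_ms + period_ms    # new expiry stored by renew_account_for_user
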
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 2abd9af..aa5d89a 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -35,6 +35,7 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
+from synapse.api.ratelimiting import Ratelimiter
from synapse.module_api import ModuleApi
from synapse.types import UserID
from synapse.util import logcontext
@@ -99,6 +100,11 @@ class AuthHandler(BaseHandler):
login_types.append(t)
self._supported_login_types = login_types
+ self._account_ratelimiter = Ratelimiter()
+ self._failed_attempts_ratelimiter = Ratelimiter()
+
+ self._clock = self.hs.get_clock()
+
@defer.inlineCallbacks
def validate_user_via_ui_auth(self, requester, request_body, clientip):
"""
@@ -568,7 +574,12 @@ class AuthHandler(BaseHandler):
Returns:
defer.Deferred: (unicode) canonical_user_id, or None if zero or
multiple matches
+
+ Raises:
+ LimitExceededError if the ratelimiter's login requests count for this
+ user is too high to proceed.
"""
+ self.ratelimit_login_per_account(user_id)
res = yield self._find_user_id_and_pwd_hash(user_id)
if res is not None:
defer.returnValue(res[0])
@@ -634,6 +645,8 @@ class AuthHandler(BaseHandler):
StoreError if there was a problem accessing the database
SynapseError if there was a problem with the request
LoginError if there was an authentication problem.
+ LimitExceededError if the ratelimiter's login requests count for this
+ user is too high to proceed.
"""
if username.startswith('@'):
@@ -643,6 +656,8 @@ class AuthHandler(BaseHandler):
username, self.hs.hostname
).to_string()
+ self.ratelimit_login_per_account(qualified_user_id)
+
login_type = login_submission.get("type")
known_login_type = False
@@ -715,14 +730,57 @@ class AuthHandler(BaseHandler):
if not known_login_type:
raise SynapseError(400, "Unknown login type %s" % login_type)
- # unknown username or invalid password. We raise a 403 here, but note
- # that if we're doing user-interactive login, it turns all LoginErrors
- # into a 401 anyway.
+ # unknown username or invalid password.
+ self._failed_attempts_ratelimiter.ratelimit(
+ qualified_user_id.lower(), time_now_s=self._clock.time(),
+ rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
+ burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+ update=True,
+ )
+
+ # We raise a 403 here, but note that if we're doing user-interactive
+ # login, it turns all LoginErrors into a 401 anyway.
raise LoginError(
403, "Invalid password",
errcode=Codes.FORBIDDEN
)
+ @defer.inlineCallbacks
+ def check_password_provider_3pid(self, medium, address, password):
+ """Check if a password provider is able to validate a thirdparty login
+
+ Args:
+ medium (str): The medium of the 3pid (ex. email).
+ address (str): The address of the 3pid (ex. jdoe@example.com).
+ password (str): The password of the user.
+
+ Returns:
+ Deferred[(str|None, func|None)]: A tuple of `(user_id,
+ callback)`. If authentication is successful, `user_id` is a `str`
+ containing the authenticated, canonical user ID. `callback` is
+ then either a function to be later run after the server has
+ completed login/registration, or `None`. If authentication was
+ unsuccessful, `user_id` and `callback` are both `None`.
+ """
+ for provider in self.password_providers:
+ if hasattr(provider, "check_3pid_auth"):
+ # This function is able to return a deferred that either
+ # resolves None, meaning authentication failure, or upon
+ # success, to a str (which is the user_id) or a tuple of
+ # (user_id, callback_func), where callback_func should be run
+ # after we've finished everything else
+ result = yield provider.check_3pid_auth(
+ medium, address, password,
+ )
+ if result:
+ # Check if the return value is a str or a tuple
+ if isinstance(result, str):
+ # If it's a str, set callback function to None
+ result = (result, None)
+ defer.returnValue(result)
+
+ defer.returnValue((None, None))
+
@defer.inlineCallbacks
def _check_local_password(self, user_id, password):
"""Authenticate a user against the local password database.
@@ -734,7 +792,12 @@ class AuthHandler(BaseHandler):
user_id (unicode): complete @user:id
password (unicode): the provided password
Returns:
- (unicode) the canonical_user_id, or None if unknown user / bad password
+ Deferred[unicode] the canonical_user_id, or Deferred[None] if
+ unknown user/bad password
+
+ Raises:
+ LimitExceededError if the ratelimiter's login requests count for this
+ user is too high to proceed.
"""
lookupres = yield self._find_user_id_and_pwd_hash(user_id)
if not lookupres:
@@ -763,6 +826,7 @@ class AuthHandler(BaseHandler):
auth_api.validate_macaroon(macaroon, "login", True, user_id)
except Exception:
raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
+ self.ratelimit_login_per_account(user_id)
yield self.auth.check_auth_blocking(user_id)
defer.returnValue(user_id)
@@ -848,7 +912,7 @@ class AuthHandler(BaseHandler):
)
@defer.inlineCallbacks
- def delete_threepid(self, user_id, medium, address):
+ def delete_threepid(self, user_id, medium, address, id_server=None):
"""Attempts to unbind the 3pid on the identity servers and deletes it
from the local database.
@@ -856,6 +920,10 @@ class AuthHandler(BaseHandler):
user_id (str)
medium (str)
address (str)
+ id_server (str|None): Use the given identity server when unbinding
+ any threepids. If None, attempts to unbind using the
+ identity server specified when binding (if known).
+
Returns:
Deferred[bool]: Returns True if successfully unbound the 3pid on
@@ -873,6 +941,7 @@ class AuthHandler(BaseHandler):
{
'medium': medium,
'address': address,
+ 'id_server': id_server,
},
)
@@ -934,6 +1003,33 @@ class AuthHandler(BaseHandler):
else:
return defer.succeed(False)
+ def ratelimit_login_per_account(self, user_id):
+ """Checks whether the process must be stopped because of ratelimiting.
+
+ Checks against two ratelimiters: the generic one for login attempts per
+ account and the one specific to failed attempts.
+
+ Args:
+ user_id (unicode): complete @user:id
+
+ Raises:
+ LimitExceededError if one of the ratelimiters' login requests count
+ for this user is too high to proceed.
+ """
+ self._failed_attempts_ratelimiter.ratelimit(
+ user_id.lower(), time_now_s=self._clock.time(),
+ rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
+ burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+ update=False,
+ )
+
+ self._account_ratelimiter.ratelimit(
+ user_id.lower(), time_now_s=self._clock.time(),
+ rate_hz=self.hs.config.rc_login_account.per_second,
+ burst_count=self.hs.config.rc_login_account.burst_count,
+ update=True,
+ )
+
@attr.s
class MacaroonGenerator(object):
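
The two-stage check above is easier to see with the limiter in isolation; the rates below are illustrative, and the calls mirror those in ratelimit_login_per_account:

    from synapse.api.ratelimiting import Ratelimiter

    limiter = Ratelimiter()
    # update=False only peeks at the failed-attempts bucket, so merely
    # attempting a login is not recorded as a failure; the consuming
    # update=True call happens in the login-failure path above.
    limiter.ratelimit(
        "@alice:example.com", time_now_s=1000.0,
        rate_hz=0.17, burst_count=3, update=False,
    )
    # The per-account limiter does consume a token on every attempt.
    limiter.ratelimit(
        "@alice:example.com", time_now_s=1000.0,
        rate_hz=0.003, burst_count=5, update=True,
    )
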
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 75fe50c..6a91f76 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -43,12 +43,15 @@ class DeactivateAccountHandler(BaseHandler):
hs.get_reactor().callWhenRunning(self._start_user_parting)
@defer.inlineCallbacks
- def deactivate_account(self, user_id, erase_data):
+ def deactivate_account(self, user_id, erase_data, id_server=None):
"""Deactivate a user's account
Args:
user_id (str): ID of user to be deactivated
erase_data (bool): whether to GDPR-erase the user's data
+ id_server (str|None): Use the given identity server when unbinding
+ any threepids. If None, attempts to unbind using the
+ identity server specified when binding (if known).
Returns:
Deferred[bool]: True if identity server supports removing
@@ -74,6 +77,7 @@ class DeactivateAccountHandler(BaseHandler):
{
'medium': threepid['medium'],
'address': threepid['address'],
+ 'id_server': id_server,
},
)
identity_server_supports_unbinding &= result
@@ -164,6 +168,7 @@ class DeactivateAccountHandler(BaseHandler):
room_id,
"leave",
ratelimit=False,
+ require_consent=False,
)
except Exception:
logger.exception(
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index c708c35..b398848 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -37,13 +37,185 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__)
-class DeviceHandler(BaseHandler):
+class DeviceWorkerHandler(BaseHandler):
def __init__(self, hs):
- super(DeviceHandler, self).__init__(hs)
+ super(DeviceWorkerHandler, self).__init__(hs)
self.hs = hs
self.state = hs.get_state_handler()
self._auth_handler = hs.get_auth_handler()
+
+ @defer.inlineCallbacks
+ def get_devices_by_user(self, user_id):
+ """
+ Retrieve the given user's devices
+
+ Args:
+ user_id (str):
+ Returns:
+ defer.Deferred: list[dict[str, X]]: info on each device
+ """
+
+ device_map = yield self.store.get_devices_by_user(user_id)
+
+ ips = yield self.store.get_last_client_ip_by_device(
+ user_id, device_id=None
+ )
+
+ devices = list(device_map.values())
+ for device in devices:
+ _update_device_from_client_ips(device, ips)
+
+ defer.returnValue(devices)
+
+ @defer.inlineCallbacks
+ def get_device(self, user_id, device_id):
+ """ Retrieve the given device
+
+ Args:
+ user_id (str):
+ device_id (str):
+
+ Returns:
+ defer.Deferred: dict[str, X]: info on the device
+ Raises:
+ errors.NotFoundError: if the device was not found
+ """
+ try:
+ device = yield self.store.get_device(user_id, device_id)
+ except errors.StoreError:
+ raise errors.NotFoundError
+ ips = yield self.store.get_last_client_ip_by_device(
+ user_id, device_id,
+ )
+ _update_device_from_client_ips(device, ips)
+ defer.returnValue(device)
+
+ @measure_func("device.get_user_ids_changed")
+ @defer.inlineCallbacks
+ def get_user_ids_changed(self, user_id, from_token):
+ """Get list of users that have had the devices updated, or have newly
+ joined a room, that `user_id` may be interested in.
+
+ Args:
+ user_id (str)
+ from_token (StreamToken)
+ """
+ now_room_key = yield self.store.get_room_events_max_id()
+
+ room_ids = yield self.store.get_rooms_for_user(user_id)
+
+ # First we check if any devices have changed
+ changed = yield self.store.get_user_whose_devices_changed(
+ from_token.device_list_key
+ )
+
+ # Then work out if any users have since joined
+ rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)
+
+ member_events = yield self.store.get_membership_changes_for_user(
+ user_id, from_token.room_key, now_room_key,
+ )
+ rooms_changed.update(event.room_id for event in member_events)
+
+ stream_ordering = RoomStreamToken.parse_stream_token(
+ from_token.room_key
+ ).stream
+
+ possibly_changed = set(changed)
+ possibly_left = set()
+ for room_id in rooms_changed:
+ current_state_ids = yield self.store.get_current_state_ids(room_id)
+
+ # The user may have left the room
+ # TODO: Check if they actually did or if we were just invited.
+ if room_id not in room_ids:
+ for key, event_id in iteritems(current_state_ids):
+ etype, state_key = key
+ if etype != EventTypes.Member:
+ continue
+ possibly_left.add(state_key)
+ continue
+
+ # Fetch the current state at the time.
+ try:
+ event_ids = yield self.store.get_forward_extremeties_for_room(
+ room_id, stream_ordering=stream_ordering
+ )
+ except errors.StoreError:
+ # we have purged the stream_ordering index since the stream
+ # ordering: treat it the same as a new room
+ event_ids = []
+
+ # special-case for an empty prev state: include all members
+ # in the changed list
+ if not event_ids:
+ for key, event_id in iteritems(current_state_ids):
+ etype, state_key = key
+ if etype != EventTypes.Member:
+ continue
+ possibly_changed.add(state_key)
+ continue
+
+ current_member_id = current_state_ids.get((EventTypes.Member, user_id))
+ if not current_member_id:
+ continue
+
+ # mapping from event_id -> state_dict
+ prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)
+
+ # Check if we've joined the room? If so we just blindly add all the users to
+ # the "possibly changed" users.
+ for state_dict in itervalues(prev_state_ids):
+ member_event = state_dict.get((EventTypes.Member, user_id), None)
+ if not member_event or member_event != current_member_id:
+ for key, event_id in iteritems(current_state_ids):
+ etype, state_key = key
+ if etype != EventTypes.Member:
+ continue
+ possibly_changed.add(state_key)
+ break
+
+ # If there has been any change in membership, include them in the
+ # possibly changed list. We'll check if they are joined below,
+ # and we're not toooo worried about spuriously adding users.
+ for key, event_id in iteritems(current_state_ids):
+ etype, state_key = key
+ if etype != EventTypes.Member:
+ continue
+
+ # check if this member has changed since any of the extremities
+ # at the stream_ordering, and add them to the list if so.
+ for state_dict in itervalues(prev_state_ids):
+ prev_event_id = state_dict.get(key, None)
+ if not prev_event_id or prev_event_id != event_id:
+ if state_key != user_id:
+ possibly_changed.add(state_key)
+ break
+
+ if possibly_changed or possibly_left:
+ users_who_share_room = yield self.store.get_users_who_share_room_with_user(
+ user_id
+ )
+
+ # Take the intersection of the users whose devices may have changed
+ # and those that actually still share a room with the user
+ possibly_joined = possibly_changed & users_who_share_room
+ possibly_left = (possibly_changed | possibly_left) - users_who_share_room
+ else:
+ possibly_joined = []
+ possibly_left = []
+
+ defer.returnValue({
+ "changed": list(possibly_joined),
+ "left": list(possibly_left),
+ })
+
+
+class DeviceHandler(DeviceWorkerHandler):
+ def __init__(self, hs):
+ super(DeviceHandler, self).__init__(hs)
+
self.federation_sender = hs.get_federation_sender()
self._edu_updater = DeviceListEduUpdater(hs, self)
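
The tail of get_user_ids_changed reduces to plain set arithmetic; a worked sketch with illustrative user IDs:

    possibly_changed = {"@a:hs", "@b:hs"}     # devices may have changed
    possibly_left = {"@c:hs"}                 # may have left a shared room
    users_who_share_room = {"@a:hs", "@c:hs"}

    possibly_joined = possibly_changed & users_who_share_room
    # {"@a:hs"}: still shares a room, reported under "changed"
    possibly_left = (possibly_changed | possibly_left) - users_who_share_room
    # {"@b:hs"}: no longer shares a room, reported under "left"
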
@@ -103,52 +275,6 @@ class DeviceHandler(BaseHandler):
raise errors.StoreError(500, "Couldn't generate a device ID.")
- @defer.inlineCallbacks
- def get_devices_by_user(self, user_id):
- """
- Retrieve the given user's devices
-
- Args:
- user_id (str):
- Returns:
- defer.Deferred: list[dict[str, X]]: info on each device
- """
-
- device_map = yield self.store.get_devices_by_user(user_id)
-
- ips = yield self.store.get_last_client_ip_by_device(
- user_id, device_id=None
- )
-
- devices = list(device_map.values())
- for device in devices:
- _update_device_from_client_ips(device, ips)
-
- defer.returnValue(devices)
-
- @defer.inlineCallbacks
- def get_device(self, user_id, device_id):
- """ Retrieve the given device
-
- Args:
- user_id (str):
- device_id (str):
-
- Returns:
- defer.Deferred: dict[str, X]: info on the device
- Raises:
- errors.NotFoundError: if the device was not found
- """
- try:
- device = yield self.store.get_device(user_id, device_id)
- except errors.StoreError:
- raise errors.NotFoundError
- ips = yield self.store.get_last_client_ip_by_device(
- user_id, device_id,
- )
- _update_device_from_client_ips(device, ips)
- defer.returnValue(device)
-
@defer.inlineCallbacks
def delete_device(self, user_id, device_id):
""" Delete the given device
@@ -276,6 +402,12 @@ class DeviceHandler(BaseHandler):
user_id, device_ids, list(hosts)
)
+ for device_id in device_ids:
+ logger.debug(
+ "Notifying about update %r/%r, ID: %r", user_id, device_id,
+ position,
+ )
+
room_ids = yield self.store.get_rooms_for_user(user_id)
yield self.notifier.on_new_event(
@@ -283,130 +415,10 @@ class DeviceHandler(BaseHandler):
)
if hosts:
- logger.info("Sending device list update notif to: %r", hosts)
+ logger.info("Sending device list update notif for %r to: %r", user_id, hosts)
for host in hosts:
self.federation_sender.send_device_messages(host)
- @measure_func("device.get_user_ids_changed")
- @defer.inlineCallbacks
- def get_user_ids_changed(self, user_id, from_token):
- """Get list of users that have had the devices updated, or have newly
- joined a room, that `user_id` may be interested in.
-
- Args:
- user_id (str)
- from_token (StreamToken)
- """
- now_token = yield self.hs.get_event_sources().get_current_token()
-
- room_ids = yield self.store.get_rooms_for_user(user_id)
-
- # First we check if any devices have changed
- changed = yield self.store.get_user_whose_devices_changed(
- from_token.device_list_key
- )
-
- # Then work out if any users have since joined
- rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)
-
- member_events = yield self.store.get_membership_changes_for_user(
- user_id, from_token.room_key, now_token.room_key
- )
- rooms_changed.update(event.room_id for event in member_events)
-
- stream_ordering = RoomStreamToken.parse_stream_token(
- from_token.room_key
- ).stream
-
- possibly_changed = set(changed)
- possibly_left = set()
- for room_id in rooms_changed:
- current_state_ids = yield self.store.get_current_state_ids(room_id)
-
- # The user may have left the room
- # TODO: Check if they actually did or if we were just invited.
- if room_id not in room_ids:
- for key, event_id in iteritems(current_state_ids):
- etype, state_key = key
- if etype != EventTypes.Member:
- continue
- possibly_left.add(state_key)
- continue
-
- # Fetch the current state at the time.
- try:
- event_ids = yield self.store.get_forward_extremeties_for_room(
- room_id, stream_ordering=stream_ordering
- )
- except errors.StoreError:
- # we have purged the stream_ordering index since the stream
- # ordering: treat it the same as a new room
- event_ids = []
-
- # special-case for an empty prev state: include all members
- # in the changed list
- if not event_ids:
- for key, event_id in iteritems(current_state_ids):
- etype, state_key = key
- if etype != EventTypes.Member:
- continue
- possibly_changed.add(state_key)
- continue
-
- current_member_id = current_state_ids.get((EventTypes.Member, user_id))
- if not current_member_id:
- continue
-
- # mapping from event_id -> state_dict
- prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)
-
- # Check if we've joined the room? If so we just blindly add all the users to
- # the "possibly changed" users.
- for state_dict in itervalues(prev_state_ids):
- member_event = state_dict.get((EventTypes.Member, user_id), None)
- if not member_event or member_event != current_member_id:
- for key, event_id in iteritems(current_state_ids):
- etype, state_key = key
- if etype != EventTypes.Member:
- continue
- possibly_changed.add(state_key)
- break
-
- # If there has been any change in membership, include them in the
- # possibly changed list. We'll check if they are joined below,
- # and we're not toooo worried about spuriously adding users.
- for key, event_id in iteritems(current_state_ids):
- etype, state_key = key
- if etype != EventTypes.Member:
- continue
-
- # check if this member has changed since any of the extremities
- # at the stream_ordering, and add them to the list if so.
- for state_dict in itervalues(prev_state_ids):
- prev_event_id = state_dict.get(key, None)
- if not prev_event_id or prev_event_id != event_id:
- if state_key != user_id:
- possibly_changed.add(state_key)
- break
-
- if possibly_changed or possibly_left:
- users_who_share_room = yield self.store.get_users_who_share_room_with_user(
- user_id
- )
-
- # Take the intersection of the users whose devices may have changed
- # and those that actually still share a room with the user
- possibly_joined = possibly_changed & users_who_share_room
- possibly_left = (possibly_changed | possibly_left) - users_who_share_room
- else:
- possibly_joined = []
- possibly_left = []
-
- defer.returnValue({
- "changed": list(possibly_joined),
- "left": list(possibly_left),
- })
-
@defer.inlineCallbacks
def on_federation_query_user_devices(self, user_id):
stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
@@ -473,15 +485,26 @@ class DeviceListEduUpdater(object):
if get_domain_from_id(user_id) != origin:
# TODO: Raise?
- logger.warning("Got device list update edu for %r from %r", user_id, origin)
+ logger.warning(
+ "Got device list update edu for %r/%r from %r",
+ user_id, device_id, origin,
+ )
return
room_ids = yield self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
# probably won't get any further updates.
+ logger.warning(
+ "Got device list update edu for %r/%r, but don't share a room",
+ user_id, device_id,
+ )
return
+ logger.debug(
+ "Received device list update for %r/%r", user_id, device_id,
+ )
+
self._pending_updates.setdefault(user_id, []).append(
(device_id, stream_id, prev_ids, edu_content)
)
@@ -499,10 +522,18 @@ class DeviceListEduUpdater(object):
# This can happen since we batch updates
return
+ for device_id, stream_id, prev_ids, content in pending_updates:
+ logger.debug(
+ "Handling update %r/%r, ID: %r, prev: %r ",
+ user_id, device_id, stream_id, prev_ids,
+ )
+
# Given a list of updates we check if we need to resync. This
# happens if we've missed updates.
resync = yield self._need_to_do_resync(user_id, pending_updates)
+ logger.debug("Need to re-sync devices for %r? %r", user_id, resync)
+
if resync:
# Fetch all devices for the user.
origin = get_domain_from_id(user_id)
@@ -555,11 +586,21 @@ class DeviceListEduUpdater(object):
)
devices = []
+ for device in devices:
+ logger.debug(
+ "Handling resync update %r/%r, ID: %r",
+ user_id, device["device_id"], stream_id,
+ )
+
yield self.store.update_remote_device_list_cache(
user_id, devices, stream_id,
)
device_ids = [device["device_id"] for device in devices]
yield self.device_handler.notify_device_update(user_id, device_ids)
+
+ # We clobber the seen updates since we've re-synced from a given
+ # point.
+ self._seen_updates[user_id] = set([stream_id])
else:
# Simply update the single device, since we know that is the only
# change (because of the single prev_id matching the current cache)
@@ -572,9 +613,9 @@ class DeviceListEduUpdater(object):
user_id, [device_id for device_id, _, _, _ in pending_updates]
)
- self._seen_updates.setdefault(user_id, set()).update(
- stream_id for _, stream_id, _, _ in pending_updates
- )
+ self._seen_updates.setdefault(user_id, set()).update(
+ stream_id for _, stream_id, _, _ in pending_updates
+ )
@defer.inlineCallbacks
def _need_to_do_resync(self, user_id, updates):
@@ -587,6 +628,11 @@ class DeviceListEduUpdater(object):
user_id
)
+ logger.debug(
+ "Current extremity for %r: %r",
+ user_id, extremity,
+ )
+
stream_id_in_updates = set() # stream_ids in updates list
for _, stream_id, prev_ids, _ in updates:
if not prev_ids:
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 8b11330..a12f950 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -19,7 +19,7 @@ import string
from twisted.internet import defer
-from synapse.api.constants import EventTypes
+from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
from synapse.api.errors import (
AuthError,
CodeMessageException,
@@ -43,7 +43,10 @@ class DirectoryHandler(BaseHandler):
self.state = hs.get_state_handler()
self.appservice_handler = hs.get_application_service_handler()
self.event_creation_handler = hs.get_event_creation_handler()
+ self.store = hs.get_datastore()
self.config = hs.config
+ self.enable_room_list_search = hs.config.enable_room_list_search
+ self.require_membership = hs.config.require_membership_for_aliases
self.federation = hs.get_federation_client()
hs.get_federation_registry().register_query_handler(
@@ -67,7 +70,7 @@ class DirectoryHandler(BaseHandler):
# TODO(erikj): Add transactions.
# TODO(erikj): Check if there is a current association.
if not servers:
- users = yield self.state.get_current_user_in_room(room_id)
+ users = yield self.state.get_current_users_in_room(room_id)
servers = set(get_domain_from_id(u) for u in users)
if not servers:
@@ -82,7 +85,7 @@ class DirectoryHandler(BaseHandler):
@defer.inlineCallbacks
def create_association(self, requester, room_alias, room_id, servers=None,
- send_event=True):
+ send_event=True, check_membership=True):
"""Attempt to create a new alias
Args:
@@ -92,6 +95,8 @@ class DirectoryHandler(BaseHandler):
servers (list[str]|None): List of servers that others servers
should try and join via
send_event (bool): Whether to send an updated m.room.aliases event
+ check_membership (bool): Whether to check if the user is in the room
+ before the alias can be set (if the server's config requires it).
Returns:
Deferred
@@ -99,6 +104,13 @@ class DirectoryHandler(BaseHandler):
user_id = requester.user.to_string()
+ if len(room_alias.to_string()) > MAX_ALIAS_LENGTH:
+ raise SynapseError(
+ 400,
+ "Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH,
+ Codes.INVALID_PARAM,
+ )
+
service = requester.app_service
if service:
if not service.is_interested_in_alias(room_alias.to_string()):
@@ -107,6 +119,14 @@ class DirectoryHandler(BaseHandler):
" this kind of alias.", errcode=Codes.EXCLUSIVE
)
else:
+ if self.require_membership and check_membership:
+ rooms_for_user = yield self.store.get_rooms_for_user(user_id)
+ if room_id not in rooms_for_user:
+ raise AuthError(
+ 403,
+ "You must be in the room to create an alias for it",
+ )
+
if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
raise AuthError(
403, "This user is not permitted to create this alias",
@@ -267,7 +287,7 @@ class DirectoryHandler(BaseHandler):
Codes.NOT_FOUND
)
- users = yield self.state.get_current_user_in_room(room_id)
+ users = yield self.state.get_current_users_in_room(room_id)
extra_servers = set(get_domain_from_id(u) for u in users)
servers = set(extra_servers) | set(servers)
@@ -411,6 +431,13 @@ class DirectoryHandler(BaseHandler):
if visibility not in ["public", "private"]:
raise SynapseError(400, "Invalid visibility setting")
+ if visibility == "public" and not self.enable_room_list_search:
+ # The room list has been disabled.
+ raise AuthError(
+ 403,
+ "This user is not permitted to publish rooms to the room list"
+ )
+
room = yield self.store.get_room(room_id)
if room is None:
raise SynapseError(400, "Unknown room")
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index f772e62..6003ad9 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -19,9 +19,8 @@ import random
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import AuthError
+from synapse.api.errors import AuthError, SynapseError
from synapse.events import EventBase
-from synapse.events.utils import serialize_event
from synapse.types import UserID
from synapse.util.logutils import log_function
from synapse.visibility import filter_events_for_client
@@ -50,6 +49,7 @@ class EventStreamHandler(BaseHandler):
self.notifier = hs.get_notifier()
self.state = hs.get_state_handler()
self._server_notices_sender = hs.get_server_notices_sender()
+ self._event_serializer = hs.get_event_client_serializer()
@defer.inlineCallbacks
@log_function
@@ -61,6 +61,11 @@ class EventStreamHandler(BaseHandler):
If `only_keys` is not None, events from keys will be sent down.
"""
+ if room_id:
+ blocked = yield self.store.is_room_blocked(room_id)
+ if blocked:
+ raise SynapseError(403, "This room has been blocked on this server")
+
# send any outstanding server notices to the user.
yield self._server_notices_sender.on_user_syncing(auth_user_id)
@@ -97,7 +102,7 @@ class EventStreamHandler(BaseHandler):
# Send down presence.
if event.state_key == auth_user_id:
# Send down presence for everyone in the room.
- users = yield self.state.get_current_user_in_room(event.room_id)
+ users = yield self.state.get_current_users_in_room(event.room_id)
states = yield presence_handler.get_states(
users,
as_event=True,
@@ -115,9 +120,9 @@ class EventStreamHandler(BaseHandler):
time_now = self.clock.time_msec()
- chunks = [
- serialize_event(e, time_now, as_client_event) for e in events
- ]
+ chunks = yield self._event_serializer.serialize_events(
+ events, time_now, as_client_event=as_client_event,
+ )
chunk = {
"chunk": chunks,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index f804861..2202ed6 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -29,13 +29,7 @@ from unpaddedbase64 import decode_base64
from twisted.internet import defer
-from synapse.api.constants import (
- KNOWN_ROOM_VERSIONS,
- EventTypes,
- Membership,
- RejectedReason,
- RoomVersions,
-)
+from synapse.api.constants import EventTypes, Membership, RejectedReason
from synapse.api.errors import (
AuthError,
CodeMessageException,
@@ -44,7 +38,9 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.crypto.event_signing import compute_event_signature
+from synapse.event_auth import auth_types_for_event
from synapse.events.validator import EventValidator
from synapse.replication.http.federation import (
ReplicationCleanRoomRestServlet,
@@ -858,6 +854,52 @@ class FederationHandler(BaseHandler):
logger.debug("Not backfilling as no extremeties found.")
return
+ # We only want to paginate if we can actually see the events we'll get,
+ # as otherwise we'll just spend a lot of resources to get redacted
+ # events.
+ #
+ # We do this by filtering all the backwards extremities and seeing if
+ # any remain. Given we don't have the extremity events themselves, we
+ # need to actually check the events that reference them.
+ #
+ # *Note*: the spec wants us to keep backfilling until we reach the start
+ # of the room in case we are allowed to see some of the history. However
+ # in practice that causes more issues than it's worth, as a) it's
+ # relatively rare for there to be any visible history and b) even when
+ # there is, it's often sufficiently long ago that clients would stop
+ # attempting to paginate before backfill reached the visible history.
+ #
+ # TODO: If we do do a backfill then we should filter the backwards
+ # extremities to only include those that point to visible portions of
+ # history.
+ #
+ # TODO: Correctly handle the case where we are allowed to see the
+ # forward event but not the backward extremity, e.g. in the case of
+ # initial join of the server where we are allowed to see the join
+ # event but not anything before it. This would require looking at the
+ # state *before* the event, ignoring the special casing certain event
+ # types have.
+
+ forward_events = yield self.store.get_successor_events(
+ list(extremities),
+ )
+
+ extremities_events = yield self.store.get_events(
+ forward_events,
+ check_redacted=False,
+ get_prev_content=False,
+ )
+
+ # We set `check_history_visibility_only` as we might otherwise get false
+ # positives from users having been erased.
+ filtered_extremities = yield filter_events_for_server(
+ self.store, self.server_name, list(extremities_events.values()),
+ redact=False, check_history_visibility_only=True,
+ )
+
+ if not filtered_extremities:
+ defer.returnValue(False)
+
# Check if we reached a point where we should start backfilling.
sorted_extremeties_tuple = sorted(
extremities.items(),
@@ -1582,6 +1624,7 @@ class FederationHandler(BaseHandler):
origin, event,
state=state,
auth_events=auth_events,
+ backfilled=backfilled,
)
# reraise does not allow inlineCallbacks to preserve the stacktrace, so we
@@ -1626,6 +1669,7 @@ class FederationHandler(BaseHandler):
event,
state=ev_info.get("state"),
auth_events=ev_info.get("auth_events"),
+ backfilled=backfilled,
)
defer.returnValue(res)
@@ -1684,7 +1728,9 @@ class FederationHandler(BaseHandler):
# invalid, and it would fail auth checks anyway.
raise SynapseError(400, "No create event in state")
- room_version = create_event.content.get("room_version", RoomVersions.V1)
+ room_version = create_event.content.get(
+ "room_version", RoomVersions.V1.identifier,
+ )
missing_auth_events = set()
for e in itertools.chain(auth_events, state, [event]):
@@ -1748,7 +1794,7 @@ class FederationHandler(BaseHandler):
)
@defer.inlineCallbacks
- def _prep_event(self, origin, event, state=None, auth_events=None):
+ def _prep_event(self, origin, event, state, auth_events, backfilled):
"""
Args:
@@ -1756,6 +1802,7 @@ class FederationHandler(BaseHandler):
event:
state:
auth_events:
+ backfilled (bool)
Returns:
Deferred, which resolves to synapse.events.snapshot.EventContext
@@ -1797,11 +1844,104 @@ class FederationHandler(BaseHandler):
context.rejected = RejectedReason.AUTH_ERROR
+ if not context.rejected:
+ yield self._check_for_soft_fail(event, state, backfilled)
+
if event.type == EventTypes.GuestAccess and not context.rejected:
yield self.maybe_kick_guest_users(event)
defer.returnValue(context)
+ @defer.inlineCallbacks
+ def _check_for_soft_fail(self, event, state, backfilled):
+ """Checks if we should soft fail the event, if so marks the event as
+ such.
+
+ Args:
+ event (FrozenEvent)
+ state (dict|None): The state at the event if we don't have all the
+ event's prev events
+ backfilled (bool): Whether the event is from backfill
+
+ Returns:
+ Deferred
+ """
+ # For new (non-backfilled and non-outlier) events we check if the event
+ # passes auth based on the current state. If it doesn't then we
+ # "soft-fail" the event.
+ do_soft_fail_check = not backfilled and not event.internal_metadata.is_outlier()
+ if do_soft_fail_check:
+ extrem_ids = yield self.store.get_latest_event_ids_in_room(
+ event.room_id,
+ )
+
+ extrem_ids = set(extrem_ids)
+ prev_event_ids = set(event.prev_event_ids())
+
+ if extrem_ids == prev_event_ids:
+ # If they're the same then the current state is the same as the
+ # state at the event, so no point rechecking auth for soft fail.
+ do_soft_fail_check = False
+
+ if do_soft_fail_check:
+ room_version = yield self.store.get_room_version(event.room_id)
+
+ # Calculate the "current state".
+ if state is not None:
+ # If we're explicitly given the state then we won't have all the
+ # prev events, and so we have a gap in the graph. In this case
+ # we want to be a little careful as we might have been down for
+ # a while and have an incorrect view of the current state,
+ # however we still want to do checks as gaps are easy to
+ # maliciously manufacture.
+ #
+ # So we use a "current state" that is actually a state
+ # resolution across the current forward extremities and the
+ # given state at the event. This should correctly handle cases
+ # like bans, especially with state res v2.
+
+ state_sets = yield self.store.get_state_groups(
+ event.room_id, extrem_ids,
+ )
+ state_sets = list(state_sets.values())
+ state_sets.append(state)
+ current_state_ids = yield self.state_handler.resolve_events(
+ room_version, state_sets, event,
+ )
+ current_state_ids = {
+ k: e.event_id for k, e in iteritems(current_state_ids)
+ }
+ else:
+ current_state_ids = yield self.state_handler.get_current_state_ids(
+ event.room_id, latest_event_ids=extrem_ids,
+ )
+
+ logger.debug(
+ "Doing soft-fail check for %s: state %s",
+ event.event_id, current_state_ids,
+ )
+
+ # Now check if the event passes auth against said current state
+ auth_types = auth_types_for_event(event)
+ current_state_ids = [
+ e for k, e in iteritems(current_state_ids)
+ if k in auth_types
+ ]
+
+ current_auth_events = yield self.store.get_events(current_state_ids)
+ current_auth_events = {
+ (e.type, e.state_key): e for e in current_auth_events.values()
+ }
+
+ try:
+ self.auth.check(room_version, event, auth_events=current_auth_events)
+ except AuthError as e:
+ logger.warn(
+ "Soft-failing %r because %s",
+ event, e,
+ )
+ event.internal_metadata.soft_failed = True
+
@defer.inlineCallbacks
def on_query_auth(self, origin, event_id, room_id, remote_auth_chain, rejects,
missing):
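
The shortcut at the top of _check_for_soft_fail is worth spelling out; with illustrative event IDs:

    # If the event's prev_events are exactly the current forward
    # extremities, the current state *is* the state at the event, so
    # re-checking auth against it could never soft-fail the event.
    extrem_ids = {"$a:example.com", "$b:example.com"}
    prev_event_ids = {"$a:example.com", "$b:example.com"}
    do_soft_fail_check = extrem_ids != prev_event_ids  # False: skip the check
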
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 39184f0..2246948 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -132,6 +132,14 @@ class IdentityHandler(BaseHandler):
}
)
logger.debug("bound threepid %r to %s", creds, mxid)
+
+ # Remember where we bound the threepid
+ yield self.store.add_user_bound_threepid(
+ user_id=mxid,
+ medium=data["medium"],
+ address=data["address"],
+ id_server=id_server,
+ )
except CodeMessageException as e:
data = json.loads(e.msg) # XXX WAT?
defer.returnValue(data)
@@ -142,30 +150,61 @@ class IdentityHandler(BaseHandler):
Args:
mxid (str): Matrix user ID of binding to be removed
- threepid (dict): Dict with medium & address of binding to be removed
+ threepid (dict): Dict with medium & address of binding to be
+ removed, and an optional id_server.
Raises:
SynapseError: If we failed to contact the identity server
Returns:
Deferred[bool]: True on success, otherwise False if the identity
- server doesn't support unbinding
+ server doesn't support unbinding (or no identity server found to
+ contact).
"""
- logger.debug("unbinding threepid %r from %s", threepid, mxid)
- if not self.trusted_id_servers:
- logger.warn("Can't unbind threepid: no trusted ID servers set in config")
+ if threepid.get("id_server"):
+ id_servers = [threepid["id_server"]]
+ else:
+ id_servers = yield self.store.get_id_servers_user_bound(
+ user_id=mxid,
+ medium=threepid["medium"],
+ address=threepid["address"],
+ )
+
+ # We don't know where to unbind, so we don't have a choice but to return
+ if not id_servers:
defer.returnValue(False)
- # We don't track what ID server we added 3pids on (perhaps we ought to)
- # but we assume that any of the servers in the trusted list are in the
- # same ID server federation, so we can pick any one of them to send the
- # deletion request to.
- id_server = next(iter(self.trusted_id_servers))
+ changed = True
+ for id_server in id_servers:
+ changed &= yield self.try_unbind_threepid_with_id_server(
+ mxid, threepid, id_server,
+ )
+
+ defer.returnValue(changed)
+
+ @defer.inlineCallbacks
+ def try_unbind_threepid_with_id_server(self, mxid, threepid, id_server):
+ """Removes a binding from an identity server
+ Args:
+ mxid (str): Matrix user ID of binding to be removed
+ threepid (dict): Dict with medium & address of binding to be removed
+ id_server (str): Identity server to unbind from
+
+ Raises:
+ SynapseError: If we failed to contact the identity server
+
+ Returns:
+ Deferred[bool]: True on success, otherwise False if the identity
+ server doesn't support unbinding
+ """
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
content = {
"mxid": mxid,
- "threepid": threepid,
+ "threepid": {
+ "medium": threepid["medium"],
+ "address": threepid["address"],
+ },
}
# we abuse the federation http client to sign the request, but we have to send it
@@ -188,16 +227,24 @@ class IdentityHandler(BaseHandler):
content,
headers,
)
+ changed = True
except HttpResponseException as e:
+ changed = False
if e.code in (400, 404, 501,):
# The remote server probably doesn't support unbinding (yet)
logger.warn("Received %d response while unbinding threepid", e.code)
- defer.returnValue(False)
else:
logger.error("Failed to unbind threepid on identity server: %s", e)
raise SynapseError(502, "Failed to contact identity server")
- defer.returnValue(True)
+ yield self.store.remove_user_bound_threepid(
+ user_id=mxid,
+ medium=threepid["medium"],
+ address=threepid["address"],
+ id_server=id_server,
+ )
+
+ defer.returnValue(changed)
@defer.inlineCallbacks
def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
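
The `changed &= ...` accumulation in unbind_threepid means the caller only sees True when every identity server honoured the unbind; a sketch with illustrative per-server outcomes:

    per_server_outcomes = [True, False, True]  # illustrative results
    changed = True
    for ok in per_server_outcomes:
        changed &= ok
    assert changed is False  # one server lacked unbind support
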
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 563bb3c..aaee5db 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -18,8 +18,7 @@ import logging
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import AuthError, Codes
-from synapse.events.utils import serialize_event
+from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.events.validator import EventValidator
from synapse.handlers.presence import format_user_presence_state
from synapse.streams.config import PaginationConfig
@@ -43,6 +42,7 @@ class InitialSyncHandler(BaseHandler):
self.clock = hs.get_clock()
self.validator = EventValidator()
self.snapshot_cache = SnapshotCache()
+ self._event_serializer = hs.get_event_client_serializer()
def snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):
@@ -138,7 +138,9 @@ class InitialSyncHandler(BaseHandler):
d["inviter"] = event.sender
invite_event = yield self.store.get_event(event.event_id)
- d["invite"] = serialize_event(invite_event, time_now, as_client_event)
+ d["invite"] = yield self._event_serializer.serialize_event(
+ invite_event, time_now, as_client_event,
+ )
rooms_ret.append(d)
@@ -185,18 +187,21 @@ class InitialSyncHandler(BaseHandler):
time_now = self.clock.time_msec()
d["messages"] = {
- "chunk": [
- serialize_event(m, time_now, as_client_event)
- for m in messages
- ],
+ "chunk": (
+ yield self._event_serializer.serialize_events(
+ messages, time_now=time_now,
+ as_client_event=as_client_event,
+ )
+ ),
"start": start_token.to_string(),
"end": end_token.to_string(),
}
- d["state"] = [
- serialize_event(c, time_now, as_client_event)
- for c in current_state.values()
- ]
+ d["state"] = yield self._event_serializer.serialize_events(
+ current_state.values(),
+ time_now=time_now,
+ as_client_event=as_client_event
+ )
account_data_events = []
tags = tags_by_room.get(event.room_id)
@@ -262,6 +267,10 @@ class InitialSyncHandler(BaseHandler):
A JSON serialisable dict with the snapshot of the room.
"""
+ blocked = yield self.store.is_room_blocked(room_id)
+ if blocked:
+ raise SynapseError(403, "This room has been blocked on this server")
+
user_id = requester.user.to_string()
membership, member_event_id = yield self._check_in_room_or_world_readable(
@@ -333,11 +342,15 @@ class InitialSyncHandler(BaseHandler):
"membership": membership,
"room_id": room_id,
"messages": {
- "chunk": [serialize_event(m, time_now) for m in messages],
+ "chunk": (yield self._event_serializer.serialize_events(
+ messages, time_now,
+ )),
"start": start_token.to_string(),
"end": end_token.to_string(),
},
- "state": [serialize_event(s, time_now) for s in room_state.values()],
+ "state": (yield self._event_serializer.serialize_events(
+ room_state.values(), time_now,
+ )),
"presence": [],
"receipts": [],
})
@@ -351,10 +364,9 @@ class InitialSyncHandler(BaseHandler):
# TODO: These concurrently
time_now = self.clock.time_msec()
- state = [
- serialize_event(x, time_now)
- for x in current_state.values()
- ]
+ state = yield self._event_serializer.serialize_events(
+ current_state.values(), time_now,
+ )
now_token = yield self.hs.get_event_sources().get_current_token()
@@ -421,7 +433,9 @@ class InitialSyncHandler(BaseHandler):
ret = {
"room_id": room_id,
"messages": {
- "chunk": [serialize_event(m, time_now) for m in messages],
+ "chunk": (yield self._event_serializer.serialize_events(
+ messages, time_now,
+ )),
"start": start_token.to_string(),
"end": end_token.to_string(),
},
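
The pattern repeated throughout this file (and again in pagination.py and search.py below) is that event serialization is no longer a synchronous serialize_event() list comprehension: it goes through an event client serializer fetched from the homeserver, whose methods return Deferreds and must be yielded. A toy model of that interface, assuming events are plain dicts with a `ts` field (the real EventClientSerializer is richer):

    from twisted.internet import defer

    class ToyEventSerializer(object):
        def serialize_event(self, event, time_now, as_client_event=True):
            # Serialization may now involve async work (e.g. bundling
            # relations), hence a Deferred rather than a plain value.
            out = dict(event)
            out["age"] = time_now - event["ts"]
            return defer.succeed(out)

        @defer.inlineCallbacks
        def serialize_events(self, events, time_now, **kwargs):
            results = []
            for event in events:
                serialized = yield self.serialize_event(event, time_now, **kwargs)
                results.append(serialized)
            defer.returnValue(results)
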
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 3981fe6..792edc7 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -22,7 +22,7 @@ from canonicaljson import encode_canonical_json, json
from twisted.internet import defer
from twisted.internet.defer import succeed
-from synapse.api.constants import EventTypes, Membership, RoomVersions
+from synapse.api.constants import EventTypes, Membership, RelationTypes
from synapse.api.errors import (
AuthError,
Codes,
@@ -30,8 +30,8 @@ from synapse.api.errors import (
NotFoundError,
SynapseError,
)
+from synapse.api.room_versions import RoomVersions
from synapse.api.urls import ConsentURIBuilder
-from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.storage.state import StateFilter
@@ -56,6 +56,7 @@ class MessageHandler(object):
self.clock = hs.get_clock()
self.state = hs.get_state_handler()
self.store = hs.get_datastore()
+ self._event_serializer = hs.get_event_client_serializer()
@defer.inlineCallbacks
def get_room_data(self, user_id=None, room_id=None,
@@ -163,9 +164,10 @@ class MessageHandler(object):
room_state = room_state[membership_event_id]
now = self.clock.time_msec()
- defer.returnValue(
- [serialize_event(c, now) for c in room_state.values()]
+ events = yield self._event_serializer.serialize_events(
+ room_state.values(), now,
)
+ defer.returnValue(events)
@defer.inlineCallbacks
def get_joined_members(self, requester, room_id):
@@ -191,7 +193,7 @@ class MessageHandler(object):
"Getting joined members after leaving is not implemented"
)
- users_with_profile = yield self.state.get_current_user_in_room(room_id)
+ users_with_profile = yield self.state.get_current_users_in_room(room_id)
# If this is an AS, double check that they are allowed to see the members.
# This can either be because the AS user is in the room or because there
@@ -227,6 +229,7 @@ class EventCreationHandler(object):
self.ratelimiter = hs.get_ratelimiter()
self.notifier = hs.get_notifier()
self.config = hs.config
+ self.require_membership_for_aliases = hs.config.require_membership_for_aliases
self.send_event_to_master = ReplicationSendEventRestServlet.make_client(hs)
@@ -243,12 +246,19 @@ class EventCreationHandler(object):
self.spam_checker = hs.get_spam_checker()
- if self.config.block_events_without_consent_error is not None:
+ self._block_events_without_consent_error = (
+ self.config.block_events_without_consent_error
+ )
+
+ # we need to construct a ConsentURIBuilder here, as it checks that the necessary
+ # config options are set, but *only* if we have a configuration for which we are
+ # going to need it.
+ if self._block_events_without_consent_error:
self._consent_uri_builder = ConsentURIBuilder(self.config)
@defer.inlineCallbacks
def create_event(self, requester, event_dict, token_id=None, txn_id=None,
- prev_events_and_hashes=None):
+ prev_events_and_hashes=None, require_consent=True):
"""
Given a dict from a client, create a new event.
@@ -269,6 +279,9 @@ class EventCreationHandler(object):
where *hashes* is a map from algorithm to hash.
If None, they will be requested from the database.
+
+ require_consent (bool): Whether to check if the requester has
+ consented to privacy policy.
Raises:
ResourceLimitError if server is blocked to some resource being
exceeded
@@ -310,7 +323,7 @@ class EventCreationHandler(object):
)
is_exempt = yield self._is_exempt_from_privacy_policy(builder, requester)
- if not is_exempt:
+ if require_consent and not is_exempt:
yield self.assert_accepted_privacy_policy(requester)
if token_id is not None:
@@ -325,6 +338,35 @@ class EventCreationHandler(object):
prev_events_and_hashes=prev_events_and_hashes,
)
+ # In an ideal world we wouldn't need the second part of this condition. However,
+ # this behaviour isn't spec'd yet, meaning we should be able to deactivate this
+ # behaviour. Another reason is that this code is also evaluated each time a new
+ # m.room.aliases event is created, which includes hitting a /directory route.
+ # Therefore not including this condition here would render the similar one in
+ # synapse.handlers.directory pointless.
+ if builder.type == EventTypes.Aliases and self.require_membership_for_aliases:
+ # Ideally we'd do the membership check in event_auth.check(), which
+ # describes a spec'd algorithm for authenticating events received over
+ # federation as well as those created locally. As of room v3, aliases events
+ # can be created by users that are not in the room, therefore we have to
+ # tolerate them in event_auth.check().
+ prev_state_ids = yield context.get_prev_state_ids(self.store)
+ prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
+ prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+ if not prev_event or prev_event.membership != Membership.JOIN:
+ logger.warning(
+ ("Attempt to send `m.room.aliases` in room %s by user %s but"
+ " membership is %s"),
+ event.room_id,
+ event.sender,
+ prev_event.membership if prev_event else None,
+ )
+
+ raise AuthError(
+ 403,
+ "You must be in the room to create an alias for it",
+ )
+
self.validator.validate_new(event)
defer.returnValue((event, context))
@@ -378,7 +420,7 @@ class EventCreationHandler(object):
Raises:
ConsentNotGivenError: if the user has not given consent yet
"""
- if self.config.block_events_without_consent_error is None:
+ if self._block_events_without_consent_error is None:
return
# exempt AS users from needing consent
@@ -405,7 +447,7 @@ class EventCreationHandler(object):
consent_uri = self._consent_uri_builder.build_user_consent_uri(
requester.user.localpart,
)
- msg = self.config.block_events_without_consent_error % {
+ msg = self._block_events_without_consent_error % {
'consent_uri': consent_uri,
}
raise ConsentNotGivenError(
@@ -436,10 +478,11 @@ class EventCreationHandler(object):
if event.is_state():
prev_state = yield self.deduplicate_state_event(event, context)
- logger.info(
- "Not bothering to persist duplicate state event %s", event.event_id,
- )
if prev_state is not None:
+ logger.info(
+ "Not bothering to persist state event %s duplicated by %s",
+ event.event_id, prev_state.event_id,
+ )
defer.returnValue(prev_state)
yield self.handle_new_client_event(
@@ -558,6 +601,20 @@ class EventCreationHandler(object):
self.validator.validate_new(event)
+ # If this event is an annotation then we check that the sender
+ # can't annotate the same way twice (e.g. stops users from liking an
+ # event multiple times).
+ relation = event.content.get("m.relates_to", {})
+ if relation.get("rel_type") == RelationTypes.ANNOTATION:
+ relates_to = relation["event_id"]
+ aggregation_key = relation["key"]
+
+ already_exists = yield self.store.has_user_annotated_event(
+ relates_to, event.type, aggregation_key, event.sender,
+ )
+ if already_exists:
+ raise SynapseError(400, "Can't send same reaction twice")
+
logger.debug(
"Created event %s",
event.event_id,
@@ -592,7 +649,9 @@ class EventCreationHandler(object):
"""
if event.is_state() and (event.type, event.state_key) == (EventTypes.Create, ""):
- room_version = event.content.get("room_version", RoomVersions.V1)
+ room_version = event.content.get(
+ "room_version", RoomVersions.V1.identifier
+ )
else:
room_version = yield self.store.get_room_version(event.room_id)
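
The annotation guard added above reduces to a membership test on (related event, event type, aggregation key, sender) tuples. A sketch with an in-memory set standing in for store.has_user_annotated_event (the set and the exception type are illustrative, not Synapse's):

    ANNOTATION = "m.annotation"  # value of RelationTypes.ANNOTATION

    def check_annotation_allowed(event_content, event_type, sender, seen):
        relation = event_content.get("m.relates_to", {})
        if relation.get("rel_type") != ANNOTATION:
            return  # only annotations are deduplicated

        key = (relation["event_id"], event_type, relation["key"], sender)
        if key in seen:
            # Corresponds to the 400 "Can't send same reaction twice" above.
            raise ValueError("Can't send same reaction twice")
        seen.add(key)
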
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index e4fdae9..8f811e2 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -20,7 +20,6 @@ from twisted.python.failure import Failure
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
-from synapse.events.utils import serialize_event
from synapse.storage.state import StateFilter
from synapse.types import RoomStreamToken
from synapse.util.async_helpers import ReadWriteLock
@@ -78,6 +77,7 @@ class PaginationHandler(object):
self._purges_in_progress_by_room = set()
# map from purge id to PurgeStatus
self._purges_by_id = {}
+ self._event_serializer = hs.get_event_client_serializer()
def start_purge_history(self, room_id, token,
delete_local_events=False):
@@ -278,18 +278,22 @@ class PaginationHandler(object):
time_now = self.clock.time_msec()
chunk = {
- "chunk": [
- serialize_event(e, time_now, as_client_event)
- for e in events
- ],
+ "chunk": (
+ yield self._event_serializer.serialize_events(
+ events, time_now,
+ as_client_event=as_client_event,
+ )
+ ),
"start": pagin_config.from_token.to_string(),
"end": next_token.to_string(),
}
if state:
- chunk["state"] = [
- serialize_event(e, time_now, as_client_event)
- for e in state
- ]
+ chunk["state"] = (
+ yield self._event_serializer.serialize_events(
+ state, time_now,
+ as_client_event=as_client_event,
+ )
+ )
defer.returnValue(chunk)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index ba38566..59d53f1 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -31,9 +31,11 @@ from prometheus_client import Counter
from twisted.internet import defer
-from synapse.api.constants import PresenceState
+import synapse.metrics
+from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.api.errors import SynapseError
from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.presence import UserPresenceState
from synapse.types import UserID, get_domain_from_id
from synapse.util.async_helpers import Linearizer
@@ -98,6 +100,7 @@ class PresenceHandler(object):
self.hs = hs
self.is_mine = hs.is_mine
self.is_mine_id = hs.is_mine_id
+ self.server_name = hs.hostname
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.wheel_timer = WheelTimer()
@@ -110,30 +113,6 @@ class PresenceHandler(object):
federation_registry.register_edu_handler(
"m.presence", self.incoming_presence
)
- federation_registry.register_edu_handler(
- "m.presence_invite",
- lambda origin, content: self.invite_presence(
- observed_user=UserID.from_string(content["observed_user"]),
- observer_user=UserID.from_string(content["observer_user"]),
- )
- )
- federation_registry.register_edu_handler(
- "m.presence_accept",
- lambda origin, content: self.accept_presence(
- observed_user=UserID.from_string(content["observed_user"]),
- observer_user=UserID.from_string(content["observer_user"]),
- )
- )
- federation_registry.register_edu_handler(
- "m.presence_deny",
- lambda origin, content: self.deny_presence(
- observed_user=UserID.from_string(content["observed_user"]),
- observer_user=UserID.from_string(content["observer_user"]),
- )
- )
-
- distributor = hs.get_distributor()
- distributor.observe("user_joined_room", self.user_joined_room)
active_presence = self.store.take_presence_startup_info()
@@ -220,6 +199,15 @@ class PresenceHandler(object):
LaterGauge("synapse_handlers_presence_wheel_timer_size", "", [],
lambda: len(self.wheel_timer))
+ # Used to handle sending of presence to newly joined users/servers
+ if hs.config.use_presence:
+ self.notifier.add_replication_callback(self.notify_new_event)
+
+ # Presence is best effort and quickly heals itself, so let's just always
+ # stream from the current state when we restart.
+ self._event_pos = self.store.get_current_events_token()
+ self._event_processing = False
+
@defer.inlineCallbacks
def _on_shutdown(self):
"""Gets called when shutting down. This lets us persist any updates that
@@ -751,199 +739,178 @@ class PresenceHandler(object):
yield self._update_states([prev_state.copy_and_replace(**new_fields)])
@defer.inlineCallbacks
- def user_joined_room(self, user, room_id):
- """Called (via the distributor) when a user joins a room. This funciton
- sends presence updates to servers, either:
- 1. the joining user is a local user and we send their presence to
- all servers in the room.
- 2. the joining user is a remote user and so we send presence for all
- local users in the room.
+ def is_visible(self, observed_user, observer_user):
+ """Returns whether a user can see another user's presence.
"""
- # We only need to send presence to servers that don't have it yet. We
- # don't need to send to local clients here, as that is done as part
- # of the event stream/sync.
- # TODO: Only send to servers not already in the room.
- if self.is_mine(user):
- state = yield self.current_state_for_user(user.to_string())
-
- self._push_to_remotes([state])
- else:
- user_ids = yield self.store.get_users_in_room(room_id)
- user_ids = list(filter(self.is_mine_id, user_ids))
+ observer_room_ids = yield self.store.get_rooms_for_user(
+ observer_user.to_string()
+ )
+ observed_room_ids = yield self.store.get_rooms_for_user(
+ observed_user.to_string()
+ )
- states = yield self.current_state_for_users(user_ids)
+ if observer_room_ids & observed_room_ids:
+ defer.returnValue(True)
- self._push_to_remotes(list(states.values()))
+ defer.returnValue(False)
@defer.inlineCallbacks
- def get_presence_list(self, observer_user, accepted=None):
- """Returns the presence for all users in their presence list.
+ def get_all_presence_updates(self, last_id, current_id):
"""
- if not self.is_mine(observer_user):
- raise SynapseError(400, "User is not hosted on this Home Server")
-
- presence_list = yield self.store.get_presence_list(
- observer_user.localpart, accepted=accepted
- )
+ Gets a list of presence update rows from between the given stream ids.
+ Each row has:
+ - stream_id(str)
+ - user_id(str)
+ - state(str)
+ - last_active_ts(int)
+ - last_federation_update_ts(int)
+ - last_user_sync_ts(int)
+ - status_msg(int)
+ - currently_active(int)
+ """
+ # TODO(markjh): replicate the unpersisted changes.
+ # This could use the in-memory stores for recent changes.
+ rows = yield self.store.get_all_presence_updates(last_id, current_id)
+ defer.returnValue(rows)
- results = yield self.get_states(
- target_user_ids=[row["observed_user_id"] for row in presence_list],
- as_event=False,
- )
+ def notify_new_event(self):
+ """Called when new events have happened. Handles users and servers
+ joining rooms that need to be sent presence.
+ """
- now = self.clock.time_msec()
- results[:] = [format_user_presence_state(r, now) for r in results]
+ if self._event_processing:
+ return
- is_accepted = {
- row["observed_user_id"]: row["accepted"] for row in presence_list
- }
+ @defer.inlineCallbacks
+ def _process_presence():
+ assert not self._event_processing
- for result in results:
- result.update({
- "accepted": is_accepted,
- })
+ self._event_processing = True
+ try:
+ yield self._unsafe_process()
+ finally:
+ self._event_processing = False
- defer.returnValue(results)
+ run_as_background_process("presence.notify_new_event", _process_presence)
@defer.inlineCallbacks
- def send_presence_invite(self, observer_user, observed_user):
- """Sends a presence invite.
- """
- yield self.store.add_presence_list_pending(
- observer_user.localpart, observed_user.to_string()
- )
+ def _unsafe_process(self):
+ # Loop round handling deltas until we're up to date
+ while True:
+ with Measure(self.clock, "presence_delta"):
+ deltas = yield self.store.get_current_state_deltas(self._event_pos)
+ if not deltas:
+ return
- if self.is_mine(observed_user):
- yield self.invite_presence(observed_user, observer_user)
- else:
- yield self.federation.send_edu(
- destination=observed_user.domain,
- edu_type="m.presence_invite",
- content={
- "observed_user": observed_user.to_string(),
- "observer_user": observer_user.to_string(),
- }
- )
+ yield self._handle_state_delta(deltas)
+
+ self._event_pos = deltas[-1]["stream_id"]
+
+ # Expose current event processing position to prometheus
+ synapse.metrics.event_processing_positions.labels("presence").set(
+ self._event_pos
+ )
@defer.inlineCallbacks
- def invite_presence(self, observed_user, observer_user):
- """Handles new presence invites.
+ def _handle_state_delta(self, deltas):
+ """Process current state deltas to find new joins that need to be
+ handled.
"""
- if not self.is_mine(observed_user):
- raise SynapseError(400, "User is not hosted on this Home Server")
+ for delta in deltas:
+ typ = delta["type"]
+ state_key = delta["state_key"]
+ room_id = delta["room_id"]
+ event_id = delta["event_id"]
+ prev_event_id = delta["prev_event_id"]
- # TODO: Don't auto accept
- if self.is_mine(observer_user):
- yield self.accept_presence(observed_user, observer_user)
- else:
- self.federation.send_edu(
- destination=observer_user.domain,
- edu_type="m.presence_accept",
- content={
- "observed_user": observed_user.to_string(),
- "observer_user": observer_user.to_string(),
- }
- )
+ logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
- state_dict = yield self.get_state(observed_user, as_event=False)
- state_dict = format_user_presence_state(state_dict, self.clock.time_msec())
+ if typ != EventTypes.Member:
+ continue
- self.federation.send_edu(
- destination=observer_user.domain,
- edu_type="m.presence",
- content={
- "push": [state_dict]
- }
- )
+ if event_id is None:
+ # state has been deleted, so this is not a join. We only care about
+ # joins.
+ continue
- @defer.inlineCallbacks
- def accept_presence(self, observed_user, observer_user):
- """Handles a m.presence_accept EDU. Mark a presence invite from a
- local or remote user as accepted in a local user's presence list.
- Starts polling for presence updates from the local or remote user.
- Args:
- observed_user(UserID): The user to update in the presence list.
- observer_user(UserID): The owner of the presence list to update.
- """
- yield self.store.set_presence_list_accepted(
- observer_user.localpart, observed_user.to_string()
- )
+ event = yield self.store.get_event(event_id)
+ if event.content.get("membership") != Membership.JOIN:
+ # We only care about joins
+ continue
- @defer.inlineCallbacks
- def deny_presence(self, observed_user, observer_user):
- """Handle a m.presence_deny EDU. Removes a local or remote user from a
- local user's presence list.
- Args:
- observed_user(UserID): The local or remote user to remove from the
- list.
- observer_user(UserID): The local owner of the presence list.
- Returns:
- A Deferred.
- """
- yield self.store.del_presence_list(
- observer_user.localpart, observed_user.to_string()
- )
+ if prev_event_id:
+ prev_event = yield self.store.get_event(prev_event_id)
+ if prev_event.content.get("membership") == Membership.JOIN:
+ # Ignore changes to join events.
+ continue
- # TODO(paul): Inform the user somehow?
+ yield self._on_user_joined_room(room_id, state_key)
@defer.inlineCallbacks
- def drop(self, observed_user, observer_user):
- """Remove a local or remote user from a local user's presence list and
- unsubscribe the local user from updates that user.
+ def _on_user_joined_room(self, room_id, user_id):
+ """Called when we detect a user joining the room via the current state
+ delta stream.
+
Args:
- observed_user(UserId): The local or remote user to remove from the
- list.
- observer_user(UserId): The local owner of the presence list.
+ room_id (str)
+ user_id (str)
+
Returns:
- A Deferred.
+ Deferred
"""
- if not self.is_mine(observer_user):
- raise SynapseError(400, "User is not hosted on this Home Server")
- yield self.store.del_presence_list(
- observer_user.localpart, observed_user.to_string()
- )
+ if self.is_mine_id(user_id):
+ # If this is a local user then we need to send their presence
+ # out to hosts in the room (who don't already have it)
- # TODO: Inform the remote that we've dropped the presence list.
+ # TODO: We should be able to filter the hosts down to those that
+ # haven't previously seen the user
- @defer.inlineCallbacks
- def is_visible(self, observed_user, observer_user):
- """Returns whether a user can see another user's presence.
- """
- observer_room_ids = yield self.store.get_rooms_for_user(
- observer_user.to_string()
- )
- observed_room_ids = yield self.store.get_rooms_for_user(
- observed_user.to_string()
- )
+ state = yield self.current_state_for_user(user_id)
+ hosts = yield self.state.get_current_hosts_in_room(room_id)
- if observer_room_ids & observed_room_ids:
- defer.returnValue(True)
+ # Filter out ourselves.
+ hosts = set(host for host in hosts if host != self.server_name)
- accepted_observers = yield self.store.get_presence_list_observers_accepted(
- observed_user.to_string()
- )
+ self.federation.send_presence_to_destinations(
+ states=[state],
+ destinations=hosts,
+ )
+ else:
+ # A remote user has joined the room, so we need to:
+ # 1. Check if this is a new server in the room
+ # 2. If so send any presence they don't already have for
+ # local users in the room.
- defer.returnValue(observer_user.to_string() in accepted_observers)
+ # TODO: We should be able to filter the users down to those that
+ # the server hasn't previously seen
- @defer.inlineCallbacks
- def get_all_presence_updates(self, last_id, current_id):
- """
- Gets a list of presence update rows from between the given stream ids.
- Each row has:
- - stream_id(str)
- - user_id(str)
- - state(str)
- - last_active_ts(int)
- - last_federation_update_ts(int)
- - last_user_sync_ts(int)
- - status_msg(int)
- - currently_active(int)
- """
- # TODO(markjh): replicate the unpersisted changes.
- # This could use the in-memory stores for recent changes.
- rows = yield self.store.get_all_presence_updates(last_id, current_id)
- defer.returnValue(rows)
+ # TODO: Check that this is actually a new server joining the
+ # room.
+
+ user_ids = yield self.state.get_current_users_in_room(room_id)
+ user_ids = list(filter(self.is_mine_id, user_ids))
+
+ states = yield self.current_state_for_users(user_ids)
+
+ # Filter out old presence, i.e. offline presence states where
+ # the user hasn't been active for a week. We can change this
+ # depending on what we want the UX to be, but at the least we
+ # should filter out offline presence where the state is just the
+ # default state.
+ now = self.clock.time_msec()
+ states = [
+ state for state in states.values()
+ if state.state != PresenceState.OFFLINE
+ or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000
+ or state.status_msg is not None
+ ]
+
+ if states:
+ self.federation.send_presence_to_destinations(
+ states=states,
+ destinations=[get_domain_from_id(user_id)],
+ )
def should_notify(old_state, new_state):
@@ -1086,10 +1053,7 @@ class PresenceEventSource(object):
updates for
"""
user_id = user.to_string()
- plist = yield self.store.get_presence_list_accepted(
- user.localpart, on_invalidate=cache_context.invalidate,
- )
- users_interested_in = set(row["observed_user_id"] for row in plist)
+ users_interested_in = set()
users_interested_in.add(user_id) # So that we receive our own presence
users_who_share_room = yield self.store.get_users_who_share_room_with_user(
@@ -1294,10 +1258,6 @@ def get_interested_parties(store, states):
for room_id in room_ids:
room_ids_to_states.setdefault(room_id, []).append(state)
- plist = yield store.get_presence_list_observers_accepted(state.user_id)
- for u in plist:
- users_to_states.setdefault(u, []).append(state)
-
# Always notify self
users_to_states.setdefault(state.user_id, []).append(state)
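
Most of the presence rewrite replaces the distributor's user_joined_room callback with a loop over current-state deltas, and the subtle part is deciding which membership deltas count as genuine joins. A condensed version of that predicate, assuming event contents are plain dicts (or None where the state was deleted):

    def delta_is_new_join(event_content, prev_event_content):
        if event_content is None:
            return False  # state deleted; we only care about joins
        if event_content.get("membership") != "join":
            return False
        if prev_event_content is not None and \
                prev_event_content.get("membership") == "join":
            return False  # join -> join, e.g. a displayname change
        return True
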
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 1dfbde8..91fc718 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -53,6 +53,7 @@ class BaseProfileHandler(BaseHandler):
@defer.inlineCallbacks
def get_profile(self, user_id):
target_user = UserID.from_string(user_id)
+
if self.hs.is_mine(target_user):
try:
displayname = yield self.store.get_profile_displayname(
@@ -147,8 +148,14 @@ class BaseProfileHandler(BaseHandler):
@defer.inlineCallbacks
def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
- """target_user is the user whose displayname is to be changed;
- auth_user is the user attempting to make this change."""
+ """Set the displayname of a user
+
+ Args:
+ target_user (UserID): the user whose displayname is to be changed.
+ requester (Requester): The user attempting to make this change.
+ new_displayname (str): The displayname to give this user.
+ by_admin (bool): Whether this change was made by an administrator.
+ """
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this Home Server")
@@ -277,6 +284,48 @@ class BaseProfileHandler(BaseHandler):
room_id, str(e)
)
+ @defer.inlineCallbacks
+ def check_profile_query_allowed(self, target_user, requester=None):
+ """Checks whether a profile query is allowed. If the
+ 'require_auth_for_profile_requests' config flag is set to True and a
+ 'requester' is provided, the query is only allowed if the two users
+ share a room.
+
+ Args:
+ target_user (UserID): The owner of the queried profile.
+ requester (None|UserID): The user querying for the profile.
+
+ Raises:
+ SynapseError(403): The two users share no room, or one user couldn't
+ be found to be in any room the server is in, and therefore the query
+ is denied.
+ """
+ # Implementation of MSC1301: don't allow looking up profiles if the
+ # requester isn't in the same room as the target. We expect requester to
+ # be None when this function is called outside of a profile query, e.g.
+ # when building a membership event. In this case, we must allow the
+ # lookup.
+ if not self.hs.config.require_auth_for_profile_requests or not requester:
+ return
+
+ try:
+ requester_rooms = yield self.store.get_rooms_for_user(
+ requester.to_string()
+ )
+ target_user_rooms = yield self.store.get_rooms_for_user(
+ target_user.to_string(),
+ )
+
+ # Check if the room lists have no elements in common.
+ if requester_rooms.isdisjoint(target_user_rooms):
+ raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN)
+ except StoreError as e:
+ if e.code == 404:
+ # This likely means that one of the users doesn't exist,
+ # so we act as if we couldn't find the profile.
+ raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN)
+ raise
+
class MasterProfileHandler(BaseProfileHandler):
PROFILE_UPDATE_MS = 60 * 1000
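
The rule enforced by check_profile_query_allowed comes down to a set-disjointness test over the two users' room memberships, with "user not found" handled the same way as "no shared room". A standalone sketch, using PermissionError in place of SynapseError(403):

    def profile_query_allowed(requester_rooms, target_rooms):
        # A user the store can't find surfaces as None here; treat that
        # the same as sharing no rooms and deny the query.
        if requester_rooms is None or target_rooms is None:
            raise PermissionError("Profile isn't available")
        if set(requester_rooms).isdisjoint(target_rooms):
            raise PermissionError("Profile isn't available")
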
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 6964697..274d294 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -16,10 +16,8 @@ import logging
from twisted.internet import defer
-from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import get_domain_from_id
-
-from ._base import BaseHandler
+from synapse.handlers._base import BaseHandler
+from synapse.types import ReadReceipt
logger = logging.getLogger(__name__)
@@ -38,43 +36,18 @@ class ReceiptsHandler(BaseHandler):
self.clock = self.hs.get_clock()
self.state = hs.get_state_handler()
- @defer.inlineCallbacks
- def received_client_receipt(self, room_id, receipt_type, user_id,
- event_id):
- """Called when a client tells us a local user has read up to the given
- event_id in the room.
- """
- receipt = {
- "room_id": room_id,
- "receipt_type": receipt_type,
- "user_id": user_id,
- "event_ids": [event_id],
- "data": {
- "ts": int(self.clock.time_msec()),
- }
- }
-
- is_new = yield self._handle_new_receipts([receipt])
-
- if is_new:
- # fire off a process in the background to send the receipt to
- # remote servers
- run_as_background_process(
- 'push_receipts_to_remotes', self._push_remotes, receipt
- )
-
@defer.inlineCallbacks
def _received_remote_receipt(self, origin, content):
"""Called when we receive an EDU of type m.receipt from a remote HS.
"""
receipts = [
- {
- "room_id": room_id,
- "receipt_type": receipt_type,
- "user_id": user_id,
- "event_ids": user_values["event_ids"],
- "data": user_values.get("data", {}),
- }
+ ReadReceipt(
+ room_id=room_id,
+ receipt_type=receipt_type,
+ user_id=user_id,
+ event_ids=user_values["event_ids"],
+ data=user_values.get("data", {}),
+ )
for room_id, room_values in content.items()
for receipt_type, users in room_values.items()
for user_id, user_values in users.items()
@@ -90,14 +63,12 @@ class ReceiptsHandler(BaseHandler):
max_batch_id = None
for receipt in receipts:
- room_id = receipt["room_id"]
- receipt_type = receipt["receipt_type"]
- user_id = receipt["user_id"]
- event_ids = receipt["event_ids"]
- data = receipt["data"]
-
res = yield self.store.insert_receipt(
- room_id, receipt_type, user_id, event_ids, data
+ receipt.room_id,
+ receipt.receipt_type,
+ receipt.user_id,
+ receipt.event_ids,
+ receipt.data,
)
if not res:
@@ -115,7 +86,7 @@ class ReceiptsHandler(BaseHandler):
# no new receipts
defer.returnValue(False)
- affected_room_ids = list(set([r["room_id"] for r in receipts]))
+ affected_room_ids = list(set([r.room_id for r in receipts]))
self.notifier.on_new_event(
"receipt_key", max_batch_id, rooms=affected_room_ids
@@ -128,43 +99,26 @@ class ReceiptsHandler(BaseHandler):
defer.returnValue(True)
@defer.inlineCallbacks
- def _push_remotes(self, receipt):
- """Given a receipt, works out which remote servers should be
- poked and pokes them.
+ def received_client_receipt(self, room_id, receipt_type, user_id,
+ event_id):
+ """Called when a client tells us a local user has read up to the given
+ event_id in the room.
"""
- try:
- # TODO: optimise this to move some of the work to the workers.
- room_id = receipt["room_id"]
- receipt_type = receipt["receipt_type"]
- user_id = receipt["user_id"]
- event_ids = receipt["event_ids"]
- data = receipt["data"]
-
- users = yield self.state.get_current_user_in_room(room_id)
- remotedomains = set(get_domain_from_id(u) for u in users)
- remotedomains = remotedomains.copy()
- remotedomains.discard(self.server_name)
-
- logger.debug("Sending receipt to: %r", remotedomains)
-
- for domain in remotedomains:
- self.federation.send_edu(
- destination=domain,
- edu_type="m.receipt",
- content={
- room_id: {
- receipt_type: {
- user_id: {
- "event_ids": event_ids,
- "data": data,
- }
- }
- },
- },
- key=(room_id, receipt_type, user_id),
- )
- except Exception:
- logger.exception("Error pushing receipts to remote servers")
+ receipt = ReadReceipt(
+ room_id=room_id,
+ receipt_type=receipt_type,
+ user_id=user_id,
+ event_ids=[event_id],
+ data={
+ "ts": int(self.clock.time_msec()),
+ },
+ )
+
+ is_new = yield self._handle_new_receipts([receipt])
+ if not is_new:
+ return
+
+ yield self.federation.send_read_receipt(receipt)
@defer.inlineCallbacks
def get_receipts_for_room(self, room_id, to_key):
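
The receipts refactor swaps ad-hoc dicts for a ReadReceipt value type, accessed by attribute above (receipt.room_id and friends). A guess at its shape as a namedtuple, with a usage example; the real synapse.types.ReadReceipt may carry more than this:

    from collections import namedtuple

    ReadReceipt = namedtuple(
        "ReadReceipt",
        ["room_id", "receipt_type", "user_id", "event_ids", "data"],
    )

    receipt = ReadReceipt(
        room_id="!abc:example.org",
        receipt_type="m.read",
        user_id="@alice:example.org",
        event_ids=["$someevent"],
        data={"ts": 1558000000000},
    )
    affected_room_ids = list(set(r.room_id for r in [receipt]))
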
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index c0e0692..e83ee24 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -19,11 +19,13 @@ import logging
from twisted.internet import defer
from synapse import types
-from synapse.api.constants import LoginType
+from synapse.api.constants import MAX_USERID_LENGTH, LoginType
from synapse.api.errors import (
AuthError,
Codes,
+ ConsentNotGivenError,
InvalidCaptchaError,
+ LimitExceededError,
RegistrationError,
SynapseError,
)
@@ -60,6 +62,7 @@ class RegistrationHandler(BaseHandler):
self.user_directory_handler = hs.get_user_directory_handler()
self.captcha_client = CaptchaServerHttpClient(hs)
self.identity_handler = self.hs.get_handlers().identity_handler
+ self.ratelimiter = hs.get_registration_ratelimiter()
self._next_generated_user_id = None
@@ -120,6 +123,15 @@ class RegistrationHandler(BaseHandler):
self.check_user_id_not_appservice_exclusive(user_id)
+ if len(user_id) > MAX_USERID_LENGTH:
+ raise SynapseError(
+ 400,
+ "User ID may not be longer than %s characters" % (
+ MAX_USERID_LENGTH,
+ ),
+ Codes.INVALID_USERNAME
+ )
+
users = yield self.store.get_users_by_id_case_insensitive(user_id)
if users:
if not guest_access_token:
@@ -149,6 +161,8 @@ class RegistrationHandler(BaseHandler):
threepid=None,
user_type=None,
default_display_name=None,
+ address=None,
+ bind_emails=[],
):
"""Registers a new client on the server.
@@ -167,6 +181,8 @@ class RegistrationHandler(BaseHandler):
api.constants.UserTypes, or None for a normal user.
default_display_name (unicode|None): if set, the new user's displayname
will be set to this. Defaults to 'localpart'.
+ address (str|None): the IP address used to perform the registration.
+ bind_emails (List[str]): list of emails to bind to this account.
Returns:
A tuple of (user_id, access_token).
Raises:
@@ -206,7 +222,7 @@ class RegistrationHandler(BaseHandler):
token = None
if generate_token:
token = self.macaroon_gen.generate_access_token(user_id)
- yield self._register_with_store(
+ yield self.register_with_store(
user_id=user_id,
token=token,
password_hash=password_hash,
@@ -215,6 +231,7 @@ class RegistrationHandler(BaseHandler):
create_profile_with_displayname=default_display_name,
admin=admin,
user_type=user_type,
+ address=address,
)
if self.hs.config.user_directory_search_all_users:
@@ -238,12 +255,13 @@ class RegistrationHandler(BaseHandler):
if default_display_name is None:
default_display_name = localpart
try:
- yield self._register_with_store(
+ yield self.register_with_store(
user_id=user_id,
token=token,
password_hash=password_hash,
make_guest=make_guest,
create_profile_with_displayname=default_display_name,
+ address=address,
)
except SynapseError:
# if user id is taken, just generate another
@@ -254,6 +272,21 @@ class RegistrationHandler(BaseHandler):
if not self.hs.config.user_consent_at_registration:
yield self._auto_join_rooms(user_id)
+ # Bind any specified emails to this account
+ current_time = self.hs.get_clock().time_msec()
+ for email in bind_emails:
+ # generate threepid dict
+ threepid_dict = {
+ "medium": "email",
+ "address": email,
+ "validated_at": current_time,
+ }
+
+ # Bind email to new account
+ yield self._register_email_threepid(
+ user_id, threepid_dict, None, False,
+ )
+
defer.returnValue((user_id, token))
@defer.inlineCallbacks
@@ -305,6 +338,10 @@ class RegistrationHandler(BaseHandler):
)
else:
yield self._join_user_to_room(fake_requester, r)
+ except ConsentNotGivenError as e:
+ # Technically not necessary to pull out this error, though
+ # moving away from bare excepts is a good thing to do.
+ logger.error("Failed to join new user to %r: %r", r, e)
except Exception as e:
logger.error("Failed to join new user to %r: %r", r, e)
@@ -337,7 +374,7 @@ class RegistrationHandler(BaseHandler):
user_id, allowed_appservice=service
)
- yield self._register_with_store(
+ yield self.register_with_store(
user_id=user_id,
password_hash="",
appservice_id=service_id,
@@ -513,7 +550,7 @@ class RegistrationHandler(BaseHandler):
token = self.macaroon_gen.generate_access_token(user_id)
if need_register:
- yield self._register_with_store(
+ yield self.register_with_store(
user_id=user_id,
token=token,
password_hash=password_hash,
@@ -590,10 +627,10 @@ class RegistrationHandler(BaseHandler):
ratelimit=False,
)
- def _register_with_store(self, user_id, token=None, password_hash=None,
- was_guest=False, make_guest=False, appservice_id=None,
- create_profile_with_displayname=None, admin=False,
- user_type=None):
+ def register_with_store(self, user_id, token=None, password_hash=None,
+ was_guest=False, make_guest=False, appservice_id=None,
+ create_profile_with_displayname=None, admin=False,
+ user_type=None, address=None):
"""Register user in the datastore.
Args:
@@ -612,10 +649,26 @@ class RegistrationHandler(BaseHandler):
admin (boolean): is an admin user?
user_type (str|None): type of user. One of the values from
api.constants.UserTypes, or None for a normal user.
+ address (str|None): the IP address used to perform the registration.
Returns:
Deferred
"""
+ # Don't rate limit for app services
+ if appservice_id is None and address is not None:
+ time_now = self.clock.time()
+
+ allowed, time_allowed = self.ratelimiter.can_do_action(
+ address, time_now_s=time_now,
+ rate_hz=self.hs.config.rc_registration.per_second,
+ burst_count=self.hs.config.rc_registration.burst_count,
+ )
+
+ if not allowed:
+ raise LimitExceededError(
+ retry_after_ms=int(1000 * (time_allowed - time_now)),
+ )
+
if self.hs.config.worker_app:
return self._register_client(
user_id=user_id,
@@ -627,6 +680,7 @@ class RegistrationHandler(BaseHandler):
create_profile_with_displayname=create_profile_with_displayname,
admin=admin,
user_type=user_type,
+ address=address,
)
else:
return self.store.register(
@@ -693,9 +747,9 @@ class RegistrationHandler(BaseHandler):
access_token (str|None): The access token of the newly logged in
device, or None if `inhibit_login` enabled.
bind_email (bool): Whether to bind the email with the identity
- server
+ server.
bind_msisdn (bool): Whether to bind the msisdn with the identity
- server
+ server.
"""
if self.hs.config.worker_app:
yield self._post_registration_client(
@@ -737,7 +791,7 @@ class RegistrationHandler(BaseHandler):
"""A user consented to the terms on registration
Args:
- user_id (str): The user ID that consented
+ user_id (str): The user ID that consented.
consent_version (str): version of the policy the user has
consented to.
"""
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 67b1569..e37ae96 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -25,14 +25,9 @@ from six import iteritems, string_types
from twisted.internet import defer
-from synapse.api.constants import (
- DEFAULT_ROOM_VERSION,
- KNOWN_ROOM_VERSIONS,
- EventTypes,
- JoinRules,
- RoomCreationPreset,
-)
+from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
+from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS
from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
from synapse.util import stringutils
@@ -285,6 +280,7 @@ class RoomCreationHandler(BaseHandler):
(EventTypes.RoomAvatar, ""),
(EventTypes.Encryption, ""),
(EventTypes.ServerACL, ""),
+ (EventTypes.RelatedGroups, ""),
)
old_room_state_ids = yield self.store.get_filtered_current_state_ids(
@@ -406,7 +402,7 @@ class RoomCreationHandler(BaseHandler):
yield directory_handler.create_association(
requester, RoomAlias.from_string(alias),
new_room_id, servers=(self.hs.hostname, ),
- send_event=False,
+ send_event=False, check_membership=False,
)
logger.info("Moved alias %s to new room", alias)
except SynapseError as e:
@@ -479,7 +475,7 @@ class RoomCreationHandler(BaseHandler):
if ratelimit:
yield self.ratelimit(requester)
- room_version = config.get("room_version", DEFAULT_ROOM_VERSION)
+ room_version = config.get("room_version", DEFAULT_ROOM_VERSION.identifier)
if not isinstance(room_version, string_types):
raise SynapseError(
400,
@@ -542,6 +538,7 @@ class RoomCreationHandler(BaseHandler):
room_alias=room_alias,
servers=[self.hs.hostname],
send_event=False,
+ check_membership=False,
)
preset_config = config.get(
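
The room.py changes track the move of room-version constants out of api.constants: versions are now objects in api.room_versions, and config or event content keeps carrying the plain string via .identifier. Assuming a minimal RoomVersion record (the real class carries more capability flags):

    from collections import namedtuple

    RoomVersion = namedtuple("RoomVersion", ["identifier"])
    V1 = RoomVersion("1")

    config = {}  # a client's /createRoom body, say
    room_version = config.get("room_version", V1.identifier)
    assert room_version == "1"
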
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index afa508d..617d1c9 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -44,6 +44,7 @@ EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
class RoomListHandler(BaseHandler):
def __init__(self, hs):
super(RoomListHandler, self).__init__(hs)
+ self.enable_room_list_search = hs.config.enable_room_list_search
self.response_cache = ResponseCache(hs, "room_list")
self.remote_response_cache = ResponseCache(hs, "remote_room_list",
timeout_ms=30 * 1000)
@@ -66,10 +67,17 @@ class RoomListHandler(BaseHandler):
appservice and network id to use an appservice specific one.
Setting to None returns all public rooms across all lists.
"""
+ if not self.enable_room_list_search:
+ return defer.succeed({
+ "chunk": [],
+ "total_room_count_estimate": 0,
+ })
+
logger.info(
"Getting public room list: limit=%r, since=%r, search=%r, network=%r",
limit, since_token, bool(search_filter), network_tuple,
)
+
if search_filter:
# We explicitly don't bother caching searches or requests for
# appservice specific lists.
@@ -159,7 +167,7 @@ class RoomListHandler(BaseHandler):
if not latest_event_ids:
return
- joined_users = yield self.state_handler.get_current_user_in_room(
+ joined_users = yield self.state_handler.get_current_users_in_room(
room_id, latest_event_ids,
)
@@ -441,6 +449,12 @@ class RoomListHandler(BaseHandler):
def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
search_filter=None, include_all_networks=False,
third_party_instance_id=None,):
+ if not self.enable_room_list_search:
+ defer.returnValue({
+ "chunk": [],
+ "total_room_count_estimate": 0,
+ })
+
if search_filter:
# We currently don't support searching across federation, so we have
# to do it manually without pagination
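
One wrinkle in the room_list change: get_public_room_list is a plain method, so its disabled-search short-circuit must wrap the empty result in defer.succeed() to keep handing callers a Deferred, whereas the remote variant sits inside an inlineCallbacks generator and uses defer.returnValue. A minimal illustration of the first form:

    from twisted.internet import defer

    EMPTY_ROOM_LIST = {"chunk": [], "total_room_count_estimate": 0}

    def get_public_room_list(enable_room_list_search, do_real_lookup):
        if not enable_room_list_search:
            # Plain function, so hand back an already-fired Deferred.
            return defer.succeed(dict(EMPTY_ROOM_LIST))
        return do_real_lookup()
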
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 190ea2c..93ac986 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -33,6 +34,8 @@ from synapse.types import RoomID, UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_joined_room, user_left_room
+from ._base import BaseHandler
+
logger = logging.getLogger(__name__)
id_server_scheme = "https://"
@@ -70,6 +73,13 @@ class RoomMemberHandler(object):
self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()
self._server_notices_mxid = self.config.server_notices_mxid
+ self._enable_lookup = hs.config.enable_3pid_lookup
+ self.allow_per_room_profiles = self.config.allow_per_room_profiles
+
+ # This is only used to get at the ratelimit function, and
+ # maybe_kick_guest_users. It's fine that there are multiple of these, as
+ # it doesn't store state.
+ self.base_handler = BaseHandler(hs)
@abc.abstractmethod
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
@@ -160,6 +170,7 @@ class RoomMemberHandler(object):
txn_id=None,
ratelimit=True,
content=None,
+ require_consent=True,
):
user_id = target.to_string()
@@ -185,6 +196,7 @@ class RoomMemberHandler(object):
token_id=requester.access_token_id,
txn_id=txn_id,
prev_events_and_hashes=prev_events_and_hashes,
+ require_consent=require_consent,
)
# Check if this event matches the previous membership event for the user.
@@ -232,6 +244,10 @@ class RoomMemberHandler(object):
self.copy_room_tags_and_direct_to_room(
predecessor["room_id"], room_id, user_id,
)
+ # Move over old push rules
+ self.store.move_push_rules_from_room_to_room_for_user(
+ predecessor["room_id"], room_id, user_id,
+ )
elif event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = yield self.store.get_event(prev_member_event_id)
@@ -301,6 +317,7 @@ class RoomMemberHandler(object):
third_party_signed=None,
ratelimit=True,
content=None,
+ require_consent=True,
):
key = (room_id,)
@@ -315,6 +332,7 @@ class RoomMemberHandler(object):
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
+ require_consent=require_consent,
)
defer.returnValue(result)
@@ -331,6 +349,7 @@ class RoomMemberHandler(object):
third_party_signed=None,
ratelimit=True,
content=None,
+ require_consent=True,
):
content_specified = bool(content)
if content is None:
@@ -340,6 +359,13 @@ class RoomMemberHandler(object):
# later on.
content = dict(content)
+ if not self.allow_per_room_profiles:
+ # Strip profile data, knowing that new profile data will be added to the
+ # event's content in event_creation_handler.create_event() using the target's
+ # global profile.
+ content.pop("displayname", None)
+ content.pop("avatar_url", None)
+
effective_membership_state = action
if action in ["kick", "unban"]:
effective_membership_state = "leave"
@@ -412,6 +438,9 @@ class RoomMemberHandler(object):
room_id, latest_event_ids=latest_event_ids,
)
+ # TODO: Refactor into dictionary of explicitly allowed transitions
+ # between old and new state, with specific error messages for some
+ # transitions and generic otherwise
old_state_id = current_state_ids.get((EventTypes.Member, target.to_string()))
if old_state_id:
old_state = yield self.store.get_event(old_state_id, allow_none=True)
@@ -437,6 +466,9 @@ class RoomMemberHandler(object):
if same_sender and same_membership and same_content:
defer.returnValue(old_state)
+ if old_membership in ["ban", "leave"] and action == "kick":
+ raise AuthError(403, "The target user is not in the room")
+
# we don't allow people to reject invites to the server notice
# room, but they can leave it once they are joined.
if (
@@ -450,6 +482,9 @@ class RoomMemberHandler(object):
"You cannot reject this invite",
errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
)
+ else:
+ if action == "kick":
+ raise AuthError(403, "The target user is not in the room")
is_host_in_room = yield self._is_host_in_room(current_state_ids)
@@ -512,6 +547,7 @@ class RoomMemberHandler(object):
ratelimit=ratelimit,
prev_events_and_hashes=prev_events_and_hashes,
content=content,
+ require_consent=require_consent,
)
defer.returnValue(res)
@@ -683,6 +719,10 @@ class RoomMemberHandler(object):
Codes.FORBIDDEN,
)
+ # We need to rate limit *before* we send out any 3PID invites, so we
+ # can't just rely on the standard ratelimiting of events.
+ yield self.base_handler.ratelimit(requester)
+
invitee = yield self._lookup_3pid(
id_server, medium, address
)
@@ -719,6 +759,10 @@ class RoomMemberHandler(object):
Returns:
str: the matrix ID of the 3pid, or None if it is not recognized.
"""
+ if not self._enable_lookup:
+ raise SynapseError(
+ 403, "Looking up third-party identifiers is denied from this server",
+ )
try:
data = yield self.simple_http_client.get_json(
"%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server,),
@@ -900,7 +944,7 @@ class RoomMemberHandler(object):
}
if self.config.invite_3pid_guest:
- guest_access_token, guest_user_id = yield self.get_or_register_3pid_guest(
+ guest_user_id, guest_access_token = yield self.get_or_register_3pid_guest(
requester=requester,
medium=medium,
address=address,
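
The allow_per_room_profiles guard strips client-supplied profile keys from the membership content before the event is built, so the sender's global profile is filled in later instead. As a tiny pure function (copying first, as update_membership does, so the caller's dict is untouched):

    def strip_per_room_profile(content, allow_per_room_profiles):
        content = dict(content or {})
        if not allow_per_room_profiles:
            content.pop("displayname", None)
            content.pop("avatar_url", None)
        return content
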
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 49c4393..9bba74d 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -23,7 +23,6 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
-from synapse.events.utils import serialize_event
from synapse.storage.state import StateFilter
from synapse.visibility import filter_events_for_client
@@ -36,6 +35,7 @@ class SearchHandler(BaseHandler):
def __init__(self, hs):
super(SearchHandler, self).__init__(hs)
+ self._event_serializer = hs.get_event_client_serializer()
@defer.inlineCallbacks
def get_old_rooms_from_upgraded_room(self, room_id):
@@ -401,14 +401,16 @@ class SearchHandler(BaseHandler):
time_now = self.clock.time_msec()
for context in contexts.values():
- context["events_before"] = [
- serialize_event(e, time_now)
- for e in context["events_before"]
- ]
- context["events_after"] = [
- serialize_event(e, time_now)
- for e in context["events_after"]
- ]
+ context["events_before"] = (
+ yield self._event_serializer.serialize_events(
+ context["events_before"], time_now,
+ )
+ )
+ context["events_after"] = (
+ yield self._event_serializer.serialize_events(
+ context["events_after"], time_now,
+ )
+ )
state_results = {}
if include_state:
@@ -422,14 +424,13 @@ class SearchHandler(BaseHandler):
# We're now about to serialize the events. We should not make any
# blocking calls after this. Otherwise the 'age' will be wrong
- results = [
- {
+ results = []
+ for e in allowed_events:
+ results.append({
"rank": rank_map[e.event_id],
- "result": serialize_event(e, time_now),
+ "result": (yield self._event_serializer.serialize_event(e, time_now)),
"context": contexts.get(e.event_id, {}),
- }
- for e in allowed_events
- ]
+ })
rooms_cat_res = {
"results": results,
@@ -438,10 +439,13 @@ class SearchHandler(BaseHandler):
}
if state_results:
- rooms_cat_res["state"] = {
- room_id: [serialize_event(e, time_now) for e in state]
- for room_id, state in state_results.items()
- }
+ s = {}
+ for room_id, state in state_results.items():
+ s[room_id] = yield self._event_serializer.serialize_events(
+ state, time_now,
+ )
+
+ rooms_cat_res["state"] = s
if room_groups and "room_id" in group_keys:
rooms_cat_res.setdefault("groups", {})["room_id"] = room_groups
diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py
new file mode 100644
index 0000000..b268bbc
--- /dev/null
+++ b/synapse/handlers/state_deltas.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from twisted.internet import defer
+
+logger = logging.getLogger(__name__)
+
+
+class StateDeltasHandler(object):
+
+ def __init__(self, hs):
+ self.store = hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def _get_key_change(self, prev_event_id, event_id, key_name, public_value):
+ """Given two events check if the `key_name` field in content changed
+ from not matching `public_value` to doing so.
+
+ For example, check if `history_visibility` (`key_name`) changed from
+ `shared` to `world_readable` (`public_value`).
+
+ Returns:
+ None if the field in the events either both match `public_value`
+ or if neither do, i.e. there has been no change.
+ True if it didn't match `public_value` but now does
+ False if it did match `public_value` but now doesn't
+ """
+ prev_event = None
+ event = None
+ if prev_event_id:
+ prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+
+ if event_id:
+ event = yield self.store.get_event(event_id, allow_none=True)
+
+ if not event and not prev_event:
+ logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
+ defer.returnValue(None)
+
+ prev_value = None
+ value = None
+
+ if prev_event:
+ prev_value = prev_event.content.get(key_name)
+
+ if event:
+ value = event.content.get(key_name)
+
+ logger.debug("prev_value: %r -> value: %r", prev_value, value)
+
+ if value == public_value and prev_value != public_value:
+ defer.returnValue(True)
+ elif value != public_value and prev_value == public_value:
+ defer.returnValue(False)
+ else:
+ defer.returnValue(None)
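
Stripped of the event fetching, _get_key_change is a three-valued comparison of the old and new field values against public_value. The pure core, exercised with the history_visibility example from the docstring:

    def key_change(prev_value, value, public_value):
        if value == public_value and prev_value != public_value:
            return True   # became public
        if value != public_value and prev_value == public_value:
            return False  # stopped being public
        return None       # no change either way

    assert key_change("shared", "world_readable", "world_readable") is True
    assert key_change("world_readable", "shared", "world_readable") is False
    assert key_change("shared", "invited", "world_readable") is None
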
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index bd97241..1ee9a6e 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -39,6 +39,9 @@ from synapse.visibility import filter_events_for_client
logger = logging.getLogger(__name__)
+# Debug logger for https://github.com/matrix-org/synapse/issues/4422
+issue4422_logger = logging.getLogger("synapse.handler.sync.4422_debug")
+
# Counts the number of times we returned a non-empty sync. `type` is one of
# "initial_sync", "full_state_sync" or "incremental_sync", `lazy_loaded` is
@@ -931,7 +934,7 @@ class SyncHandler(object):
res = yield self._generate_sync_entry_for_rooms(
sync_result_builder, account_data_by_room
)
- newly_joined_rooms, newly_joined_users, _, _ = res
+ newly_joined_rooms, newly_joined_or_invited_users, _, _ = res
_, _, newly_left_rooms, newly_left_users = res
block_all_presence_data = (
@@ -940,7 +943,7 @@ class SyncHandler(object):
)
if self.hs_config.use_presence and not block_all_presence_data:
yield self._generate_sync_entry_for_presence(
- sync_result_builder, newly_joined_rooms, newly_joined_users
+ sync_result_builder, newly_joined_rooms, newly_joined_or_invited_users
)
yield self._generate_sync_entry_for_to_device(sync_result_builder)
@@ -948,7 +951,7 @@ class SyncHandler(object):
device_lists = yield self._generate_sync_entry_for_device_list(
sync_result_builder,
newly_joined_rooms=newly_joined_rooms,
- newly_joined_users=newly_joined_users,
+ newly_joined_or_invited_users=newly_joined_or_invited_users,
newly_left_rooms=newly_left_rooms,
newly_left_users=newly_left_users,
)
@@ -962,6 +965,15 @@ class SyncHandler(object):
yield self._generate_sync_entry_for_groups(sync_result_builder)
+ # debug for https://github.com/matrix-org/synapse/issues/4422
+ for joined_room in sync_result_builder.joined:
+ room_id = joined_room.room_id
+ if room_id in newly_joined_rooms:
+ issue4422_logger.debug(
+ "Sync result for newly joined room %s: %r",
+ room_id, joined_room,
+ )
+
defer.returnValue(SyncResult(
presence=sync_result_builder.presence,
account_data=sync_result_builder.account_data,
@@ -1024,7 +1036,8 @@ class SyncHandler(object):
@measure_func("_generate_sync_entry_for_device_list")
@defer.inlineCallbacks
def _generate_sync_entry_for_device_list(self, sync_result_builder,
- newly_joined_rooms, newly_joined_users,
+ newly_joined_rooms,
+ newly_joined_or_invited_users,
newly_left_rooms, newly_left_users):
user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
@@ -1037,16 +1050,16 @@ class SyncHandler(object):
# TODO: Be more clever than this, i.e. remove users who we already
# share a room with?
for room_id in newly_joined_rooms:
- joined_users = yield self.state.get_current_user_in_room(room_id)
- newly_joined_users.update(joined_users)
+ joined_users = yield self.state.get_current_users_in_room(room_id)
+ newly_joined_or_invited_users.update(joined_users)
for room_id in newly_left_rooms:
- left_users = yield self.state.get_current_user_in_room(room_id)
+ left_users = yield self.state.get_current_users_in_room(room_id)
newly_left_users.update(left_users)
# TODO: Check that these users are actually new, i.e. either they
# weren't in the previous sync *or* they left and rejoined.
- changed.update(newly_joined_users)
+ changed.update(newly_joined_or_invited_users)
if not changed and not newly_left_users:
defer.returnValue(DeviceLists(
@@ -1164,7 +1177,7 @@ class SyncHandler(object):
@defer.inlineCallbacks
def _generate_sync_entry_for_presence(self, sync_result_builder, newly_joined_rooms,
- newly_joined_users):
+ newly_joined_or_invited_users):
"""Generates the presence portion of the sync response. Populates the
`sync_result_builder` with the result.
@@ -1172,8 +1185,9 @@ class SyncHandler(object):
sync_result_builder(SyncResultBuilder)
newly_joined_rooms(list): List of rooms that the user has joined
since the last sync (or empty if an initial sync)
- newly_joined_users(list): List of users that have joined rooms
- since the last sync (or empty if an initial sync)
+ newly_joined_or_invited_users(list): List of users that have joined
+ or been invited to rooms since the last sync (or empty if an initial
+ sync)
"""
now_token = sync_result_builder.now_token
sync_config = sync_result_builder.sync_config
@@ -1199,9 +1213,9 @@ class SyncHandler(object):
"presence_key", presence_key
)
- extra_users_ids = set(newly_joined_users)
+ extra_users_ids = set(newly_joined_or_invited_users)
for room_id in newly_joined_rooms:
- users = yield self.state.get_current_user_in_room(room_id)
+ users = yield self.state.get_current_users_in_room(room_id)
extra_users_ids.update(users)
extra_users_ids.discard(user.to_string())
@@ -1231,7 +1245,8 @@ class SyncHandler(object):
Returns:
Deferred(tuple): Returns a 4-tuple of
- `(newly_joined_rooms, newly_joined_users, newly_left_rooms, newly_left_users)`
+ `(newly_joined_rooms, newly_joined_or_invited_users,
+ newly_left_rooms, newly_left_users)`
"""
user_id = sync_result_builder.sync_config.user.to_string()
block_all_room_ephemeral = (
@@ -1302,8 +1317,8 @@ class SyncHandler(object):
sync_result_builder.invited.extend(invited)
- # Now we want to get any newly joined users
- newly_joined_users = set()
+ # Now we want to get any newly joined or invited users
+ newly_joined_or_invited_users = set()
newly_left_users = set()
if since_token:
for joined_sync in sync_result_builder.joined:
@@ -1312,19 +1327,22 @@ class SyncHandler(object):
)
for event in it:
if event.type == EventTypes.Member:
- if event.membership == Membership.JOIN:
- newly_joined_users.add(event.state_key)
+ if (
+ event.membership == Membership.JOIN or
+ event.membership == Membership.INVITE
+ ):
+ newly_joined_or_invited_users.add(event.state_key)
else:
prev_content = event.unsigned.get("prev_content", {})
prev_membership = prev_content.get("membership", None)
if prev_membership == Membership.JOIN:
newly_left_users.add(event.state_key)
- newly_left_users -= newly_joined_users
+ newly_left_users -= newly_joined_or_invited_users
defer.returnValue((
newly_joined_rooms,
- newly_joined_users,
+ newly_joined_or_invited_users,
newly_left_rooms,
newly_left_users,
))
@@ -1369,7 +1387,7 @@ class SyncHandler(object):
where:
room_entries is a list [RoomSyncResultBuilder]
invited_rooms is a list [InvitedSyncResult]
- newly_joined rooms is a list[str] of room ids
+ newly_joined_rooms is a list[str] of room ids
newly_left_rooms is a list[str] of room ids
"""
user_id = sync_result_builder.sync_config.user.to_string()
@@ -1410,7 +1428,7 @@ class SyncHandler(object):
if room_id in sync_result_builder.joined_room_ids and non_joins:
# Always include if the user (re)joined the room, especially
# important so that device list changes are calculated correctly.
- # If there are non join member events, but we are still in the room,
+ # If there are non-join member events, but we are still in the room,
# then the user must have left and joined
newly_joined_rooms.append(room_id)
@@ -1425,6 +1443,17 @@ class SyncHandler(object):
old_mem_ev = yield self.store.get_event(
old_mem_ev_id, allow_none=True
)
+
+ # debug for #4422
+ if has_join:
+ prev_membership = None
+ if old_mem_ev:
+ prev_membership = old_mem_ev.membership
+ issue4422_logger.debug(
+ "Previous membership for room %s with join: %s (event %s)",
+ room_id, prev_membership, old_mem_ev_id,
+ )
+
if not old_mem_ev or old_mem_ev.membership != Membership.JOIN:
newly_joined_rooms.append(room_id)
@@ -1519,30 +1548,39 @@ class SyncHandler(object):
for room_id in sync_result_builder.joined_room_ids:
room_entry = room_to_events.get(room_id, None)
+ newly_joined = room_id in newly_joined_rooms
if room_entry:
events, start_key = room_entry
prev_batch_token = now_token.copy_and_replace("room_key", start_key)
- room_entries.append(RoomSyncResultBuilder(
+ entry = RoomSyncResultBuilder(
room_id=room_id,
rtype="joined",
events=events,
- newly_joined=room_id in newly_joined_rooms,
+ newly_joined=newly_joined,
full_state=False,
- since_token=None if room_id in newly_joined_rooms else since_token,
+ since_token=None if newly_joined else since_token,
upto_token=prev_batch_token,
- ))
+ )
else:
- room_entries.append(RoomSyncResultBuilder(
+ entry = RoomSyncResultBuilder(
room_id=room_id,
rtype="joined",
events=[],
- newly_joined=room_id in newly_joined_rooms,
+ newly_joined=newly_joined,
full_state=False,
since_token=since_token,
upto_token=since_token,
- ))
+ )
+
+ if newly_joined:
+ # debugging for https://github.com/matrix-org/synapse/issues/4422
+ issue4422_logger.debug(
+ "RoomSyncResultBuilder events for newly joined room %s: %r",
+ room_id, entry.events,
+ )
+ room_entries.append(entry)
defer.returnValue((room_entries, invited, newly_joined_rooms, newly_left_rooms))
@@ -1663,6 +1701,13 @@ class SyncHandler(object):
newly_joined_room=newly_joined,
)
+ if newly_joined:
+ # debug for https://github.com/matrix-org/synapse/issues/4422
+ issue4422_logger.debug(
+ "Timeline events after filtering in newly-joined room %s: %r",
+ room_id, batch,
+ )
+
# When we join the room (or the client requests full_state), we should
# send down any existing tags. Usually the user won't have tags in a
# newly joined room, unless either a) they've joined before or b) the
@@ -1816,7 +1861,7 @@ class SyncHandler(object):
extrems = yield self.store.get_forward_extremeties_for_room(
room_id, stream_ordering,
)
- users_in_room = yield self.state.get_current_user_in_room(
+ users_in_room = yield self.state.get_current_users_in_room(
room_id, extrems,
)
if user_id in users_in_room:
@@ -1894,15 +1939,34 @@ def _calculate_state(
class SyncResultBuilder(object):
- "Used to help build up a new SyncResult for a user"
+ """Used to help build up a new SyncResult for a user
+
+ Attributes:
+ sync_config (SyncConfig)
+ full_state (bool)
+ since_token (StreamToken)
+ now_token (StreamToken)
+ joined_room_ids (list[str])
+
+ # The following mirror the fields in a sync response
+ presence (list)
+ account_data (list)
+ joined (list[JoinedSyncResult])
+ invited (list[InvitedSyncResult])
+ archived (list[ArchivedSyncResult])
+ device (list)
+ groups (GroupsSyncResult|None)
+ to_device (list)
+ """
def __init__(self, sync_config, full_state, since_token, now_token,
joined_room_ids):
"""
Args:
- sync_config(SyncConfig)
- full_state(bool): The full_state flag as specified by user
- since_token(StreamToken): The token supplied by user, or None.
- now_token(StreamToken): The token to sync up to.
+ sync_config (SyncConfig)
+ full_state (bool): The full_state flag as specified by user
+ since_token (StreamToken): The token supplied by user, or None.
+ now_token (StreamToken): The token to sync up to.
+ joined_room_ids (list[str]): List of rooms the user is joined to
"""
self.sync_config = sync_config
self.full_state = full_state
@@ -1930,8 +1994,8 @@ class RoomSyncResultBuilder(object):
Args:
room_id(str)
rtype(str): One of `"joined"` or `"archived"`
- events(list): List of events to include in the room, (more events
- may be added when generating result).
+ events(list[FrozenEvent]): List of events to include in the room
+ (more events may be added when generating result).
newly_joined(bool): If the user has newly joined the room
full_state(bool): Whether the full state should be sent in result
since_token(StreamToken): Earliest point to return events from, or None
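
The dedicated `issue4422_logger` above can be switched on by itself, without raising the log level of the whole sync handler. Synapse is normally configured through its YAML log config; the snippet below is just the stdlib-Python equivalent, shown for illustration:

    import logging

    # Enable only the targeted debug logger added for issue #4422.
    logging.getLogger("synapse.handler.sync.4422_debug").setLevel(logging.DEBUG)
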
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index a61bbf9..972662e 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -218,7 +218,7 @@ class TypingHandler(object):
@defer.inlineCallbacks
def _push_remote(self, member, typing):
try:
- users = yield self.state.get_current_user_in_room(member.room_id)
+ users = yield self.state.get_current_users_in_room(member.room_id)
self._member_last_federation_poke[member] = self.clock.time_msec()
now = self.clock.time_msec()
@@ -231,7 +231,7 @@ class TypingHandler(object):
for domain in set(get_domain_from_id(u) for u in users):
if domain != self.server_name:
logger.debug("sending typing update to %s", domain)
- self.federation.send_edu(
+ self.federation.build_and_send_edu(
destination=domain,
edu_type="m.typing",
content={
@@ -261,7 +261,7 @@ class TypingHandler(object):
)
return
- users = yield self.state.get_current_user_in_room(room_id)
+ users = yield self.state.get_current_users_in_room(room_id)
domains = set(get_domain_from_id(u) for u in users)
if self.server_name in domains:
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 283c6c1..5de9630 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -15,12 +15,13 @@
import logging
-from six import iteritems
+from six import iteritems, iterkeys
from twisted.internet import defer
import synapse.metrics
from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.handlers.state_deltas import StateDeltasHandler
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.roommember import ProfileInfo
from synapse.types import get_localpart_from_id
@@ -29,7 +30,7 @@ from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
-class UserDirectoryHandler(object):
+class UserDirectoryHandler(StateDeltasHandler):
"""Handles querying of and keeping updated the user_directory.
N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY
@@ -38,19 +39,11 @@ class UserDirectoryHandler(object):
     world_readable or publicly joinable room. We keep a database table up to date
by streaming changes of the current state and recalculating whether users should
be in the directory or not when necessary.
-
- For each user in the directory we also store a room_id which is public and that the
- user is joined to. This allows us to ignore history_visibility and join_rules changes
- for that user in all other public rooms, as we know they'll still be in at least
- one public room.
"""
- INITIAL_ROOM_SLEEP_MS = 50
- INITIAL_ROOM_SLEEP_COUNT = 100
- INITIAL_ROOM_BATCH_SIZE = 100
- INITIAL_USER_SLEEP_MS = 10
-
def __init__(self, hs):
+ super(UserDirectoryHandler, self).__init__(hs)
+
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self.server_name = hs.hostname
@@ -59,15 +52,6 @@ class UserDirectoryHandler(object):
self.is_mine_id = hs.is_mine_id
self.update_user_directory = hs.config.update_user_directory
self.search_all_users = hs.config.user_directory_search_all_users
-
- # When start up for the first time we need to populate the user_directory.
- # This is a set of user_id's we've inserted already
- self.initially_handled_users = set()
- self.initially_handled_users_in_public = set()
-
- self.initially_handled_users_share = set()
- self.initially_handled_users_share_private_room = set()
-
# The current position in the current_state_delta stream
self.pos = None
@@ -130,7 +114,7 @@ class UserDirectoryHandler(object):
# Support users are for diagnostics and should not appear in the user directory.
if not is_support:
yield self.store.update_profile_in_user_dir(
- user_id, profile.display_name, profile.avatar_url, None
+ user_id, profile.display_name, profile.avatar_url
)
@defer.inlineCallbacks
@@ -140,7 +124,6 @@ class UserDirectoryHandler(object):
# FIXME(#3714): We should probably do this in the same worker as all
# the other changes.
yield self.store.remove_from_user_dir(user_id)
- yield self.store.remove_from_user_in_public_room(user_id)
@defer.inlineCallbacks
def _unsafe_process(self):
@@ -148,10 +131,9 @@ class UserDirectoryHandler(object):
if self.pos is None:
self.pos = yield self.store.get_user_directory_stream_pos()
- # If still None then we need to do the initial fill of directory
+ # If still None then the initial background update hasn't happened yet
if self.pos is None:
- yield self._do_initial_spam()
- self.pos = yield self.store.get_user_directory_stream_pos()
+ defer.returnValue(None)
# Loop round handling deltas until we're up to date
while True:
@@ -172,149 +154,6 @@ class UserDirectoryHandler(object):
yield self.store.update_user_directory_stream_pos(self.pos)
- @defer.inlineCallbacks
- def _do_initial_spam(self):
- """Populates the user_directory from the current state of the DB, used
- when synapse first starts with user_directory support
- """
- new_pos = yield self.store.get_max_stream_id_in_current_state_deltas()
-
- # Delete any existing entries just in case there are any
- yield self.store.delete_all_from_user_dir()
-
- # We process by going through each existing room at a time.
- room_ids = yield self.store.get_all_rooms()
-
- logger.info("Doing initial update of user directory. %d rooms", len(room_ids))
- num_processed_rooms = 0
-
- for room_id in room_ids:
- logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
- yield self._handle_initial_room(room_id)
- num_processed_rooms += 1
- yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
-
- logger.info("Processed all rooms.")
-
- if self.search_all_users:
- num_processed_users = 0
- user_ids = yield self.store.get_all_local_users()
- logger.info(
- "Doing initial update of user directory. %d users", len(user_ids)
- )
- for user_id in user_ids:
- # We add profiles for all users even if they don't match the
- # include pattern, just in case we want to change it in future
- logger.info(
- "Handling user %d/%d", num_processed_users + 1, len(user_ids)
- )
- yield self._handle_local_user(user_id)
- num_processed_users += 1
- yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.0)
-
- logger.info("Processed all users")
-
- self.initially_handled_users = None
- self.initially_handled_users_in_public = None
- self.initially_handled_users_share = None
- self.initially_handled_users_share_private_room = None
-
- yield self.store.update_user_directory_stream_pos(new_pos)
-
- @defer.inlineCallbacks
- def _handle_initial_room(self, room_id):
- """Called when we initially fill out user_directory one room at a time
- """
- is_in_room = yield self.store.is_host_joined(room_id, self.server_name)
- if not is_in_room:
- return
-
- is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
- room_id
- )
-
- users_with_profile = yield self.state.get_current_user_in_room(room_id)
- user_ids = set(users_with_profile)
- unhandled_users = user_ids - self.initially_handled_users
-
- yield self.store.add_profiles_to_user_dir(
- room_id,
- {user_id: users_with_profile[user_id] for user_id in unhandled_users},
- )
-
- self.initially_handled_users |= unhandled_users
-
- if is_public:
- yield self.store.add_users_to_public_room(
- room_id, user_ids=user_ids - self.initially_handled_users_in_public
- )
- self.initially_handled_users_in_public |= user_ids
-
- # We now go and figure out the new users who share rooms with user entries
- # We sleep aggressively here as otherwise it can starve resources.
- # We also batch up inserts/updates, but try to avoid too many at once.
- to_insert = set()
- to_update = set()
- count = 0
- for user_id in user_ids:
- if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
- yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
-
- if not self.is_mine_id(user_id):
- count += 1
- continue
-
- if self.store.get_if_app_services_interested_in_user(user_id):
- count += 1
- continue
-
- for other_user_id in user_ids:
- if user_id == other_user_id:
- continue
-
- if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
- yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
- count += 1
-
- user_set = (user_id, other_user_id)
-
- if user_set in self.initially_handled_users_share_private_room:
- continue
-
- if user_set in self.initially_handled_users_share:
- if is_public:
- continue
- to_update.add(user_set)
- else:
- to_insert.add(user_set)
-
- if is_public:
- self.initially_handled_users_share.add(user_set)
- else:
- self.initially_handled_users_share_private_room.add(user_set)
-
- if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
- yield self.store.add_users_who_share_room(
- room_id, not is_public, to_insert
- )
- to_insert.clear()
-
- if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
- yield self.store.update_users_who_share_room(
- room_id, not is_public, to_update
- )
- to_update.clear()
-
- if to_insert:
- yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
- to_insert.clear()
-
- if to_update:
- yield self.store.update_users_who_share_room(
- room_id, not is_public, to_update
- )
- to_update.clear()
-
@defer.inlineCallbacks
def _handle_deltas(self, deltas):
"""Called with the state deltas to process
@@ -356,6 +195,7 @@ class UserDirectoryHandler(object):
user_ids = yield self.store.get_users_in_dir_due_to_room(
room_id
)
+
for user_id in user_ids:
yield self._handle_remove_user(room_id, user_id)
return
@@ -436,14 +276,20 @@ class UserDirectoryHandler(object):
# ignore the change
return
- if change:
- users_with_profile = yield self.state.get_current_user_in_room(room_id)
- for user_id, profile in iteritems(users_with_profile):
- yield self._handle_new_user(room_id, user_id, profile)
- else:
- users = yield self.store.get_users_in_public_due_to_room(room_id)
- for user_id in users:
- yield self._handle_remove_user(room_id, user_id)
+ users_with_profile = yield self.state.get_current_users_in_room(room_id)
+
+ # Remove every user from the sharing tables for that room.
+ for user_id in iterkeys(users_with_profile):
+ yield self.store.remove_user_who_share_room(user_id, room_id)
+
+ # Then, re-add them to the tables.
+ # NOTE: this is not the most efficient method, as handle_new_user sets
+ # up local_user -> other_user and other_user_whos_local -> local_user,
+ # which, when run over an entire room, will result in the same values
+ # being added multiple times. The batching upserts shouldn't make this
+ # too bad, though.
+ for user_id, profile in iteritems(users_with_profile):
+ yield self._handle_new_user(room_id, user_id, profile)
@defer.inlineCallbacks
def _handle_local_user(self, user_id):
@@ -457,7 +303,9 @@ class UserDirectoryHandler(object):
row = yield self.store.get_user_in_directory(user_id)
if not row:
- yield self.store.add_profiles_to_user_dir(None, {user_id: profile})
+ yield self.store.update_profile_in_user_dir(
+ user_id, profile.display_name, profile.avatar_url
+ )
@defer.inlineCallbacks
def _handle_new_user(self, room_id, user_id, profile):
@@ -469,177 +317,68 @@ class UserDirectoryHandler(object):
"""
logger.debug("Adding new user to dir, %r", user_id)
- row = yield self.store.get_user_in_directory(user_id)
- if not row:
- yield self.store.add_profiles_to_user_dir(room_id, {user_id: profile})
+ yield self.store.update_profile_in_user_dir(
+ user_id, profile.display_name, profile.avatar_url
+ )
is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
room_id
)
+ # Now we update users who share rooms with users.
+ users_with_profile = yield self.state.get_current_users_in_room(room_id)
if is_public:
- row = yield self.store.get_user_in_public_room(user_id)
- if not row:
- yield self.store.add_users_to_public_room(room_id, [user_id])
+ yield self.store.add_users_in_public_rooms(room_id, (user_id,))
else:
- logger.debug("Not adding new user to public dir, %r", user_id)
-
- # Now we update users who share rooms with users. We do this by getting
- # all the current users in the room and seeing which aren't already
- # marked in the database as sharing with `user_id`
+ to_insert = set()
- users_with_profile = yield self.state.get_current_user_in_room(room_id)
+ # First, if they're our user then we need to update for every user
+ if self.is_mine_id(user_id):
- to_insert = set()
- to_update = set()
+ is_appservice = self.store.get_if_app_services_interested_in_user(
+ user_id
+ )
- is_appservice = self.store.get_if_app_services_interested_in_user(user_id)
+ # We don't care about appservice users.
+ if not is_appservice:
+ for other_user_id in users_with_profile:
+ if user_id == other_user_id:
+ continue
- # First, if they're our user then we need to update for every user
- if self.is_mine_id(user_id) and not is_appservice:
- # Returns a map of other_user_id -> shared_private. We only need
- # to update mappings if for users that either don't share a room
- # already (aren't in the map) or, if the room is private, those that
- # only share a public room.
- user_ids_shared = yield self.store.get_users_who_share_room_from_dir(
- user_id
- )
+ to_insert.add((user_id, other_user_id))
+ # Next we need to update for every local user in the room
for other_user_id in users_with_profile:
if user_id == other_user_id:
continue
- shared_is_private = user_ids_shared.get(other_user_id)
- if shared_is_private is True:
- # We've already marked in the database they share a private room
- continue
- elif shared_is_private is False:
- # They already share a public room, so only update if this is
- # a private room
- if not is_public:
- to_update.add((user_id, other_user_id))
- elif shared_is_private is None:
- # This is the first time they both share a room
- to_insert.add((user_id, other_user_id))
-
- # Next we need to update for every local user in the room
- for other_user_id in users_with_profile:
- if user_id == other_user_id:
- continue
-
- is_appservice = self.store.get_if_app_services_interested_in_user(
- other_user_id
- )
- if self.is_mine_id(other_user_id) and not is_appservice:
- shared_is_private = yield self.store.get_if_users_share_a_room(
- other_user_id, user_id
+ is_appservice = self.store.get_if_app_services_interested_in_user(
+ other_user_id
)
- if shared_is_private is True:
- # We've already marked in the database they share a private room
- continue
- elif shared_is_private is False:
- # They already share a public room, so only update if this is
- # a private room
- if not is_public:
- to_update.add((other_user_id, user_id))
- elif shared_is_private is None:
- # This is the first time they both share a room
+ if self.is_mine_id(other_user_id) and not is_appservice:
to_insert.add((other_user_id, user_id))
- if to_insert:
- yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
-
- if to_update:
- yield self.store.update_users_who_share_room(
- room_id, not is_public, to_update
- )
+ if to_insert:
+ yield self.store.add_users_who_share_private_room(room_id, to_insert)
@defer.inlineCallbacks
def _handle_remove_user(self, room_id, user_id):
- """Called when we might need to remove user to directory
+ """Called when we might need to remove user from directory
Args:
room_id (str): room_id that user left or stopped being public that
user_id (str)
"""
- logger.debug("Maybe removing user %r", user_id)
+ logger.debug("Removing user %r", user_id)
- row = yield self.store.get_user_in_directory(user_id)
- update_user_dir = row and row["room_id"] == room_id
-
- row = yield self.store.get_user_in_public_room(user_id)
- update_user_in_public = row and row["room_id"] == room_id
-
- if update_user_in_public or update_user_dir:
- # XXX: Make this faster?
- rooms = yield self.store.get_rooms_for_user(user_id)
- for j_room_id in rooms:
- if not update_user_in_public and not update_user_dir:
- break
-
- is_in_room = yield self.store.is_host_joined(
- j_room_id, self.server_name
- )
-
- if not is_in_room:
- continue
+ # Remove user from sharing tables
+ yield self.store.remove_user_who_share_room(user_id, room_id)
- if update_user_dir:
- update_user_dir = False
- yield self.store.update_user_in_user_dir(user_id, j_room_id)
+ # Are they still in any rooms? If not, remove them entirely.
+ rooms_user_is_in = yield self.store.get_user_dir_rooms_user_is_in(user_id)
- is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
- j_room_id
- )
-
- if update_user_in_public and is_public:
- yield self.store.update_user_in_public_user_list(user_id, j_room_id)
- update_user_in_public = False
-
- if update_user_dir:
+ if len(rooms_user_is_in) == 0:
yield self.store.remove_from_user_dir(user_id)
- elif update_user_in_public:
- yield self.store.remove_from_user_in_public_room(user_id)
-
- # Now handle users_who_share_rooms.
-
- # Get a list of user tuples that were in the DB due to this room and
- # users (this includes tuples where the other user matches `user_id`)
- user_tuples = yield self.store.get_users_in_share_dir_with_room_id(
- user_id, room_id
- )
-
- for user_id, other_user_id in user_tuples:
- # For each user tuple get a list of rooms that they still share,
- # trying to find a private room, and update the entry in the DB
- rooms = yield self.store.get_rooms_in_common_for_users(
- user_id, other_user_id
- )
-
- # If they dont share a room anymore, remove the mapping
- if not rooms:
- yield self.store.remove_user_who_share_room(user_id, other_user_id)
- continue
-
- found_public_share = None
- for j_room_id in rooms:
- is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
- j_room_id
- )
-
- if is_public:
- found_public_share = j_room_id
- else:
- found_public_share = None
- yield self.store.update_users_who_share_room(
- room_id, not is_public, [(user_id, other_user_id)]
- )
- break
-
- if found_public_share:
- yield self.store.update_users_who_share_room(
- room_id, not is_public, [(user_id, other_user_id)]
- )
@defer.inlineCallbacks
def _handle_profile_change(self, user_id, room_id, prev_event_id, event_id):
@@ -665,50 +404,4 @@ class UserDirectoryHandler(object):
new_avatar = event.content.get("avatar_url")
if prev_name != new_name or prev_avatar != new_avatar:
- yield self.store.update_profile_in_user_dir(
- user_id, new_name, new_avatar, room_id
- )
-
- @defer.inlineCallbacks
- def _get_key_change(self, prev_event_id, event_id, key_name, public_value):
- """Given two events check if the `key_name` field in content changed
- from not matching `public_value` to doing so.
-
- For example, check if `history_visibility` (`key_name`) changed from
- `shared` to `world_readable` (`public_value`).
-
- Returns:
- None if the field in the events either both match `public_value`
- or if neither do, i.e. there has been no change.
- True if it didnt match `public_value` but now does
- False if it did match `public_value` but now doesn't
- """
- prev_event = None
- event = None
- if prev_event_id:
- prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
-
- if event_id:
- event = yield self.store.get_event(event_id, allow_none=True)
-
- if not event and not prev_event:
- logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
- defer.returnValue(None)
-
- prev_value = None
- value = None
-
- if prev_event:
- prev_value = prev_event.content.get(key_name)
-
- if event:
- value = event.content.get(key_name)
-
- logger.debug("prev_value: %r -> value: %r", prev_value, value)
-
- if value == public_value and prev_value != public_value:
- defer.returnValue(True)
- elif value != public_value and prev_value == public_value:
- defer.returnValue(False)
- else:
- defer.returnValue(None)
+ yield self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
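
The remove-then-re-add loop in the publicity-change handling above leans on the sharing-table writes being upserts, so re-inserting an existing pair is harmless. A toy in-memory sketch of why the pattern is idempotent; the set below merely stands in for the real table:

    shares = set()  # stands in for the users-who-share-rooms table

    def remove_user_pairs(user_id):
        # the real remove_user_who_share_room scopes this to a single room
        shares.difference_update({p for p in shares if user_id in p})

    def add_pair(pair):
        shares.add(pair)  # idempotent, like the store's batched upserts

    add_pair(("@alice:hs", "@bob:hs"))
    add_pair(("@alice:hs", "@bob:hs"))  # re-adding is a no-op
    remove_user_pairs("@alice:hs")
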
diff --git a/synapse/http/client.py b/synapse/http/client.py
index ad454f4..77fe688 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -90,45 +90,50 @@ class IPBlacklistingResolver(object):
def resolveHostName(self, recv, hostname, portNumber=0):
r = recv()
- d = defer.Deferred()
addresses = []
- @provider(IResolutionReceiver)
- class EndpointReceiver(object):
- @staticmethod
- def resolutionBegan(resolutionInProgress):
- pass
+ def _callback():
+ r.resolutionBegan(None)
- @staticmethod
- def addressResolved(address):
- ip_address = IPAddress(address.host)
+ has_bad_ip = False
+ for i in addresses:
+ ip_address = IPAddress(i.host)
if check_against_blacklist(
ip_address, self._ip_whitelist, self._ip_blacklist
):
logger.info(
- "Dropped %s from DNS resolution to %s" % (ip_address, hostname)
+ "Dropped %s from DNS resolution to %s due to blacklist" %
+ (ip_address, hostname)
)
- raise SynapseError(403, "IP address blocked by IP blacklist entry")
+ has_bad_ip = True
+
+ # if we have a blacklisted IP, we'd like to raise an error to block the
+ # request, but all we can really do from here is claim that there were no
+ # valid results.
+ if not has_bad_ip:
+ for i in addresses:
+ r.addressResolved(i)
+ r.resolutionComplete()
+ @provider(IResolutionReceiver)
+ class EndpointReceiver(object):
+ @staticmethod
+ def resolutionBegan(resolutionInProgress):
+ pass
+
+ @staticmethod
+ def addressResolved(address):
addresses.append(address)
@staticmethod
def resolutionComplete():
- d.callback(addresses)
+ _callback()
self._reactor.nameResolver.resolveHostName(
EndpointReceiver, hostname, portNumber=portNumber
)
- def _callback(addrs):
- r.resolutionBegan(None)
- for i in addrs:
- r.addressResolved(i)
- r.resolutionComplete()
-
- d.addCallback(_callback)
-
return r
@@ -160,7 +165,8 @@ class BlacklistingAgentWrapper(Agent):
ip_address, self._ip_whitelist, self._ip_blacklist
):
logger.info(
- "Blocking access to %s because of blacklist" % (ip_address,)
+ "Blocking access to %s due to blacklist" %
+ (ip_address,)
)
e = SynapseError(403, "IP address blocked by IP blacklist entry")
return defer.fail(Failure(e))
@@ -258,9 +264,6 @@ class SimpleHttpClient(object):
uri (str): URI to query.
data (bytes): Data to send in the request body, if applicable.
headers (t.w.http_headers.Headers): Request headers.
-
- Raises:
- SynapseError: If the IP is blacklisted.
"""
# A small wrapper around self.agent.request() so we can easily attach
# counters to it
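
The rewritten resolver above postpones the blacklist decision to `resolutionComplete`: if any resolved address is blacklisted, it reports no results at all instead of raising mid-resolution. A minimal standalone sketch of that all-or-nothing check, assuming `check_against_blacklist` semantics consistent with the code (blacklisted unless explicitly whitelisted):

    from netaddr import IPAddress, IPSet

    blacklist = IPSet(["127.0.0.0/8", "10.0.0.0/8"])
    whitelist = IPSet()

    def check_against_blacklist(ip, whitelist, blacklist):
        # assumed semantics: blocked if blacklisted and not whitelisted
        return ip in blacklist and ip not in whitelist

    resolved = [IPAddress("93.184.216.34"), IPAddress("127.0.0.1")]
    if any(check_against_blacklist(ip, whitelist, blacklist) for ip in resolved):
        results = []      # claim there were no valid results
    else:
        results = resolved
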
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 384d8a3..b4cbe97 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -68,9 +68,13 @@ class MatrixFederationAgent(object):
TLS policy to use for fetching .well-known files. None to use a default
(browser-like) implementation.
- srv_resolver (SrvResolver|None):
+ _srv_resolver (SrvResolver|None):
SRVResolver impl to use for looking up SRV records. None to use a default
implementation.
+
+ _well_known_cache (TTLCache|None):
+ TTLCache impl for storing cached well-known lookups. None to use a default
+ implementation.
"""
def __init__(
@@ -145,7 +149,7 @@ class MatrixFederationAgent(object):
tls_options = None
else:
tls_options = self._tls_client_options_factory.get_options(
- res.tls_server_name.decode("ascii")
+ res.tls_server_name.decode("ascii"),
)
# make sure that the Host header is set correctly
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 1682c9a..7eefc7b 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -27,9 +27,11 @@ import treq
from canonicaljson import encode_canonical_json
from prometheus_client import Counter
from signedjson.sign import sign_json
+from zope.interface import implementer
from twisted.internet import defer, protocol
from twisted.internet.error import DNSLookupError
+from twisted.internet.interfaces import IReactorPluggableNameResolver
from twisted.internet.task import _EPSILON, Cooperator
from twisted.web._newclient import ResponseDone
from twisted.web.http_headers import Headers
@@ -44,6 +46,7 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.http import QuieterFileBodyProducer
+from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.util.async_helpers import timeout_deferred
from synapse.util.logcontext import make_deferred_yieldable
@@ -172,22 +175,99 @@ class MatrixFederationHttpClient(object):
self.hs = hs
self.signing_key = hs.config.signing_key[0]
self.server_name = hs.hostname
- reactor = hs.get_reactor()
+
+ real_reactor = hs.get_reactor()
+
+ # We need to use a DNS resolver which filters out blacklisted IP
+ # addresses, to prevent DNS rebinding.
+ nameResolver = IPBlacklistingResolver(
+ real_reactor, None, hs.config.federation_ip_range_blacklist,
+ )
+
+ @implementer(IReactorPluggableNameResolver)
+ class Reactor(object):
+ def __getattr__(_self, attr):
+ if attr == "nameResolver":
+ return nameResolver
+ else:
+ return getattr(real_reactor, attr)
+
+ self.reactor = Reactor()
self.agent = MatrixFederationAgent(
- hs.get_reactor(),
+ self.reactor,
tls_client_options_factory,
)
+
+ # Use a BlacklistingAgentWrapper to prevent circumventing the IP
+ # blacklist via IP literals in server names
+ self.agent = BlacklistingAgentWrapper(
+ self.agent, self.reactor,
+ ip_blacklist=hs.config.federation_ip_range_blacklist,
+ )
+
self.clock = hs.get_clock()
self._store = hs.get_datastore()
self.version_string_bytes = hs.version_string.encode('ascii')
self.default_timeout = 60
def schedule(x):
- reactor.callLater(_EPSILON, x)
+ self.reactor.callLater(_EPSILON, x)
self._cooperator = Cooperator(scheduler=schedule)
+ @defer.inlineCallbacks
+ def _send_request_with_optional_trailing_slash(
+ self,
+ request,
+ try_trailing_slash_on_400=False,
+ **send_request_args
+ ):
+ """Wrapper for _send_request which can optionally retry the request
+ upon receiving a combination of a 400 HTTP response code and a
+ 'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3
+ due to #3622.
+
+ Args:
+ request (MatrixFederationRequest): details of request to be sent
+ try_trailing_slash_on_400 (bool): Whether to retry the request
+ with a trailing slash appended to the request path when the
+ server responds with a 400 'M_UNRECOGNIZED'.
+ send_request_args (Dict): A dictionary of arguments to pass to
+ `_send_request()`.
+
+ Raises:
+ HttpResponseException: If we get an HTTP response code >= 300
+ (except 429).
+
+ Returns:
+ Deferred[Dict]: Parsed JSON response body.
+ """
+ try:
+ response = yield self._send_request(
+ request, **send_request_args
+ )
+ except HttpResponseException as e:
+ # Received an HTTP error > 300. Check if it meets the requirements
+ # to retry with a trailing slash
+ if not try_trailing_slash_on_400:
+ raise
+
+ if e.code != 400 or e.to_synapse_error().errcode != "M_UNRECOGNIZED":
+ raise
+
+ # Retry with a trailing slash if we received a 400 with
+ # 'M_UNRECOGNIZED' which some endpoints can return when omitting a
+ # trailing slash on Synapse <= v0.99.3.
+ logger.info("Retrying request with trailing slash")
+ request.path += "/"
+
+ response = yield self._send_request(
+ request, **send_request_args
+ )
+
+ defer.returnValue(response)
+
@defer.inlineCallbacks
def _send_request(
self,
@@ -196,7 +276,7 @@ class MatrixFederationHttpClient(object):
timeout=None,
long_retries=False,
ignore_backoff=False,
- backoff_on_404=False
+ backoff_on_404=False,
):
"""
Sends a request to the given server.
@@ -318,7 +398,7 @@ class MatrixFederationHttpClient(object):
request_deferred = timeout_deferred(
request_deferred,
timeout=_sec_timeout,
- reactor=self.hs.get_reactor(),
+ reactor=self.reactor,
)
response = yield request_deferred
@@ -345,7 +425,7 @@ class MatrixFederationHttpClient(object):
d = timeout_deferred(
d,
timeout=_sec_timeout,
- reactor=self.hs.get_reactor(),
+ reactor=self.reactor,
)
try:
@@ -473,7 +553,8 @@ class MatrixFederationHttpClient(object):
json_data_callback=None,
long_retries=False, timeout=None,
ignore_backoff=False,
- backoff_on_404=False):
+ backoff_on_404=False,
+ try_trailing_slash_on_400=False):
""" Sends the specifed json data using PUT
Args:
@@ -493,7 +574,12 @@ class MatrixFederationHttpClient(object):
and try the request anyway.
backoff_on_404 (bool): True if we should count a 404 response as
a failure of the server (and should therefore back off future
- requests)
+ requests).
+ try_trailing_slash_on_400 (bool): True if on a 400 M_UNRECOGNIZED
+ response we should try appending a trailing slash to the end
+ of the request. Workaround for #3622 in Synapse <= v0.99.3. This
+ will be attempted before backing off if backing off has been
+ enabled.
Returns:
Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
@@ -509,7 +595,6 @@ class MatrixFederationHttpClient(object):
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
-
request = MatrixFederationRequest(
method="PUT",
destination=destination,
@@ -519,17 +604,19 @@ class MatrixFederationHttpClient(object):
json=data,
)
- response = yield self._send_request(
+ response = yield self._send_request_with_optional_trailing_slash(
request,
+ try_trailing_slash_on_400,
+ backoff_on_404=backoff_on_404,
+ ignore_backoff=ignore_backoff,
long_retries=long_retries,
timeout=timeout,
- ignore_backoff=ignore_backoff,
- backoff_on_404=backoff_on_404,
)
body = yield _handle_json_response(
- self.hs.get_reactor(), self.default_timeout, request, response,
+ self.reactor, self.default_timeout, request, response,
)
+
defer.returnValue(body)
@defer.inlineCallbacks
@@ -586,13 +673,14 @@ class MatrixFederationHttpClient(object):
_sec_timeout = self.default_timeout
body = yield _handle_json_response(
- self.hs.get_reactor(), _sec_timeout, request, response,
+ self.reactor, _sec_timeout, request, response,
)
defer.returnValue(body)
@defer.inlineCallbacks
def get_json(self, destination, path, args=None, retry_on_dns_fail=True,
- timeout=None, ignore_backoff=False):
+ timeout=None, ignore_backoff=False,
+ try_trailing_slash_on_400=False):
""" GETs some json from the given host homeserver and path
Args:
@@ -606,6 +694,9 @@ class MatrixFederationHttpClient(object):
be retried.
ignore_backoff (bool): true to ignore the historical backoff data
and try the request anyway.
+ try_trailing_slash_on_400 (bool): True if on a 400 M_UNRECOGNIZED
+ response we should try appending a trailing slash to the end of
+ the request. Workaround for #3622 in Synapse <= v0.99.3.
Returns:
Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
@@ -631,16 +722,19 @@ class MatrixFederationHttpClient(object):
query=args,
)
- response = yield self._send_request(
+ response = yield self._send_request_with_optional_trailing_slash(
request,
+ try_trailing_slash_on_400,
+ backoff_on_404=False,
+ ignore_backoff=ignore_backoff,
retry_on_dns_fail=retry_on_dns_fail,
timeout=timeout,
- ignore_backoff=ignore_backoff,
)
body = yield _handle_json_response(
- self.hs.get_reactor(), self.default_timeout, request, response,
+ self.reactor, self.default_timeout, request, response,
)
+
defer.returnValue(body)
@defer.inlineCallbacks
@@ -687,7 +781,7 @@ class MatrixFederationHttpClient(object):
)
body = yield _handle_json_response(
- self.hs.get_reactor(), self.default_timeout, request, response,
+ self.reactor, self.default_timeout, request, response,
)
defer.returnValue(body)
@@ -735,7 +829,7 @@ class MatrixFederationHttpClient(object):
try:
d = _readBodyToFile(response, output_stream, max_size)
- d.addTimeout(self.default_timeout, self.hs.get_reactor())
+ d.addTimeout(self.default_timeout, self.reactor)
length = yield make_deferred_yieldable(d)
except Exception as e:
logger.warn(
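
The `Reactor` wrapper introduced above is a small delegating proxy: it intercepts `nameResolver` and forwards every other attribute to the real reactor. The same pattern in isolation, with illustrative names:

    class ReactorProxy(object):
        """Expose `wrapped` unchanged except for a swapped-in nameResolver."""

        def __init__(self, wrapped, name_resolver):
            self._wrapped = wrapped
            self._name_resolver = name_resolver

        def __getattr__(self, attr):
            # only invoked for attributes not found on the proxy itself
            if attr == "nameResolver":
                return self._name_resolver
            return getattr(self._wrapped, attr)
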
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index fc9a20f..b3abd1b 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -73,14 +73,27 @@ class ModuleApi(object):
"""
return self._auth_handler.check_user_exists(user_id)
- def register(self, localpart):
- """Registers a new user with given localpart
+ @defer.inlineCallbacks
+ def register(self, localpart, displayname=None, emails=[]):
+ """Registers a new user with given localpart and optional
+ displayname, emails.
+
+ Args:
+ localpart (str): The localpart of the new user.
+ displayname (str|None): The displayname of the new user.
+ emails (List[str]): Emails to bind to the new user.
Returns:
Deferred: a 2-tuple of (user_id, access_token)
"""
+ # Register the user
reg = self.hs.get_registration_handler()
- return reg.register(localpart=localpart)
+ user_id, access_token = yield reg.register(
+ localpart=localpart, default_display_name=displayname,
+ bind_emails=emails,
+ )
+
+ defer.returnValue((user_id, access_token))
@defer.inlineCallbacks
def invalidate_access_token(self, access_token):
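
A minimal sketch of calling the extended `register()` from a module; the wrapper function is illustrative, and `module_api` is assumed to be the `ModuleApi` instance handed to the module:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def create_demo_user(module_api):
        user_id, access_token = yield module_api.register(
            localpart="alice",
            displayname="Alice",
            emails=["alice@example.com"],
        )
        defer.returnValue((user_id, access_token))
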
diff --git a/synapse/notifier.py b/synapse/notifier.py
index de02b10..ff58966 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -178,8 +178,6 @@ class Notifier(object):
self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
)
- self.replication_deferred = ObservableDeferred(defer.Deferred())
-
# This is not a very cheap test to perform, but it's only executed
# when rendering the metrics page, which is likely once per minute at
# most when scraping it.
@@ -205,7 +203,9 @@ class Notifier(object):
def add_replication_callback(self, cb):
"""Add a callback that will be called when some new data is available.
- Callback is not given any arguments.
+ Callback is not given any arguments. It should *not* return a Deferred - if
+ it needs to do any asynchronous work, a background process should be started and
+ wrapped with run_as_background_process.
"""
self.replication_callbacks.append(cb)
@@ -517,60 +517,5 @@ class Notifier(object):
def notify_replication(self):
"""Notify the any replication listeners that there's a new event"""
- with PreserveLoggingContext():
- deferred = self.replication_deferred
- self.replication_deferred = ObservableDeferred(defer.Deferred())
- deferred.callback(None)
-
- # the callbacks may well outlast the current request, so we run
- # them in the sentinel logcontext.
- #
- # (ideally it would be up to the callbacks to know if they were
- # starting off background processes and drop the logcontext
- # accordingly, but that requires more changes)
- for cb in self.replication_callbacks:
- cb()
-
- @defer.inlineCallbacks
- def wait_for_replication(self, callback, timeout):
- """Wait for an event to happen.
-
- Args:
- callback: Gets called whenever an event happens. If this returns a
- truthy value then ``wait_for_replication`` returns, otherwise
- it waits for another event.
- timeout: How many milliseconds to wait for callback return a truthy
- value.
-
- Returns:
- A deferred that resolves with the value returned by the callback.
- """
- listener = _NotificationListener(None)
-
- end_time = self.clock.time_msec() + timeout
-
- while True:
- listener.deferred = self.replication_deferred.observe()
- result = yield callback()
- if result:
- break
-
- now = self.clock.time_msec()
- if end_time <= now:
- break
-
- listener.deferred = timeout_deferred(
- listener.deferred,
- timeout=(end_time - now) / 1000.,
- reactor=self.hs.get_reactor(),
- )
-
- try:
- with PreserveLoggingContext():
- yield listener.deferred
- except defer.TimeoutError:
- break
- except defer.CancelledError:
- break
-
- defer.returnValue(result)
+ for cb in self.replication_callbacks:
+ cb()
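
Given the new contract on `add_replication_callback`, a callback with asynchronous work to do should hand it off rather than return a Deferred. A minimal sketch, with illustrative callback and helper names (`notifier` standing for the homeserver's Notifier):

    from twisted.internet import defer

    from synapse.metrics.background_process_metrics import run_as_background_process

    def on_replication_notify():
        # must return nothing: kick the async work off as a background process
        run_as_background_process("replication_catchup", _do_catchup)

    @defer.inlineCallbacks
    def _do_catchup():
        # placeholder for real asynchronous work
        yield defer.succeed(None)

    notifier.add_replication_callback(on_replication_notify)
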
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index 8f0682c..3523a40 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -261,6 +261,23 @@ BASE_APPEND_OVERRIDE_RULES = [
'value': True,
}
]
+ },
+ {
+ 'rule_id': 'global/override/.m.rule.tombstone',
+ 'conditions': [
+ {
+ 'kind': 'event_match',
+ 'key': 'type',
+ 'pattern': 'm.room.tombstone',
+ '_id': '_tombstone',
+ }
+ ],
+ 'actions': [
+ 'notify', {
+ 'set_tweak': 'highlight',
+ 'value': True,
+ }
+ ]
}
]
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index 50e1007..e8ee674 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -72,8 +72,15 @@ class EmailPusher(object):
self._is_processing = False
- def on_started(self):
- if self.mailer is not None:
+ def on_started(self, should_check_for_notifs):
+ """Called when this pusher has been started.
+
+ Args:
+ should_check_for_notifs (bool): Whether we should immediately
+ check for push to send. Set to False only if it's known there
+ is nothing to send
+ """
+ if should_check_for_notifs and self.mailer is not None:
self._start_processing()
def on_stop(self):
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index e65f8c6..fac05aa 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -112,8 +112,16 @@ class HttpPusher(object):
self.data_minus_url.update(self.data)
del self.data_minus_url['url']
- def on_started(self):
- self._start_processing()
+ def on_started(self, should_check_for_notifs):
+ """Called when this pusher has been started.
+
+ Args:
+ should_check_for_notifs (bool): Whether we should immediately
+ check for push to send. Set to False only if it's known there
+ is nothing to send
+ """
+ if should_check_for_notifs:
+ self._start_processing()
def on_new_notifications(self, min_stream_ordering, max_stream_ordering):
self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 1eb5be0..c269bcf 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -521,11 +521,11 @@ def format_ts_filter(value, format):
return time.strftime(format, time.localtime(value / 1000))
-def load_jinja2_templates(config):
+def load_jinja2_templates(config, template_html_name, template_text_name):
"""Load the jinja2 email templates from disk
Returns:
- (notif_template_html, notif_template_text)
+ (template_html, template_text)
"""
logger.info("loading email templates from '%s'", config.email_template_dir)
loader = jinja2.FileSystemLoader(config.email_template_dir)
@@ -533,14 +533,10 @@ def load_jinja2_templates(config):
env.filters["format_ts"] = format_ts_filter
env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config)
- notif_template_html = env.get_template(
- config.email_notif_template_html
- )
- notif_template_text = env.get_template(
- config.email_notif_template_text
- )
+ template_html = env.get_template(template_html_name)
+ template_text = env.get_template(template_text_name)
- return notif_template_html, notif_template_text
+ return template_html, template_text
def _create_mxc_to_http_filter(config):
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index b33f2a3..14bc782 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -44,7 +44,11 @@ class PusherFactory(object):
if hs.config.email_enable_notifs:
self.mailers = {} # app_name -> Mailer
- templates = load_jinja2_templates(hs.config)
+ templates = load_jinja2_templates(
+ config=hs.config,
+ template_html_name=hs.config.email_notif_template_html,
+ template_text_name=hs.config.email_notif_template_text,
+ )
self.notif_template_html, self.notif_template_text = templates
self.pusher_types["email"] = self._create_email_pusher
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index abf1a1a..40a7709 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -21,6 +21,7 @@ from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import PusherConfigException
from synapse.push.pusher import PusherFactory
+from synapse.util.async_helpers import concurrently_execute
logger = logging.getLogger(__name__)
@@ -197,7 +198,7 @@ class PusherPool:
p = r
if p:
- self._start_pusher(p)
+ yield self._start_pusher(p)
@defer.inlineCallbacks
def _start_pushers(self):
@@ -208,10 +209,14 @@ class PusherPool:
"""
pushers = yield self.store.get_all_pushers()
logger.info("Starting %d pushers", len(pushers))
- for pusherdict in pushers:
- self._start_pusher(pusherdict)
+
+ # Stagger starting up the pushers so we don't completely drown the
+ # process on start up.
+ yield concurrently_execute(self._start_pusher, pushers, 10)
+
logger.info("Started pushers")
+ @defer.inlineCallbacks
def _start_pusher(self, pusherdict):
"""Start the given pusher
@@ -248,7 +253,22 @@ class PusherPool:
if appid_pushkey in byuser:
byuser[appid_pushkey].on_stop()
byuser[appid_pushkey] = p
- p.on_started()
+
+ # Check if there *may* be push to process. We do this as this check is a
+ # lot cheaper to do than actually fetching the exact rows we need to
+ # push.
+ user_id = pusherdict["user_name"]
+ last_stream_ordering = pusherdict["last_stream_ordering"]
+ if last_stream_ordering:
+ have_notifs = yield self.store.get_if_maybe_push_in_range_for_user(
+ user_id, last_stream_ordering,
+ )
+ else:
+ # We always want to default to starting up the pusher rather than
+ # risk missing push.
+ have_notifs = True
+
+ p.on_started(have_notifs)
@defer.inlineCallbacks
def remove_pusher(self, app_id, pushkey, user_id):
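
Two start-up behaviours combine above: pushers are started at most ten at a time via `concurrently_execute`, and each is told whether a cheap range check found anything that might need pushing. A minimal sketch of the staggered start, using `concurrently_execute(func, args, limit)` with illustrative function names:

    from twisted.internet import defer

    from synapse.util.async_helpers import concurrently_execute

    @defer.inlineCallbacks
    def start_all(pusherdicts):
        # at most 10 pushers being started concurrently
        yield concurrently_execute(start_one, pusherdicts, 10)

    @defer.inlineCallbacks
    def start_one(pusherdict):
        # placeholder: build the pusher, then call p.on_started(have_notifs)
        yield defer.succeed(None)
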
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 6593485..65eb8b7 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -16,7 +16,12 @@
import logging
-from pkg_resources import DistributionNotFound, VersionConflict, get_distribution
+from pkg_resources import (
+ DistributionNotFound,
+ Requirement,
+ VersionConflict,
+ get_provider,
+)
logger = logging.getLogger(__name__)
@@ -53,7 +58,7 @@ REQUIREMENTS = [
"pyasn1-modules>=0.0.7",
"daemonize>=2.3.1",
"bcrypt>=3.1.0",
- "pillow>=3.1.2",
+ "pillow>=4.3.0",
"sortedcontainers>=1.4.4",
"psutil>=2.0.0",
"pymacaroons>=0.13.0",
@@ -69,33 +74,53 @@ REQUIREMENTS = [
"attrs>=17.4.0",
"netaddr>=0.7.18",
+
+ # requests is a transitive dep of treq, and urllib3 is a transitive dep
+ # of requests, as well as of sentry-sdk.
+ #
+ # As of requests 2.21, requests does not yet support urllib3 1.25.
+ # (If we do not pin it here, pip will give us the latest urllib3
+ # due to the dep via sentry-sdk.)
+ "urllib3<1.25",
]
CONDITIONAL_REQUIREMENTS = {
"email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"],
"matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
- "postgres": ["psycopg2>=2.6"],
+
+ # we use execute_batch, which arrived in psycopg 2.7.
+ "postgres": ["psycopg2>=2.7"],
# ConsentResource uses select_autoescape, which arrived in jinja 2.9
"resources.consent": ["Jinja2>=2.9"],
# ACME support is required to provision TLS certificates from authorities
# that use the protocol, such as Let's Encrypt.
- "acme": ["txacme>=0.9.2"],
+ "acme": [
+ "txacme>=0.9.2",
+
+ # txacme depends on eliot. Eliot 1.8.0 is incompatible with
+ # python 3.5.2, as per https://github.com/itamarst/eliot/issues/418
+ 'eliot<1.8.0;python_version<"3.5.3"',
+ ],
"saml2": ["pysaml2>=4.5.0"],
+ "systemd": ["systemd-python>=231"],
"url_preview": ["lxml>=3.5.0"],
"test": ["mock>=2.0", "parameterized"],
"sentry": ["sentry-sdk>=0.7.2"],
}
+ALL_OPTIONAL_REQUIREMENTS = set()
+
+for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
+ # Exclude systemd as it's a system-based requirement.
+ if name not in ["systemd"]:
+ ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
-def list_requirements():
- deps = set(REQUIREMENTS)
- for opt in CONDITIONAL_REQUIREMENTS.values():
- deps = set(opt) | deps
- return list(deps)
+def list_requirements():
+ return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)
class DependencyException(Exception):
@@ -111,10 +136,10 @@ class DependencyException(Exception):
@property
def dependencies(self):
for i in self.args[0]:
- yield 'python3-' + i
+ yield "python3-" + i
-def check_requirements(for_feature=None, _get_distribution=get_distribution):
+def check_requirements(for_feature=None):
deps_needed = []
errors = []
@@ -125,7 +150,7 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
for dependency in reqs:
try:
- _get_distribution(dependency)
+ _check_requirement(dependency)
except VersionConflict as e:
deps_needed.append(dependency)
errors.append(
@@ -143,7 +168,7 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
for dependency in OPTS:
try:
- _get_distribution(dependency)
+ _check_requirement(dependency)
except VersionConflict as e:
deps_needed.append(dependency)
errors.append(
@@ -161,6 +186,23 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution):
raise DependencyException(deps_needed)
+def _check_requirement(dependency_string):
+ """Parses a dependency string, and checks if the specified requirement is installed
+
+ Raises:
+ VersionConflict if the requirement is installed, but with the wrong version
+ DistributionNotFound if nothing is found to provide the requirement
+ """
+ req = Requirement.parse(dependency_string)
+
+ # first check if the markers specify that this requirement needs installing
+ if req.marker is not None and not req.marker.evaluate():
+ # not required for this environment
+ return
+
+ get_provider(req)
+
+
if __name__ == "__main__":
import sys
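
A minimal sketch of the marker handling in `_check_requirement` above: the environment marker is evaluated first, and the installed-distribution check is skipped when the requirement does not apply to the running interpreter (the dependency string reuses the eliot pin from this patch):

    from pkg_resources import Requirement, get_provider

    req = Requirement.parse('eliot<1.8.0;python_version<"3.5.3"')
    if req.marker is not None and not req.marker.evaluate():
        print("marker does not match this environment; nothing to check")
    else:
        # raises VersionConflict or DistributionNotFound on failure
        get_provider(req)
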
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
index 1d27c92..912a5ac 100644
--- a/synapse/replication/http/register.py
+++ b/synapse/replication/http/register.py
@@ -33,11 +33,12 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
def __init__(self, hs):
super(ReplicationRegisterServlet, self).__init__(hs)
self.store = hs.get_datastore()
+ self.registration_handler = hs.get_registration_handler()
@staticmethod
def _serialize_payload(
user_id, token, password_hash, was_guest, make_guest, appservice_id,
- create_profile_with_displayname, admin, user_type,
+ create_profile_with_displayname, admin, user_type, address,
):
"""
Args:
@@ -56,6 +57,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
admin (boolean): is an admin user?
user_type (str|None): type of user. One of the values from
api.constants.UserTypes, or None for a normal user.
+ address (str|None): the IP address used to perform the registration.
"""
return {
"token": token,
@@ -66,13 +68,14 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
"create_profile_with_displayname": create_profile_with_displayname,
"admin": admin,
"user_type": user_type,
+ "address": address,
}
@defer.inlineCallbacks
def _handle_request(self, request, user_id):
content = parse_json_object_from_request(request)
- yield self.store.register(
+ yield self.registration_handler.register_with_store(
user_id=user_id,
token=content["token"],
password_hash=content["password_hash"],
@@ -82,6 +85,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
create_profile_with_displayname=content["create_profile_with_displayname"],
admin=content["admin"],
user_type=content["user_type"],
+ address=content["address"]
)
defer.returnValue((200, {}))
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py
index 60641f1..5b8521c 100644
--- a/synapse/replication/slave/storage/client_ips.py
+++ b/synapse/replication/slave/storage/client_ips.py
@@ -43,6 +43,8 @@ class SlavedClientIpStore(BaseSlavedStore):
if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
return
+ self.client_ip_last_seen.prefill(key, now)
+
self.hs.get_tcp_replication().send_user_ip(
user_id, access_token, ip, user_agent, device_id, now
)
diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py
index 4f19fd3..4d59778 100644
--- a/synapse/replication/slave/storage/deviceinbox.py
+++ b/synapse/replication/slave/storage/deviceinbox.py
@@ -13,15 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.storage import DataStore
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
+from synapse.storage.deviceinbox import DeviceInboxWorkerStore
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore, __func__
-from ._slaved_id_tracker import SlavedIdTracker
-
-class SlavedDeviceInboxStore(BaseSlavedStore):
+class SlavedDeviceInboxStore(DeviceInboxWorkerStore, BaseSlavedStore):
def __init__(self, db_conn, hs):
super(SlavedDeviceInboxStore, self).__init__(db_conn, hs)
self._device_inbox_id_gen = SlavedIdTracker(
@@ -43,12 +42,6 @@ class SlavedDeviceInboxStore(BaseSlavedStore):
expiry_ms=30 * 60 * 1000,
)
- get_to_device_stream_token = __func__(DataStore.get_to_device_stream_token)
- get_new_messages_for_device = __func__(DataStore.get_new_messages_for_device)
- get_new_device_msgs_for_remote = __func__(DataStore.get_new_device_msgs_for_remote)
- delete_messages_for_device = __func__(DataStore.delete_messages_for_device)
- delete_device_msgs_for_remote = __func__(DataStore.delete_device_msgs_for_remote)
-
def stream_positions(self):
result = super(SlavedDeviceInboxStore, self).stream_positions()
result["to_device"] = self._device_inbox_id_gen.get_current_token()
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
index ec2fd56..16c9a16 100644
--- a/synapse/replication/slave/storage/devices.py
+++ b/synapse/replication/slave/storage/devices.py
@@ -13,15 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.storage import DataStore
-from synapse.storage.end_to_end_keys import EndToEndKeyStore
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
+from synapse.storage.devices import DeviceWorkerStore
+from synapse.storage.end_to_end_keys import EndToEndKeyWorkerStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore, __func__
-from ._slaved_id_tracker import SlavedIdTracker
-
-class SlavedDeviceStore(BaseSlavedStore):
+class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore):
def __init__(self, db_conn, hs):
super(SlavedDeviceStore, self).__init__(db_conn, hs)
@@ -38,17 +37,6 @@ class SlavedDeviceStore(BaseSlavedStore):
"DeviceListFederationStreamChangeCache", device_list_max,
)
- get_device_stream_token = __func__(DataStore.get_device_stream_token)
- get_user_whose_devices_changed = __func__(DataStore.get_user_whose_devices_changed)
- get_devices_by_remote = __func__(DataStore.get_devices_by_remote)
- _get_devices_by_remote_txn = __func__(DataStore._get_devices_by_remote_txn)
- _get_e2e_device_keys_txn = __func__(DataStore._get_e2e_device_keys_txn)
- mark_as_sent_devices_by_remote = __func__(DataStore.mark_as_sent_devices_by_remote)
- _mark_as_sent_devices_by_remote_txn = (
- __func__(DataStore._mark_as_sent_devices_by_remote_txn)
- )
- count_e2e_one_time_keys = EndToEndKeyStore.__dict__["count_e2e_one_time_keys"]
-
def stream_positions(self):
result = super(SlavedDeviceStore, self).stream_positions()
result["device_lists"] = self._device_list_id_gen.get_current_token()
@@ -58,14 +46,23 @@ class SlavedDeviceStore(BaseSlavedStore):
if stream_name == "device_lists":
self._device_list_id_gen.advance(token)
for row in rows:
- self._device_list_stream_cache.entity_has_changed(
- row.user_id, token
+ self._invalidate_caches_for_devices(
+ token, row.user_id, row.destination,
)
-
- if row.destination:
- self._device_list_federation_stream_cache.entity_has_changed(
- row.destination, token
- )
return super(SlavedDeviceStore, self).process_replication_rows(
stream_name, token, rows
)
+
+ def _invalidate_caches_for_devices(self, token, user_id, destination):
+ self._device_list_stream_cache.entity_has_changed(
+ user_id, token
+ )
+
+ if destination:
+ self._device_list_federation_stream_cache.entity_has_changed(
+ destination, token
+ )
+
+ self._get_cached_devices_for_user.invalidate((user_id,))
+ self._get_cached_user_device.invalidate_many((user_id,))
+ self.get_device_list_last_stream_id_for_remote.invalidate((user_id,))
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index 4830c68..a395250 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -16,9 +16,14 @@
import logging
from synapse.api.constants import EventTypes
+from synapse.replication.tcp.streams.events import (
+ EventsStreamCurrentStateRow,
+ EventsStreamEventRow,
+)
from synapse.storage.event_federation import EventFederationWorkerStore
from synapse.storage.event_push_actions import EventPushActionsWorkerStore
from synapse.storage.events_worker import EventsWorkerStore
+from synapse.storage.relations import RelationsWorkerStore
from synapse.storage.roommember import RoomMemberWorkerStore
from synapse.storage.signatures import SignatureWorkerStore
from synapse.storage.state import StateGroupWorkerStore
@@ -48,6 +53,7 @@ class SlavedEventStore(EventFederationWorkerStore,
EventsWorkerStore,
SignatureWorkerStore,
UserErasureWorkerStore,
+ RelationsWorkerStore,
BaseSlavedStore):
def __init__(self, db_conn, hs):
@@ -79,25 +85,39 @@ class SlavedEventStore(EventFederationWorkerStore,
if stream_name == "events":
self._stream_id_gen.advance(token)
for row in rows:
- self.invalidate_caches_for_event(
- token, row.event_id, row.room_id, row.type, row.state_key,
- row.redacts,
- backfilled=False,
- )
+ self._process_event_stream_row(token, row)
elif stream_name == "backfill":
self._backfill_id_gen.advance(-token)
for row in rows:
self.invalidate_caches_for_event(
-token, row.event_id, row.room_id, row.type, row.state_key,
- row.redacts,
+ row.redacts, row.relates_to,
backfilled=True,
)
return super(SlavedEventStore, self).process_replication_rows(
stream_name, token, rows
)
+ def _process_event_stream_row(self, token, row):
+ data = row.data
+
+ if row.type == EventsStreamEventRow.TypeId:
+ self.invalidate_caches_for_event(
+ token, data.event_id, data.room_id, data.type, data.state_key,
+ data.redacts, data.relates_to,
+ backfilled=False,
+ )
+ elif row.type == EventsStreamCurrentStateRow.TypeId:
+ if data.type == EventTypes.Member:
+ self.get_rooms_for_user_with_stream_ordering.invalidate(
+ (data.state_key, ),
+ )
+ else:
+ raise Exception("Unknown events stream row type %s" % (row.type, ))
+
def invalidate_caches_for_event(self, stream_ordering, event_id, room_id,
- etype, state_key, redacts, backfilled):
+ etype, state_key, redacts, relates_to,
+ backfilled):
self._invalidate_get_event_cache(event_id)
self.get_latest_event_ids_in_room.invalidate((room_id,))
@@ -119,3 +139,8 @@ class SlavedEventStore(EventFederationWorkerStore,
state_key, stream_ordering
)
self.get_invited_rooms_for_user.invalidate((state_key,))
+
+ if relates_to:
+ self.get_relations_for_event.invalidate_many((relates_to,))
+ self.get_aggregation_groups_for_event.invalidate_many((relates_to,))
+ self.get_applicable_edit.invalidate((relates_to,))
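The new _process_event_stream_row dispatches on the row's TypeId string, so a single "events" stream can carry heterogeneous row types, and unknown types fail loudly rather than being silently dropped. A tiny hypothetical version of that dispatch:

    def process_event_stream_row(row_type, data):
        if row_type == "ev":
            return ("invalidate event caches", data)
        elif row_type == "state":
            return ("invalidate membership caches", data)
        else:
            raise Exception("Unknown events stream row type %s" % (row_type,))

    print(process_event_stream_row("ev", {"event_id": "$e:hs"}))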
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
index 8032f53..cc6f7f0 100644
--- a/synapse/replication/slave/storage/keys.py
+++ b/synapse/replication/slave/storage/keys.py
@@ -13,22 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.storage import DataStore
-from synapse.storage.keys import KeyStore
+from synapse.storage import KeyStore
-from ._base import BaseSlavedStore, __func__
+# KeyStore isn't really safe to use from a worker, but for now we do so and hope that
+# the races it creates aren't too bad.
-
-class SlavedKeyStore(BaseSlavedStore):
- _get_server_verify_key = KeyStore.__dict__[
- "_get_server_verify_key"
- ]
-
- get_server_verify_keys = __func__(DataStore.get_server_verify_keys)
- store_server_verify_key = __func__(DataStore.store_server_verify_key)
-
- get_server_certificate = __func__(DataStore.get_server_certificate)
- store_server_certificate = __func__(DataStore.store_server_certificate)
-
- get_server_keys_json = __func__(DataStore.get_server_keys_json)
- store_server_keys_json = __func__(DataStore.store_server_keys_json)
+SlavedKeyStore = KeyStore
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
index 92447b0..0ec1db2 100644
--- a/synapse/replication/slave/storage/presence.py
+++ b/synapse/replication/slave/storage/presence.py
@@ -39,23 +39,16 @@ class SlavedPresenceStore(BaseSlavedStore):
_get_presence_for_user = PresenceStore.__dict__["_get_presence_for_user"]
get_presence_for_users = PresenceStore.__dict__["get_presence_for_users"]
- # XXX: This is a bit broken because we don't persist the accepted list in a
- # way that can be replicated. This means that we don't have a way to
- # invalidate the cache correctly.
- get_presence_list_accepted = PresenceStore.__dict__[
- "get_presence_list_accepted"
- ]
- get_presence_list_observers_accepted = PresenceStore.__dict__[
- "get_presence_list_observers_accepted"
- ]
-
def get_current_presence_token(self):
return self._presence_id_gen.get_current_token()
def stream_positions(self):
result = super(SlavedPresenceStore, self).stream_positions()
- position = self._presence_id_gen.get_current_token()
- result["presence"] = position
+
+ if self.hs.config.use_presence:
+ position = self._presence_id_gen.get_current_token()
+ result["presence"] = position
+
return result
def process_replication_rows(self, stream_name, token, rows):
diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py
index f0200c1..45fc913 100644
--- a/synapse/replication/slave/storage/push_rule.py
+++ b/synapse/replication/slave/storage/push_rule.py
@@ -20,7 +20,7 @@ from ._slaved_id_tracker import SlavedIdTracker
from .events import SlavedEventStore
-class SlavedPushRuleStore(PushRulesWorkerStore, SlavedEventStore):
+class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore):
def __init__(self, db_conn, hs):
self._push_rules_stream_id_gen = SlavedIdTracker(
db_conn, "push_rules_stream", "stream_id",
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 586dddb..206dc3b 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -39,7 +39,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
Accepts a handler that will be called when new data is available or data
is required.
"""
- maxDelay = 5 # Try at least once every N seconds
+ maxDelay = 30 # Try at least once every N seconds
def __init__(self, hs, client_name, handler):
self.client_name = client_name
@@ -54,7 +54,6 @@ class ReplicationClientFactory(ReconnectingClientFactory):
def buildProtocol(self, addr):
logger.info("Connected to replication: %r", addr)
- self.resetDelay()
return ClientReplicationStreamProtocol(
self.client_name, self.server_name, self._clock, self.handler
)
@@ -90,21 +89,33 @@ class ReplicationClientHandler(object):
# Used for tests.
self.awaiting_syncs = {}
+ # The factory used to create connections.
+ self.factory = None
+
def start_replication(self, hs):
"""Helper method to start a replication connection to the remote server
using TCP.
"""
client_name = hs.config.worker_name
- factory = ReplicationClientFactory(hs, client_name, self)
+ self.factory = ReplicationClientFactory(hs, client_name, self)
host = hs.config.worker_replication_host
port = hs.config.worker_replication_port
- hs.get_reactor().connectTCP(host, port, factory)
+ hs.get_reactor().connectTCP(host, port, self.factory)
def on_rdata(self, stream_name, token, rows):
- """Called when we get new replication data. By default this just pokes
- the slave store.
+ """Called to handle a batch of replication data with a given stream token.
- Can be overriden in subclasses to handle more.
+ By default this just pokes the slave store. Can be overridden in subclasses to
+ handle more.
+
+ Args:
+ stream_name (str): name of the replication stream for this batch of rows
+ token (int): stream token for this batch of rows
+ rows (list): a list of Stream.ROW_TYPE objects as returned by
+ Stream.parse_row.
+
+ Returns:
+ Deferred|None
"""
logger.debug("Received rdata %s -> %s", stream_name, token)
return self.store.process_replication_rows(stream_name, token, rows)
@@ -140,6 +151,7 @@ class ReplicationClientHandler(object):
args["account_data"] = user_account_data
elif room_account_data:
args["account_data"] = room_account_data
+
return args
def get_currently_syncing_users(self):
@@ -204,3 +216,14 @@ class ReplicationClientHandler(object):
for cmd in self.pending_commands:
connection.send_command(cmd)
self.pending_commands = []
+
+ def finished_connecting(self):
+ """Called when we have successfully subscribed and caught up to all
+ streams we're interested in.
+ """
+ logger.info("Finished connecting to server")
+
+ # We don't reset the delay any earlier as otherwise if there is a
+ # problem during start up we'll end up tight looping connecting to the
+ # server.
+ self.factory.resetDelay()
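For context on the maxDelay bump and the moved resetDelay(): Twisted's ReconnectingClientFactory backs off exponentially between attempts (multiplying the delay by a factor of roughly e, starting from 1s) until it hits maxDelay, and resetDelay() restores the initial delay. Calling resetDelay() only from finished_connecting() means a worker that keeps failing during catch-up keeps backing off instead of tight-looping. A pure-Python mimic of that arithmetic, capped at 30s as in this patch:

    initial_delay, factor, max_delay = 1.0, 2.7182818284590451, 30

    delay = initial_delay
    for attempt in range(1, 6):
        print("attempt %d: wait %.2fs" % (attempt, delay))
        delay = min(delay * factor, max_delay)

    # finished_connecting() -> resetDelay(): only once fully caught up does
    # the delay drop back to its initial value.
    delay = initial_delay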
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 327556f..2098c32 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -127,8 +127,11 @@ class RdataCommand(Command):
class PositionCommand(Command):
- """Sent by the client to tell the client the stream postition without
+ """Sent by the server to tell the client the stream postition without
needing to send an RDATA.
+
+ Sent to the client after all missing updates for a stream have been sent
+ to the client and they're now up to date.
"""
NAME = "POSITION"
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 429471c..b51590c 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -42,8 +42,8 @@ indicate which side is sending, these are *not* included on the wire::
> POSITION backfill 1
> POSITION caches 1
> RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
- > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
- "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
+ > RDATA events 14 ["ev", ["$149019767112vOHxz:localhost:8823",
+ "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]]
< PING 1490197675618
> ERROR server stopping
* connection closed by server *
@@ -223,14 +223,25 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
return
# Now lets try and call on_<CMD_NAME> function
- try:
- run_as_background_process(
- "replication-" + cmd.get_logcontext_id(),
- getattr(self, "on_%s" % (cmd_name,)),
- cmd,
- )
- except Exception:
- logger.exception("[%s] Failed to handle line: %r", self.id(), line)
+ run_as_background_process(
+ "replication-" + cmd.get_logcontext_id(),
+ self.handle_command,
+ cmd,
+ )
+
+ def handle_command(self, cmd):
+ """Handle a command we have received over the replication stream.
+
+ By default delegates to on_<COMMAND>
+
+ Args:
+ cmd (synapse.replication.tcp.commands.Command): received command
+
+ Returns:
+ Deferred
+ """
+ handler = getattr(self, "on_%s" % (cmd.NAME,))
+ return handler(cmd)
def close(self):
logger.warn("[%s] Closing connection", self.id())
@@ -364,8 +375,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
self.transport.unregisterProducer()
def __str__(self):
+ addr = None
+ if self.transport:
+ addr = str(self.transport.getPeer())
return "ReplicationConnection<name=%s,conn_id=%s,addr=%s>" % (
- self.name, self.conn_id, self.addr,
+ self.name, self.conn_id, addr,
)
def id(self):
@@ -381,12 +395,11 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
VALID_OUTBOUND_COMMANDS = VALID_SERVER_COMMANDS
- def __init__(self, server_name, clock, streamer, addr):
+ def __init__(self, server_name, clock, streamer):
BaseReplicationStreamProtocol.__init__(self, clock) # Old style class
self.server_name = server_name
self.streamer = streamer
- self.addr = addr
# The streams the client has subscribed to and is up to date with
self.replication_streams = set()
@@ -451,7 +464,7 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
@defer.inlineCallbacks
def subscribe_to_stream(self, stream_name, token):
- """Subscribe the remote to a streams.
+ """Subscribe the remote to a stream.
This involves checking if they've missed anything and sending those
updates down if they have. During that time new updates for the stream
@@ -478,11 +491,36 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
# Now we can send any updates that came in while we were subscribing
pending_rdata = self.pending_rdata.pop(stream_name, [])
+ updates = []
for token, update in pending_rdata:
- # Only send updates newer than the current token
- if token > current_token:
+ # If the token is null, it is part of a batch update. Batches
+ # are multiple updates that share a single token. To denote
+ # this, the token is set to None for all updates in the batch
+ # except for the last. If we find a None token, we keep looking
+ # through tokens until we find one that is not None and then
+ # process all previous updates in the batch as if they had the
+ # final token.
+ if token is None:
+ # Store this update as part of a batch
+ updates.append(update)
+ continue
+
+ if token <= current_token:
+ # This update or batch of updates is older than
+ # current_token, dismiss it
+ updates = []
+ continue
+
+ updates.append(update)
+
+ # Send all updates that are part of this batch with the
+ # found token
+ for update in updates:
self.send_command(RdataCommand(stream_name, token, update))
+ # Clear stored updates
+ updates = []
+
# They're now fully subscribed
self.replication_streams.add(stream_name)
except Exception as e:
@@ -526,6 +564,11 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
self.server_name = server_name
self.handler = handler
+ # Set of stream names that have been subscribe to, but haven't yet
+ # caught up with. This is used to track when the client has been fully
+ # connected to the remote.
+ self.streams_connecting = set()
+
# Map of stream to batched updates. See RdataCommand for info on how
# batching works.
self.pending_batches = {}
@@ -548,6 +591,10 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
# We've now finished connecting, so inform the client handler
self.handler.update_connection(self)
+ # This will happen if we don't actually subscribe to any streams
+ if not self.streams_connecting:
+ self.handler.finished_connecting()
+
def on_SERVER(self, cmd):
if cmd.data != self.server_name:
logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data)
@@ -558,7 +605,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
inbound_rdata_count.labels(stream_name).inc()
try:
- row = STREAMS_MAP[stream_name].ROW_TYPE(*cmd.row)
+ row = STREAMS_MAP[stream_name].parse_row(cmd.row)
except Exception:
logger.exception(
"[%s] Failed to parse RDATA: %r %r",
@@ -577,6 +624,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
return self.handler.on_rdata(stream_name, cmd.token, rows)
def on_POSITION(self, cmd):
+ # When we get a `POSITION` command it means we've finished getting
+ # missing updates for the given stream, and are now up to date.
+ self.streams_connecting.discard(cmd.stream_name)
+ if not self.streams_connecting:
+ self.handler.finished_connecting()
+
return self.handler.on_position(cmd.stream_name, cmd.token)
def on_SYNC(self, cmd):
@@ -593,6 +646,8 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
self.id(), stream_name, token
)
+ self.streams_connecting.add(stream_name)
+
self.send_command(ReplicateCommand(stream_name, token))
def on_connection_closed(self):
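The batch-replay loop added to subscribe_to_stream can be restated standalone; this hedged re-implementation mirrors the logic above (a None token marks an unfinished batch, and a whole batch whose final token is at or below current_token is dismissed):

    def replay_pending(pending_rdata, current_token):
        sent, updates = [], []
        for token, update in pending_rdata:
            if token is None:
                updates.append(update)    # part of an unfinished batch
                continue
            if token <= current_token:
                updates = []              # stale update/batch: dismiss it
                continue
            updates.append(update)
            sent.extend((token, u) for u in updates)
            updates = []
        return sent

    print(replay_pending([(None, "a"), (5, "b"), (None, "c"), (7, "d")], 5))
    # [(7, 'c'), (7, 'd')] -- the batch that ended at token 5 was stale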
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index fd59f15..f6a38f5 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -16,6 +16,7 @@
"""
import logging
+import random
from six import itervalues
@@ -29,7 +30,8 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.metrics import Measure, measure_func
from .protocol import ServerReplicationStreamProtocol
-from .streams import STREAMS_MAP, FederationStream
+from .streams import STREAMS_MAP
+from .streams.federation import FederationStream
stream_updates_counter = Counter("synapse_replication_tcp_resource_stream_updates",
"", ["stream_name"])
@@ -56,7 +58,6 @@ class ReplicationStreamProtocolFactory(Factory):
self.server_name,
self.clock,
self.streamer,
- addr
)
@@ -74,6 +75,8 @@ class ReplicationStreamer(object):
self.notifier = hs.get_notifier()
self._server_notices_sender = hs.get_server_notices_sender()
+ self._replication_torture_level = hs.config.replication_torture_level
+
# Current connections.
self.connections = []
@@ -157,10 +160,23 @@ class ReplicationStreamer(object):
for stream in self.streams:
stream.advance_current_token()
- for stream in self.streams:
+ all_streams = self.streams
+
+ if self._replication_torture_level is not None:
+ # there is no guarantee about ordering between the streams,
+ # so let's shuffle them around a bit when we are in torture mode.
+ all_streams = list(all_streams)
+ random.shuffle(all_streams)
+
+ for stream in all_streams:
if stream.last_token == stream.upto_token:
continue
+ if self._replication_torture_level:
+ yield self.clock.sleep(
+ self._replication_torture_level / 1000.0
+ )
+
logger.debug(
"Getting stream: %s: %s -> %s",
stream.NAME, stream.last_token, stream.upto_token
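A rough sketch of what the torture knob does on each notification pass, with made-up stream names (the configured level is interpreted as milliseconds of sleep between streams):

    import random
    import time

    streams = ["events", "backfill", "receipts"]
    replication_torture_level = 50  # ms

    random.shuffle(streams)  # no ordering guarantee between streams
    for name in streams:
        time.sleep(replication_torture_level / 1000.0)
        print("waking stream %s" % name)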
diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py
new file mode 100644
index 0000000..634f636
--- /dev/null
+++ b/synapse/replication/tcp/streams/__init__.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 Vector Creations Ltd
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Defines all the valid streams that clients can subscribe to, and the format
+of the rows returned by each stream.
+
+Each stream is defined by the following information:
+
+ stream name: The name of the stream
+ row type: The type that is used to serialise/deserialise the row
+ current_token: The function that returns the current token for the stream
+ update_function: The function that returns a list of updates between two tokens
+"""
+
+from . import _base, events, federation
+
+STREAMS_MAP = {
+ stream.NAME: stream
+ for stream in (
+ events.EventsStream,
+ _base.BackfillStream,
+ _base.PresenceStream,
+ _base.TypingStream,
+ _base.ReceiptsStream,
+ _base.PushRulesStream,
+ _base.PushersStream,
+ _base.CachesStream,
+ _base.PublicRoomsStream,
+ _base.DeviceListsStream,
+ _base.ToDeviceStream,
+ federation.FederationStream,
+ _base.TagAccountDataStream,
+ _base.AccountDataStream,
+ _base.GroupServerStream,
+ )
+}
diff --git a/synapse/replication/tcp/streams.py b/synapse/replication/tcp/streams/_base.py
similarity index 77%
rename from synapse/replication/tcp/streams.py
rename to synapse/replication/tcp/streams/_base.py
index c1e626b..b6ce7a7 100644
--- a/synapse/replication/tcp/streams.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
+# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,17 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Defines all the valid streams that clients can subscribe to, and the format
-of the rows returned by each stream.
-
-Each stream is defined by the following information:
-
- stream name: The name of the stream
- row type: The type that is used to serialise/deserialse the row
- current_token: The function that returns the current token for the stream
- update_function: The function that returns a list of updates between two tokens
-"""
+import itertools
import logging
from collections import namedtuple
@@ -34,20 +26,13 @@ logger = logging.getLogger(__name__)
MAX_EVENTS_BEHIND = 10000
-
-EventStreamRow = namedtuple("EventStreamRow", (
- "event_id", # str
- "room_id", # str
- "type", # str
- "state_key", # str, optional
- "redacts", # str, optional
-))
BackfillStreamRow = namedtuple("BackfillStreamRow", (
"event_id", # str
"room_id", # str
"type", # str
"state_key", # str, optional
"redacts", # str, optional
+ "relates_to", # str, optional
))
PresenceStreamRow = namedtuple("PresenceStreamRow", (
"user_id", # str
@@ -96,10 +81,6 @@ DeviceListsStreamRow = namedtuple("DeviceListsStreamRow", (
ToDeviceStreamRow = namedtuple("ToDeviceStreamRow", (
"entity", # str
))
-FederationStreamRow = namedtuple("FederationStreamRow", (
- "type", # str, the type of data as defined in the BaseFederationRows
- "data", # dict, serialization of a federation.send_queue.BaseFederationRow
-))
TagAccountDataStreamRow = namedtuple("TagAccountDataStreamRow", (
"user_id", # str
"room_id", # str
@@ -111,12 +92,6 @@ AccountDataStreamRow = namedtuple("AccountDataStream", (
"data_type", # str
"data", # dict
))
-CurrentStateDeltaStreamRow = namedtuple("CurrentStateDeltaStream", (
- "room_id", # str
- "type", # str
- "state_key", # str
- "event_id", # str, optional
-))
GroupsStreamRow = namedtuple("GroupsStreamRow", (
"group_id", # str
"user_id", # str
@@ -132,9 +107,24 @@ class Stream(object):
time it was called up until the point `advance_current_token` was called.
"""
NAME = None # The name of the stream
- ROW_TYPE = None # The type of the row
+ ROW_TYPE = None # The type of the row. Used by the default impl of parse_row.
_LIMITED = True # Whether the update function takes a limit
+ @classmethod
+ def parse_row(cls, row):
+ """Parse a row received over replication
+
+ By default, assumes that the row data is an array object and passes its contents
+ to the constructor of the ROW_TYPE for this stream.
+
+ Args:
+ row: row data from the incoming RDATA command, after json decoding
+
+ Returns:
+ ROW_TYPE object for this stream
+ """
+ return cls.ROW_TYPE(*row)
+
def __init__(self, hs):
# The token from which we last asked for updates
self.last_token = self.current_token()
@@ -162,8 +152,10 @@ class Stream(object):
until the `upto_token`
Returns:
- (list(ROW_TYPE), int): list of updates plus the token used as an
- upper bound of the updates (i.e. the "current token")
+ Deferred[Tuple[List[Tuple[int, Any]], int]]:
+ Resolves to a pair ``(updates, current_token)``, where ``updates`` is a
+ list of ``(token, row)`` entries. ``row`` will be json-serialised and
+ sent over the replication stream.
"""
updates, current_token = yield self.get_updates_since(self.last_token)
self.last_token = current_token
@@ -176,8 +168,10 @@ class Stream(object):
stream updates
Returns:
- (list(ROW_TYPE), int): list of updates plus the token used as an
- upper bound of the updates (i.e. the "current token")
+ Deferred[Tuple[List[Tuple[int, Any]], int]]:
+ Resolves to a pair ``(updates, current_token)``, where ``updates`` is a
+ list of ``(token, row)`` entries. ``row`` will be json-serialised and
+ sent over the replication stream.
"""
if from_token in ("NOW", "now"):
defer.returnValue(([], self.upto_token))
@@ -195,14 +189,19 @@ class Stream(object):
limit=MAX_EVENTS_BEHIND + 1,
)
- if len(rows) >= MAX_EVENTS_BEHIND:
- raise Exception("stream %s has fallen behind" % (self.NAME))
+ # never turn more than MAX_EVENTS_BEHIND + 1 into updates.
+ rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1)
else:
rows = yield self.update_function(
from_token, current_token,
)
- updates = [(row[0], self.ROW_TYPE(*row[1:])) for row in rows]
+ updates = [(row[0], row[1:]) for row in rows]
+
+ # check we didn't get more rows than the limit.
+ # doing it like this allows the update_function to be a generator.
+ if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND:
+ raise Exception("stream %s has fallen behind" % (self.NAME))
defer.returnValue((updates, current_token))
@@ -227,20 +226,6 @@ class Stream(object):
raise NotImplementedError()
-class EventsStream(Stream):
- """We received a new event, or an event went from being an outlier to not
- """
- NAME = "events"
- ROW_TYPE = EventStreamRow
-
- def __init__(self, hs):
- store = hs.get_datastore()
- self.current_token = store.get_current_events_token
- self.update_function = store.get_all_new_forward_event_rows
-
- super(EventsStream, self).__init__(hs)
-
-
class BackfillStream(Stream):
"""We fetched some old events and either we had never seen that event before
or it went from being an outlier to not.
@@ -395,22 +380,6 @@ class ToDeviceStream(Stream):
super(ToDeviceStream, self).__init__(hs)
-class FederationStream(Stream):
- """Data to be sent over federation. Only available when master has federation
- sending disabled.
- """
- NAME = "federation"
- ROW_TYPE = FederationStreamRow
-
- def __init__(self, hs):
- federation_sender = hs.get_federation_sender()
-
- self.current_token = federation_sender.get_current_token
- self.update_function = federation_sender.get_replication_rows
-
- super(FederationStream, self).__init__(hs)
-
-
class TagAccountDataStream(Stream):
"""Someone added/removed a tag for a room
"""
@@ -454,21 +423,6 @@ class AccountDataStream(Stream):
defer.returnValue(results)
-class CurrentStateDeltaStream(Stream):
- """Current state for a room was changed
- """
- NAME = "current_state_deltas"
- ROW_TYPE = CurrentStateDeltaStreamRow
-
- def __init__(self, hs):
- store = hs.get_datastore()
-
- self.current_token = store.get_max_current_state_delta_stream_id
- self.update_function = store.get_all_updated_current_state_deltas
-
- super(CurrentStateDeltaStream, self).__init__(hs)
-
-
class GroupServerStream(Stream):
NAME = "groups"
ROW_TYPE = GroupsStreamRow
@@ -480,26 +434,3 @@ class GroupServerStream(Stream):
self.update_function = store.get_all_groups_changes
super(GroupServerStream, self).__init__(hs)
-
-
-STREAMS_MAP = {
- stream.NAME: stream
- for stream in (
- EventsStream,
- BackfillStream,
- PresenceStream,
- TypingStream,
- ReceiptsStream,
- PushRulesStream,
- PushersStream,
- CachesStream,
- PublicRoomsStream,
- DeviceListsStream,
- ToDeviceStream,
- FederationStream,
- TagAccountDataStream,
- AccountDataStream,
- CurrentStateDeltaStream,
- GroupServerStream,
- )
-}
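Two behavioural points in the _base.py changes are worth restating: rows are truncated with itertools.islice, which lets update_function return a generator, and the "fallen behind" check now runs after the updates are materialised. A small sketch under toy limits:

    import itertools

    MAX_EVENTS_BEHIND = 3  # tiny value, for illustration only

    def rows():
        token = 0
        while True:  # an unbounded generator of (token, data) rows
            token += 1
            yield (token, "row-%d" % token)

    limited = itertools.islice(rows(), MAX_EVENTS_BEHIND + 1)
    updates = [(row[0], row[1:]) for row in limited]
    if len(updates) >= MAX_EVENTS_BEHIND:
        print("stream has fallen behind (%d updates)" % len(updates))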
diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
new file mode 100644
index 0000000..f1290d0
--- /dev/null
+++ b/synapse/replication/tcp/streams/events.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 Vector Creations Ltd
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import heapq
+
+import attr
+
+from twisted.internet import defer
+
+from ._base import Stream
+
+
+"""Handling of the 'events' replication stream
+
+This stream contains rows of various types. Each row therefore contains a 'type'
+identifier before the real data. For example::
+
+ RDATA events batch ["state", ["!room:id", "m.type", "", "$event:id"]]
+ RDATA events 12345 ["ev", ["$event:id", "!room:id", "m.type", null, null, null]]
+
+An "ev" row is sent for each new event. The fields in the data part are:
+
+ * The new event id
+ * The room id for the event
+ * The type of the new event
+ * The state key of the event, for state events
+ * The event id of an event which is redacted by this event.
+ * The event id of an event that this event relates to, if any.
+
+A "state" row is sent whenever the "current state" in a room changes. The fields in the
+data part are:
+
+ * The room id for the state change
+ * The event type of the state which has changed
+ * The state_key of the state which has changed
+ * The event id of the new state
+
+"""
+
+
+@attr.s(slots=True, frozen=True)
+class EventsStreamRow(object):
+ """A parsed row from the events replication stream"""
+ type = attr.ib() # str: the TypeId of one of the *EventsStreamRows
+ data = attr.ib() # BaseEventsStreamRow
+
+
+class BaseEventsStreamRow(object):
+ """Base class for rows to be sent in the events stream.
+
+ Specifies how to identify, serialize and deserialize the different types.
+ """
+
+ TypeId = None # Unique string that identifies the type. Must be overridden in subclasses.
+
+ @classmethod
+ def from_data(cls, data):
+ """Parse the data from the replication stream into a row.
+
+ By default we just call the constructor with the data list as arguments
+
+ Args:
+ data: The value of the data object from the replication stream
+ """
+ return cls(*data)
+
+
+@attr.s(slots=True, frozen=True)
+class EventsStreamEventRow(BaseEventsStreamRow):
+ TypeId = "ev"
+
+ event_id = attr.ib() # str
+ room_id = attr.ib() # str
+ type = attr.ib() # str
+ state_key = attr.ib() # str, optional
+ redacts = attr.ib() # str, optional
+ relates_to = attr.ib() # str, optional
+
+
+@attr.s(slots=True, frozen=True)
+class EventsStreamCurrentStateRow(BaseEventsStreamRow):
+ TypeId = "state"
+
+ room_id = attr.ib() # str
+ type = attr.ib() # str
+ state_key = attr.ib() # str
+ event_id = attr.ib() # str, optional
+
+
+TypeToRow = {
+ Row.TypeId: Row
+ for Row in (
+ EventsStreamEventRow,
+ EventsStreamCurrentStateRow,
+ )
+}
+
+
+class EventsStream(Stream):
+ """We received a new event, or an event went from being an outlier to not
+ """
+ NAME = "events"
+
+ def __init__(self, hs):
+ self._store = hs.get_datastore()
+ self.current_token = self._store.get_current_events_token
+
+ super(EventsStream, self).__init__(hs)
+
+ @defer.inlineCallbacks
+ def update_function(self, from_token, current_token, limit=None):
+ event_rows = yield self._store.get_all_new_forward_event_rows(
+ from_token, current_token, limit,
+ )
+ event_updates = (
+ (row[0], EventsStreamEventRow.TypeId, row[1:])
+ for row in event_rows
+ )
+
+ state_rows = yield self._store.get_all_updated_current_state_deltas(
+ from_token, current_token, limit
+ )
+ state_updates = (
+ (row[0], EventsStreamCurrentStateRow.TypeId, row[1:])
+ for row in state_rows
+ )
+
+ all_updates = heapq.merge(event_updates, state_updates)
+
+ defer.returnValue(all_updates)
+
+ @classmethod
+ def parse_row(cls, row):
+ (typ, data) = row
+ data = TypeToRow[typ].from_data(data)
+ return EventsStreamRow(typ, data)
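A hedged round-trip of the new wire format, using a namedtuple stand-in for the attrs-based row class above (identifiers and values are invented):

    from collections import namedtuple

    EventRowSketch = namedtuple(
        "EventRowSketch",
        ["event_id", "room_id", "type", "state_key", "redacts", "relates_to"],
    )

    def parse_row_sketch(row):
        typ, data = row     # ["ev", [...]] as sent in the RDATA command
        assert typ == "ev"  # this sketch only handles "ev" rows
        return (typ, EventRowSketch(*data))

    print(parse_row_sketch(
        ["ev", ["$event:id", "!room:id", "m.room.guest_access", "", None, None]]
    ))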
diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py
new file mode 100644
index 0000000..9aa43aa
--- /dev/null
+++ b/synapse/replication/tcp/streams/federation.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 Vector Creations Ltd
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import namedtuple
+
+from ._base import Stream
+
+FederationStreamRow = namedtuple("FederationStreamRow", (
+ "type", # str, the type of data as defined in the BaseFederationRows
+ "data", # dict, serialization of a federation.send_queue.BaseFederationRow
+))
+
+
+class FederationStream(Stream):
+ """Data to be sent over federation. Only available when master has federation
+ sending disabled.
+ """
+ NAME = "federation"
+ ROW_TYPE = FederationStreamRow
+
+ def __init__(self, hs):
+ federation_sender = hs.get_federation_sender()
+
+ self.current_token = federation_sender.get_current_token
+ self.update_function = federation_sender.get_replication_rows
+
+ super(FederationStream, self).__init__(hs)
diff --git a/synapse/res/templates/mail-expiry.css b/synapse/res/templates/mail-expiry.css
new file mode 100644
index 0000000..3dea486
--- /dev/null
+++ b/synapse/res/templates/mail-expiry.css
@@ -0,0 +1,4 @@
+.noticetext {
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
diff --git a/synapse/res/templates/notice_expiry.html b/synapse/res/templates/notice_expiry.html
new file mode 100644
index 0000000..f0d7c66
--- /dev/null
+++ b/synapse/res/templates/notice_expiry.html
@@ -0,0 +1,43 @@
+<!doctype html>
+<html lang="en">
+ <head>
+ <style type="text/css">
+ {% include 'mail.css' without context %}
+ {% include "mail-%s.css" % app_name ignore missing without context %}
+ {% include 'mail-expiry.css' without context %}
+ </style>
+ </head>
+ <body>
+ <table id="page">
+ <tr>
+ <td> </td>
+ <td id="inner">
+ <table class="header">
+ <tr>
+ <td>
+ <div class="salutation">Hi {{ display_name }},</div>
+ </td>
+ <td class="logo">
+ {% if app_name == "Riot" %}
+ <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ {% elif app_name == "Vector" %}
+ <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+ {% else %}
+ <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+ {% endif %}
+ </td>
+ </tr>
+ <tr>
+ <td colspan="2">
+ <div class="noticetext">Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date.</div>
+ <div class="noticetext">To extend the validity of your account, please click on the link bellow (or copy and paste it into a new browser tab):</div>
+ <div class="noticetext"><a href="{{ url }}">{{ url }}</a></div>
+ </td>
+ </tr>
+ </table>
+ </td>
+ <td> </td>
+ </tr>
+ </table>
+ </body>
+</html>
diff --git a/synapse/res/templates/notice_expiry.txt b/synapse/res/templates/notice_expiry.txt
new file mode 100644
index 0000000..41f1c42
--- /dev/null
+++ b/synapse/res/templates/notice_expiry.txt
@@ -0,0 +1,7 @@
+Hi {{ display_name }},
+
+Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date.
+
+To extend the validity of your account, please click on the link below (or copy and paste it to a new browser tab):
+
+{{ url }}
diff --git a/synapse/res/templates/notif.html b/synapse/res/templates/notif.html
index 88b921c..1a6c70b 100644
--- a/synapse/res/templates/notif.html
+++ b/synapse/res/templates/notif.html
@@ -6,11 +6,11 @@
<img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
{% else %}
{% if message.sender_hash % 3 == 0 %}
- <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
+ <img class="sender_avatar" src="https://riot.im/img/external/avatar-1.png" />
{% elif message.sender_hash % 3 == 1 %}
- <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
+ <img class="sender_avatar" src="https://riot.im/img/external/avatar-2.png" />
{% else %}
- <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
+ <img class="sender_avatar" src="https://riot.im/img/external/avatar-3.png" />
{% endif %}
{% endif %}
{% endif %}
diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html
index fcdb310..019506e 100644
--- a/synapse/res/templates/notif_mail.html
+++ b/synapse/res/templates/notif_mail.html
@@ -19,7 +19,7 @@
</td>
<td class="logo">
{% if app_name == "Riot" %}
- <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
{% elif app_name == "Vector" %}
<img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
{% else %}
diff --git a/synapse/res/templates/room.html b/synapse/res/templates/room.html
index 723c222..b8525fe 100644
--- a/synapse/res/templates/room.html
+++ b/synapse/res/templates/room.html
@@ -5,11 +5,11 @@
<img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
{% else %}
{% if room.hash % 3 == 0 %}
- <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
+ <img alt="" src="https://riot.im/img/external/avatar-1.png" />
{% elif room.hash % 3 == 1 %}
- <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
+ <img alt="" src="https://riot.im/img/external/avatar-2.png" />
{% else %}
- <img alt="" src="https://vector.im/beta/img/f4c371.png" />
+ <img alt="" src="https://riot.im/img/external/avatar-3.png" />
{% endif %}
{% endif %}
</td>
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 91f5247..e6110ad 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -13,11 +13,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import synapse.rest.admin
from synapse.http.server import JsonResource
from synapse.rest.client import versions
from synapse.rest.client.v1 import (
- admin,
directory,
events,
initial_sync,
@@ -33,6 +32,7 @@ from synapse.rest.client.v1 import (
from synapse.rest.client.v2_alpha import (
account,
account_data,
+ account_validity,
auth,
capabilities,
devices,
@@ -44,6 +44,7 @@ from synapse.rest.client.v2_alpha import (
read_marker,
receipts,
register,
+ relations,
report_event,
room_keys,
room_upgrade_rest_servlet,
@@ -57,8 +58,14 @@ from synapse.rest.client.v2_alpha import (
class ClientRestResource(JsonResource):
- """A resource for version 1 of the matrix client API."""
+ """Matrix Client API REST resource.
+ This gets mounted at various points under /_matrix/client, including:
+ * /_matrix/client/r0
+ * /_matrix/client/api/v1
+ * /_matrix/client/unstable
+ * etc
+ """
def __init__(self, hs):
JsonResource.__init__(self, hs, canonical_json=False)
self.register_servlets(self, hs)
@@ -81,7 +88,6 @@ class ClientRestResource(JsonResource):
presence.register_servlets(hs, client_resource)
directory.register_servlets(hs, client_resource)
voip.register_servlets(hs, client_resource)
- admin.register_servlets(hs, client_resource)
pusher.register_servlets(hs, client_resource)
push_rule.register_servlets(hs, client_resource)
logout.register_servlets(hs, client_resource)
@@ -109,3 +115,10 @@ class ClientRestResource(JsonResource):
groups.register_servlets(hs, client_resource)
room_upgrade_rest_servlet.register_servlets(hs, client_resource)
capabilities.register_servlets(hs, client_resource)
+ account_validity.register_servlets(hs, client_resource)
+ relations.register_servlets(hs, client_resource)
+
+ # moving to /_synapse/admin
+ synapse.rest.admin.register_servlets_for_client_rest_resource(
+ hs, client_resource
+ )
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/admin/__init__.py
similarity index 69%
rename from synapse/rest/client/v1/admin.py
rename to synapse/rest/admin/__init__.py
index 82433a2..744d855 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/admin/__init__.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2018-2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,46 +17,67 @@
import hashlib
import hmac
import logging
+import platform
+import re
from six import text_type
from six.moves import http_client
from twisted.internet import defer
+import synapse
from synapse.api.constants import Membership, UserTypes
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
+from synapse.http.server import JsonResource
from synapse.http.servlet import (
+ RestServlet,
assert_params_in_dict,
parse_integer,
parse_json_object_from_request,
parse_string,
)
+from synapse.rest.admin._base import assert_requester_is_admin, assert_user_is_admin
+from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
from synapse.types import UserID, create_requester
-
-from .base import ClientV1RestServlet, client_path_patterns
+from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
-class UsersRestServlet(ClientV1RestServlet):
- PATTERNS = client_path_patterns("/admin/users/(?P<user_id>[^/]*)")
+def historical_admin_path_patterns(path_regex):
+ """Returns the list of patterns for an admin endpoint, including historical ones
+
+ This is a backwards-compatibility hack. Previously, the Admin API was exposed at
+ various paths under /_matrix/client. This function returns a list of patterns
+ matching those paths (as well as the new one), so that existing scripts which rely
+ on the endpoints being available there are not broken.
+
+ Note that this should only be used for existing endpoints: new ones should just
+ register for the /_synapse/admin path.
+ """
+ return list(
+ re.compile(prefix + path_regex)
+ for prefix in (
+ "^/_synapse/admin/v1",
+ "^/_matrix/client/api/v1/admin",
+ "^/_matrix/client/unstable/admin",
+ "^/_matrix/client/r0/admin"
+ )
+ )
+
+
+class UsersRestServlet(RestServlet):
+ PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)")
def __init__(self, hs):
- super(UsersRestServlet, self).__init__(hs)
+ self.hs = hs
+ self.auth = hs.get_auth()
self.handlers = hs.get_handlers()
@defer.inlineCallbacks
def on_GET(self, request, user_id):
target_user = UserID.from_string(user_id)
- requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
-
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
-
- # To allow all users to get the users list
- # if not is_admin and target_user != auth_user:
- # raise AuthError(403, "You are not a server admin")
+ yield assert_requester_is_admin(self.auth, request)
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Can only users a local user")
@@ -66,18 +87,30 @@ class UsersRestServlet(ClientV1RestServlet):
defer.returnValue((200, ret))
-class UserRegisterServlet(ClientV1RestServlet):
+class VersionServlet(RestServlet):
+ PATTERNS = (re.compile("^/_synapse/admin/v1/server_version$"), )
+
+ def __init__(self, hs):
+ self.res = {
+ 'server_version': get_version_string(synapse),
+ 'python_version': platform.python_version(),
+ }
+
+ def on_GET(self, request):
+ return 200, self.res
+
+
+class UserRegisterServlet(RestServlet):
"""
Attributes:
NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted
nonces (dict[str, int]): The nonces that we will accept. A dict of
nonce to the time it was generated, in int seconds.
"""
- PATTERNS = client_path_patterns("/admin/register")
+ PATTERNS = historical_admin_path_patterns("/register")
NONCE_TIMEOUT = 60
def __init__(self, hs):
- super(UserRegisterServlet, self).__init__(hs)
self.handlers = hs.get_handlers()
self.reactor = hs.get_reactor()
self.nonces = {}
@@ -204,11 +237,12 @@ class UserRegisterServlet(ClientV1RestServlet):
defer.returnValue((200, result))
-class WhoisRestServlet(ClientV1RestServlet):
- PATTERNS = client_path_patterns("/admin/whois/(?P<user_id>[^/]*)")
+class WhoisRestServlet(RestServlet):
+ PATTERNS = historical_admin_path_patterns("/whois/(?P<user_id>[^/]*)")
def __init__(self, hs):
- super(WhoisRestServlet, self).__init__(hs)
+ self.hs = hs
+ self.auth = hs.get_auth()
self.handlers = hs.get_handlers()
@defer.inlineCallbacks
@@ -216,10 +250,9 @@ class WhoisRestServlet(ClientV1RestServlet):
target_user = UserID.from_string(user_id)
requester = yield self.auth.get_user_by_req(request)
auth_user = requester.user
- is_admin = yield self.auth.is_server_admin(requester.user)
- if not is_admin and target_user != auth_user:
- raise AuthError(403, "You are not a server admin")
+ if target_user != auth_user:
+ yield assert_user_is_admin(self.auth, auth_user)
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Can only whois a local user")
@@ -229,20 +262,16 @@ class WhoisRestServlet(ClientV1RestServlet):
defer.returnValue((200, ret))
-class PurgeMediaCacheRestServlet(ClientV1RestServlet):
- PATTERNS = client_path_patterns("/admin/purge_media_cache")
+class PurgeMediaCacheRestServlet(RestServlet):
+ PATTERNS = historical_admin_path_patterns("/purge_media_cache")
def __init__(self, hs):
self.media_repository = hs.get_media_repository()
- super(PurgeMediaCacheRestServlet, self).__init__(hs)
+ self.auth = hs.get_auth()
@defer.inlineCallbacks
def on_POST(self, request):
- requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
-
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
+ yield assert_requester_is_admin(self.auth, request)
before_ts = parse_integer(request, "before_ts", required=True)
logger.info("before_ts: %r", before_ts)
@@ -252,9 +281,9 @@ class PurgeMediaCacheRestServlet(ClientV1RestServlet):
defer.returnValue((200, ret))
-class PurgeHistoryRestServlet(ClientV1RestServlet):
- PATTERNS = client_path_patterns(
- "/admin/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
+class PurgeHistoryRestServlet(RestServlet):
+ PATTERNS = historical_admin_path_patterns(
+ "/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
)
def __init__(self, hs):
@@ -263,17 +292,13 @@ class PurgeHistoryRestServlet(ClientV1RestServlet):
Args:
hs (synapse.server.HomeServer)
"""
- super(PurgeHistoryRestServlet, self).__init__(hs)
self.pagination_handler = hs.get_pagination_handler()
self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
@defer.inlineCallbacks
def on_POST(self, request, room_id, event_id):
- requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
-
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
+ yield assert_requester_is_admin(self.auth, request)
body = parse_json_object_from_request(request, allow_empty_body=True)
@@ -349,9 +374,9 @@ class PurgeHistoryRestServlet(ClientV1RestServlet):
}))
-class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
- PATTERNS = client_path_patterns(
- "/admin/purge_history_status/(?P<purge_id>[^/]+)"
+class PurgeHistoryStatusRestServlet(RestServlet):
+ PATTERNS = historical_admin_path_patterns(
+ "/purge_history_status/(?P<purge_id>[^/]+)"
)
def __init__(self, hs):
@@ -360,16 +385,12 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
Args:
hs (synapse.server.HomeServer)
"""
- super(PurgeHistoryStatusRestServlet, self).__init__(hs)
self.pagination_handler = hs.get_pagination_handler()
+ self.auth = hs.get_auth()
@defer.inlineCallbacks
def on_GET(self, request, purge_id):
- requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
-
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
+ yield assert_requester_is_admin(self.auth, request)
purge_status = self.pagination_handler.get_purge_status(purge_id)
if purge_status is None:
@@ -378,15 +399,16 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet):
defer.returnValue((200, purge_status.asdict()))
-class DeactivateAccountRestServlet(ClientV1RestServlet):
- PATTERNS = client_path_patterns("/admin/deactivate/(?P<target_user_id>[^/]*)")
+class DeactivateAccountRestServlet(RestServlet):
+ PATTERNS = historical_admin_path_patterns("/deactivate/(?P<target_user_id>[^/]*)")
def __init__(self, hs):
- super(DeactivateAccountRestServlet, self).__init__(hs)
self._deactivate_account_handler = hs.get_deactivate_account_handler()
+ self.auth = hs.get_auth()
@defer.inlineCallbacks
def on_POST(self, request, target_user_id):
+ yield assert_requester_is_admin(self.auth, request)
body = parse_json_object_from_request(request, allow_empty_body=True)
erase = body.get("erase", False)
if not isinstance(erase, bool):
@@ -397,11 +419,6 @@ class DeactivateAccountRestServlet(ClientV1RestServlet):
)
UserID.from_string(target_user_id)
- requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
-
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
result = yield self._deactivate_account_handler.deactivate_account(
target_user_id, erase,
@@ -416,13 +433,13 @@ class DeactivateAccountRestServlet(ClientV1RestServlet):
}))
-class ShutdownRoomRestServlet(ClientV1RestServlet):
+class ShutdownRoomRestServlet(RestServlet):
"""Shuts down a room by removing all local users from the room and blocking
all future invites and joins to the room. Any local aliases will be repointed
to a new room created by `new_room_user_id` and kicked users will be auto
joined to the new room.
"""
- PATTERNS = client_path_patterns("/admin/shutdown_room/(?P<room_id>[^/]+)")
+ PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P<room_id>[^/]+)")
DEFAULT_MESSAGE = (
"Sharing illegal content on this server is not permitted and rooms in"
@@ -430,19 +447,18 @@ class ShutdownRoomRestServlet(ClientV1RestServlet):
)
def __init__(self, hs):
- super(ShutdownRoomRestServlet, self).__init__(hs)
+ self.hs = hs
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self._room_creation_handler = hs.get_room_creation_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
+ self.auth = hs.get_auth()
@defer.inlineCallbacks
def on_POST(self, request, room_id):
requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
+ yield assert_user_is_admin(self.auth, requester.user)
content = parse_json_object_from_request(request)
assert_params_in_dict(content, ["new_room_user_id"])
@@ -466,53 +482,67 @@ class ShutdownRoomRestServlet(ClientV1RestServlet):
)
new_room_id = info["room_id"]
- yield self.event_creation_handler.create_and_send_nonmember_event(
- room_creator_requester,
- {
- "type": "m.room.message",
- "content": {"body": message, "msgtype": "m.text"},
- "room_id": new_room_id,
- "sender": new_room_user_id,
- },
- ratelimit=False,
- )
-
requester_user_id = requester.user.to_string()
- logger.info("Shutting down room %r", room_id)
+ logger.info(
+ "Shutting down room %r, joining to new room: %r",
+ room_id, new_room_id,
+ )
+ # This will work even if the room is already blocked, but that is
+ # desirable in case the first attempt at blocking the room failed below.
yield self.store.block_room(room_id, requester_user_id)
- users = yield self.state.get_current_user_in_room(room_id)
+ users = yield self.state.get_current_users_in_room(room_id)
kicked_users = []
+ failed_to_kick_users = []
for user_id in users:
if not self.hs.is_mine_id(user_id):
continue
logger.info("Kicking %r from %r...", user_id, room_id)
- target_requester = create_requester(user_id)
- yield self.room_member_handler.update_membership(
- requester=target_requester,
- target=target_requester.user,
- room_id=room_id,
- action=Membership.LEAVE,
- content={},
- ratelimit=False
- )
+ try:
+ target_requester = create_requester(user_id)
+ yield self.room_member_handler.update_membership(
+ requester=target_requester,
+ target=target_requester.user,
+ room_id=room_id,
+ action=Membership.LEAVE,
+ content={},
+ ratelimit=False,
+ require_consent=False,
+ )
- yield self.room_member_handler.forget(target_requester.user, room_id)
+ yield self.room_member_handler.forget(target_requester.user, room_id)
- yield self.room_member_handler.update_membership(
- requester=target_requester,
- target=target_requester.user,
- room_id=new_room_id,
- action=Membership.JOIN,
- content={},
- ratelimit=False
- )
+ yield self.room_member_handler.update_membership(
+ requester=target_requester,
+ target=target_requester.user,
+ room_id=new_room_id,
+ action=Membership.JOIN,
+ content={},
+ ratelimit=False,
+ require_consent=False,
+ )
- kicked_users.append(user_id)
+ kicked_users.append(user_id)
+ except Exception:
+ logger.exception(
+ "Failed to leave old room and join new room for %r", user_id,
+ )
+ failed_to_kick_users.append(user_id)
+
+ yield self.event_creation_handler.create_and_send_nonmember_event(
+ room_creator_requester,
+ {
+ "type": "m.room.message",
+ "content": {"body": message, "msgtype": "m.text"},
+ "room_id": new_room_id,
+ "sender": new_room_user_id,
+ },
+ ratelimit=False,
+ )
aliases_for_room = yield self.store.get_aliases_for_room(room_id)
@@ -522,27 +552,26 @@ class ShutdownRoomRestServlet(ClientV1RestServlet):
defer.returnValue((200, {
"kicked_users": kicked_users,
+ "failed_to_kick_users": failed_to_kick_users,
"local_aliases": aliases_for_room,
"new_room_id": new_room_id,
}))
-class QuarantineMediaInRoom(ClientV1RestServlet):
+class QuarantineMediaInRoom(RestServlet):
"""Quarantines all media in a room so that no one can download it via
this server.
"""
- PATTERNS = client_path_patterns("/admin/quarantine_media/(?P<room_id>[^/]+)")
+ PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P<room_id>[^/]+)")
def __init__(self, hs):
- super(QuarantineMediaInRoom, self).__init__(hs)
self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
@defer.inlineCallbacks
def on_POST(self, request, room_id):
requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
+ yield assert_user_is_admin(self.auth, requester.user)
num_quarantined = yield self.store.quarantine_media_ids_in_room(
room_id, requester.user.to_string(),
@@ -551,13 +580,12 @@ class QuarantineMediaInRoom(ClientV1RestServlet):
defer.returnValue((200, {"num_quarantined": num_quarantined}))
-class ListMediaInRoom(ClientV1RestServlet):
+class ListMediaInRoom(RestServlet):
"""Lists all of the media in a given room.
"""
- PATTERNS = client_path_patterns("/admin/room/(?P<room_id>[^/]+)/media")
+ PATTERNS = historical_admin_path_patterns("/room/(?P<room_id>[^/]+)/media")
def __init__(self, hs):
- super(ListMediaInRoom, self).__init__(hs)
self.store = hs.get_datastore()
@defer.inlineCallbacks
@@ -572,11 +600,11 @@ class ListMediaInRoom(ClientV1RestServlet):
defer.returnValue((200, {"local": local_mxcs, "remote": remote_mxcs}))
-class ResetPasswordRestServlet(ClientV1RestServlet):
+class ResetPasswordRestServlet(RestServlet):
"""Post request to allow an administrator reset password for a user.
This needs user to have administrator access in Synapse.
Example:
- http://localhost:8008/_matrix/client/api/v1/admin/reset_password/
+ http://localhost:8008/_synapse/admin/v1/reset_password/
@user:to_reset_password?access_token=admin_access_token
JsonBodyToSend:
{
@@ -585,11 +613,10 @@ class ResetPasswordRestServlet(ClientV1RestServlet):
Returns:
200 OK with empty object if success otherwise an error.
"""
- PATTERNS = client_path_patterns("/admin/reset_password/(?P<target_user_id>[^/]*)")
+ PATTERNS = historical_admin_path_patterns("/reset_password/(?P<target_user_id>[^/]*)")
def __init__(self, hs):
self.store = hs.get_datastore()
- super(ResetPasswordRestServlet, self).__init__(hs)
self.hs = hs
self.auth = hs.get_auth()
self._set_password_handler = hs.get_set_password_handler()
@@ -599,39 +626,34 @@ class ResetPasswordRestServlet(ClientV1RestServlet):
"""Post request to allow an administrator reset password for a user.
This needs user to have administrator access in Synapse.
"""
- UserID.from_string(target_user_id)
requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
+ yield assert_user_is_admin(self.auth, requester.user)
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
+ UserID.from_string(target_user_id)
params = parse_json_object_from_request(request)
assert_params_in_dict(params, ["new_password"])
new_password = params['new_password']
- logger.info("new_password: %r", new_password)
-
yield self._set_password_handler.set_password(
target_user_id, new_password, requester
)
defer.returnValue((200, {}))
-class GetUsersPaginatedRestServlet(ClientV1RestServlet):
+class GetUsersPaginatedRestServlet(RestServlet):
"""Get request to get specific number of users from Synapse.
This needs user to have administrator access in Synapse.
Example:
- http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/
+ http://localhost:8008/_synapse/admin/v1/users_paginate/
@admin:user?access_token=admin_access_token&start=0&limit=10
Returns:
200 OK with json object {list[dict[str, Any]], count} or empty object.
"""
- PATTERNS = client_path_patterns("/admin/users_paginate/(?P<target_user_id>[^/]*)")
+ PATTERNS = historical_admin_path_patterns("/users_paginate/(?P<target_user_id>[^/]*)")
def __init__(self, hs):
self.store = hs.get_datastore()
- super(GetUsersPaginatedRestServlet, self).__init__(hs)
self.hs = hs
self.auth = hs.get_auth()
self.handlers = hs.get_handlers()
@@ -641,16 +663,9 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
"""Get request to get specific number of users from Synapse.
This needs user to have administrator access in Synapse.
"""
- target_user = UserID.from_string(target_user_id)
- requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
+ yield assert_requester_is_admin(self.auth, request)
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
-
- # To allow all users to get the users list
- # if not is_admin and target_user != auth_user:
- # raise AuthError(403, "You are not a server admin")
+ target_user = UserID.from_string(target_user_id)
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Can only users a local user")
@@ -671,7 +686,7 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
"""Post request to get specific number of users from Synapse..
This needs user to have administrator access in Synapse.
Example:
- http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/
+ http://localhost:8008/_synapse/admin/v1/users_paginate/
@admin:user?access_token=admin_access_token
JsonBodyToSend:
{
@@ -681,12 +696,8 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
Returns:
200 OK with json object {list[dict[str, Any]], count} or empty object.
"""
+ yield assert_requester_is_admin(self.auth, request)
UserID.from_string(target_user_id)
- requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
-
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
order = "name" # order by name in user table
params = parse_json_object_from_request(request)
@@ -701,21 +712,20 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet):
defer.returnValue((200, ret))
-class SearchUsersRestServlet(ClientV1RestServlet):
+class SearchUsersRestServlet(RestServlet):
"""Get request to search user table for specific users according to
search term.
This needs user to have administrator access in Synapse.
Example:
- http://localhost:8008/_matrix/client/api/v1/admin/search_users/
+ http://localhost:8008/_synapse/admin/v1/search_users/
@admin:user?access_token=admin_access_token&term=alice
Returns:
200 OK with json object {list[dict[str, Any]], count} or empty object.
"""
- PATTERNS = client_path_patterns("/admin/search_users/(?P<target_user_id>[^/]*)")
+ PATTERNS = historical_admin_path_patterns("/search_users/(?P<target_user_id>[^/]*)")
def __init__(self, hs):
self.store = hs.get_datastore()
- super(SearchUsersRestServlet, self).__init__(hs)
self.hs = hs
self.auth = hs.get_auth()
self.handlers = hs.get_handlers()
@@ -726,12 +736,9 @@ class SearchUsersRestServlet(ClientV1RestServlet):
search term.
This requires the requester to have administrator access in Synapse.
"""
- target_user = UserID.from_string(target_user_id)
- requester = yield self.auth.get_user_by_req(request)
- is_admin = yield self.auth.is_server_admin(requester.user)
+ yield assert_requester_is_admin(self.auth, request)
- if not is_admin:
- raise AuthError(403, "You are not a server admin")
+ target_user = UserID.from_string(target_user_id)
# To allow all users to get the users list
# if not is_admin and target_user != auth_user:
@@ -749,7 +756,80 @@ class SearchUsersRestServlet(ClientV1RestServlet):
defer.returnValue((200, ret))
-def register_servlets(hs, http_server):
+class DeleteGroupAdminRestServlet(RestServlet):
+ """Allows deleting of local groups
+ """
+ PATTERNS = historical_admin_path_patterns("/delete_group/(?P<group_id>[^/]*)")
+
+ def __init__(self, hs):
+ self.group_server = hs.get_groups_server_handler()
+ self.is_mine_id = hs.is_mine_id
+ self.auth = hs.get_auth()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, group_id):
+ requester = yield self.auth.get_user_by_req(request)
+ yield assert_user_is_admin(self.auth, requester.user)
+
+ if not self.is_mine_id(group_id):
+ raise SynapseError(400, "Can only delete local groups")
+
+ yield self.group_server.delete_group(group_id, requester.user.to_string())
+ defer.returnValue((200, {}))
+
+
+class AccountValidityRenewServlet(RestServlet):
+ PATTERNS = historical_admin_path_patterns("/account_validity/validity$")
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ self.hs = hs
+ self.account_activity_handler = hs.get_account_validity_handler()
+ self.auth = hs.get_auth()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ yield assert_requester_is_admin(self.auth, request)
+
+ body = parse_json_object_from_request(request)
+
+ if "user_id" not in body:
+ raise SynapseError(400, "Missing property 'user_id' in the request body")
+
+ expiration_ts = yield self.account_activity_handler.renew_account_for_user(
+ body["user_id"], body.get("expiration_ts"),
+ not body.get("enable_renewal_emails", True),
+ )
+
+ res = {
+ "expiration_ts": expiration_ts,
+ }
+ defer.returnValue((200, res))
+
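A hedged sketch of a request body this servlet accepts, per the handling above.
The values are placeholders; the millisecond interpretation of expiration_ts is
an assumption based on Synapse's usual timestamp convention:

# POST /_synapse/admin/v1/account_validity/validity
# {
#     "user_id": "@user:example.com",
#     "expiration_ts": 1558000000000,    # optional; omit to use the default
#     "enable_renewal_emails": true      # optional, defaults to true
# }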
+########################################################################################
+#
+# please don't add more servlets here: this file is already long and unwieldy. Put
+# them in separate files within the 'admin' package.
+#
+########################################################################################
+
+
+class AdminRestResource(JsonResource):
+ """The REST resource which gets mounted at /_synapse/admin"""
+
+ def __init__(self, hs):
+ JsonResource.__init__(self, hs, canonical_json=False)
+
+ register_servlets_for_client_rest_resource(hs, self)
+ SendServerNoticeServlet(hs).register(self)
+ VersionServlet(hs).register(self)
+
+
+def register_servlets_for_client_rest_resource(hs, http_server):
+ """Register only the servlets which need to be exposed on /_matrix/client/xxx"""
WhoisRestServlet(hs).register(http_server)
PurgeMediaCacheRestServlet(hs).register(http_server)
PurgeHistoryStatusRestServlet(hs).register(http_server)
@@ -763,3 +843,7 @@ def register_servlets(hs, http_server):
QuarantineMediaInRoom(hs).register(http_server)
ListMediaInRoom(hs).register(http_server)
UserRegisterServlet(hs).register(http_server)
+ DeleteGroupAdminRestServlet(hs).register(http_server)
+ AccountValidityRenewServlet(hs).register(http_server)
+ # don't add more things here: new servlets should only be exposed on
+ # /_synapse/admin so should not go here. Instead register them in AdminRestResource.
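The hunks above relocate the admin API from /_matrix/client/api/v1/admin to
/_synapse/admin/v1, keeping the historical paths registered for compatibility.
A hedged sketch of calling the relocated shutdown_room endpoint from Python;
the homeserver URL, admin token, and room/user IDs are placeholders:

import requests

HOMESERVER = "http://localhost:8008"   # placeholder
ADMIN_TOKEN = "admin_access_token"     # placeholder

resp = requests.post(
    HOMESERVER + "/_synapse/admin/v1/shutdown_room/!badroom:example.com",
    params={"access_token": ADMIN_TOKEN},
    json={"new_room_user_id": "@admin:example.com"},
)
# Per the diff, the response now also reports users who could not be moved:
# {"kicked_users": [...], "failed_to_kick_users": [...],
#  "local_aliases": [...], "new_room_id": "..."}
print(resp.json())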
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py
new file mode 100644
index 0000000..881d67b
--- /dev/null
+++ b/synapse/rest/admin/_base.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import defer
+
+from synapse.api.errors import AuthError
+
+
+@defer.inlineCallbacks
+def assert_requester_is_admin(auth, request):
+ """Verify that the requester is an admin user
+
+ WARNING: MAKE SURE YOU YIELD ON THE RESULT!
+
+ Args:
+ auth (synapse.api.auth.Auth):
+ request (twisted.web.server.Request): incoming request
+
+ Returns:
+ Deferred
+
+ Raises:
+ AuthError if the requester is not an admin
+ """
+ requester = yield auth.get_user_by_req(request)
+ yield assert_user_is_admin(auth, requester.user)
+
+
+@defer.inlineCallbacks
+def assert_user_is_admin(auth, user_id):
+ """Verify that the given user is an admin user
+
+ WARNING: MAKE SURE YOU YIELD ON THE RESULT!
+
+ Args:
+ auth (synapse.api.auth.Auth):
+ user_id (UserID):
+
+ Returns:
+ Deferred
+
+ Raises:
+ AuthError if the user is not an admin
+ """
+
+ is_admin = yield auth.is_server_admin(user_id)
+ if not is_admin:
+ raise AuthError(403, "You are not a server admin")
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py
new file mode 100644
index 0000000..ae5aca9
--- /dev/null
+++ b/synapse/rest/admin/server_notice_servlet.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import (
+ RestServlet,
+ assert_params_in_dict,
+ parse_json_object_from_request,
+)
+from synapse.rest.admin import assert_requester_is_admin
+from synapse.rest.client.transactions import HttpTransactionCache
+from synapse.types import UserID
+
+
+class SendServerNoticeServlet(RestServlet):
+ """Servlet which will send a server notice to a given user
+
+ POST /_synapse/admin/v1/send_server_notice
+ {
+ "user_id": "@target_user:server_name",
+ "content": {
+ "msgtype": "m.text",
+ "body": "This is my message"
+ }
+ }
+
+ returns:
+
+ {
+ "event_id": "$1895723857jgskldgujpious"
+ }
+ """
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.txns = HttpTransactionCache(hs)
+ self.snm = hs.get_server_notices_manager()
+
+ def register(self, json_resource):
+ PATTERN = "^/_synapse/admin/v1/send_server_notice"
+ json_resource.register_paths(
+ "POST",
+ (re.compile(PATTERN + "$"), ),
+ self.on_POST,
+ )
+ json_resource.register_paths(
+ "PUT",
+ (re.compile(PATTERN + "/(?P<txn_id>[^/]*)$",), ),
+ self.on_PUT,
+ )
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, txn_id=None):
+ yield assert_requester_is_admin(self.auth, request)
+ body = parse_json_object_from_request(request)
+ assert_params_in_dict(body, ("user_id", "content"))
+ event_type = body.get("type", EventTypes.Message)
+ state_key = body.get("state_key")
+
+ if not self.snm.is_enabled():
+ raise SynapseError(400, "Server notices are not enabled on this server")
+
+ user_id = body["user_id"]
+ UserID.from_string(user_id)
+ if not self.hs.is_mine_id(user_id):
+ raise SynapseError(400, "Server notices can only be sent to local users")
+
+ event = yield self.snm.send_notice(
+ user_id=body["user_id"],
+ type=event_type,
+ state_key=state_key,
+ event_content=body["content"],
+ )
+
+ defer.returnValue((200, {"event_id": event.event_id}))
+
+ def on_PUT(self, request, txn_id):
+ return self.txns.fetch_or_execute_request(
+ request, self.on_POST, request, txn_id,
+ )
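Because register() wires both POST and PUT (the latter keyed by a client-chosen
transaction ID and routed through HttpTransactionCache), retries of the same
notice are idempotent. A hedged sketch of driving the endpoint; the homeserver
URL and admin token are placeholders:

import requests

HOMESERVER = "http://localhost:8008"   # placeholder
ADMIN_TOKEN = "admin_access_token"     # placeholder

# Retrying this PUT with the same txn_id replays the cached response instead
# of sending a second notice.
resp = requests.put(
    HOMESERVER + "/_synapse/admin/v1/send_server_notice/txn1",
    params={"access_token": ADMIN_TOKEN},
    json={
        "user_id": "@target_user:server_name",
        "content": {"msgtype": "m.text", "body": "This is my message"},
    },
)
print(resp.json())  # e.g. {"event_id": "$..."}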
diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py
index c77d7ab..dc63b66 100644
--- a/synapse/rest/client/v1/base.py
+++ b/synapse/rest/client/v1/base.py
@@ -19,7 +19,7 @@
import logging
import re
-from synapse.api.urls import CLIENT_PREFIX
+from synapse.api.urls import CLIENT_API_PREFIX
from synapse.http.servlet import RestServlet
from synapse.rest.client.transactions import HttpTransactionCache
@@ -36,12 +36,12 @@ def client_path_patterns(path_regex, releases=(0,), include_in_unstable=True):
Returns:
SRE_Pattern
"""
- patterns = [re.compile("^" + CLIENT_PREFIX + path_regex)]
+ patterns = [re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex)]
if include_in_unstable:
- unstable_prefix = CLIENT_PREFIX.replace("/api/v1", "/unstable")
+ unstable_prefix = CLIENT_API_PREFIX + "/unstable"
patterns.append(re.compile("^" + unstable_prefix + path_regex))
for release in releases:
- new_prefix = CLIENT_PREFIX.replace("/api/v1", "/r%d" % release)
+ new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,)
patterns.append(re.compile("^" + new_prefix + path_regex))
return patterns
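With this change the unstable and release prefixes are built up from
CLIENT_API_PREFIX instead of string-replacing "/api/v1" in CLIENT_PREFIX.
Assuming CLIENT_API_PREFIX is "/_matrix/client" (its definition lives outside
this hunk, so that value is an assumption), a sample call now yields:

# patterns = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
# -> ^/_matrix/client/api/v1/presence/(?P<user_id>[^/]*)/status
# -> ^/_matrix/client/unstable/presence/(?P<user_id>[^/]*)/status
# -> ^/_matrix/client/r0/presence/(?P<user_id>[^/]*)/status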
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index cd9b3bd..c3b0a39 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -19,7 +19,6 @@ import logging
from twisted.internet import defer
from synapse.api.errors import SynapseError
-from synapse.events.utils import serialize_event
from synapse.streams.config import PaginationConfig
from .base import ClientV1RestServlet, client_path_patterns
@@ -84,6 +83,7 @@ class EventRestServlet(ClientV1RestServlet):
super(EventRestServlet, self).__init__(hs)
self.clock = hs.get_clock()
self.event_handler = hs.get_event_handler()
+ self._event_serializer = hs.get_event_client_serializer()
@defer.inlineCallbacks
def on_GET(self, request, event_id):
@@ -92,7 +92,8 @@ class EventRestServlet(ClientV1RestServlet):
time_now = self.clock.time_msec()
if event:
- defer.returnValue((200, serialize_event(event, time_now)))
+ event = yield self._event_serializer.serialize_event(event, time_now)
+ defer.returnValue((200, event))
else:
defer.returnValue((404, "Event not found."))
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 6121c5b..5180e9e 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -22,6 +22,7 @@ from twisted.internet import defer
from twisted.web.client import PartialDownloadError
from synapse.api.errors import Codes, LoginError, SynapseError
+from synapse.api.ratelimiting import Ratelimiter
from synapse.http.server import finish_request
from synapse.http.servlet import (
RestServlet,
@@ -97,6 +98,7 @@ class LoginRestServlet(ClientV1RestServlet):
self.registration_handler = hs.get_registration_handler()
self.handlers = hs.get_handlers()
self._well_known_builder = WellKnownBuilder(hs)
+ self._address_ratelimiter = Ratelimiter()
def on_GET(self, request):
flows = []
@@ -129,6 +131,13 @@ class LoginRestServlet(ClientV1RestServlet):
@defer.inlineCallbacks
def on_POST(self, request):
+ self._address_ratelimiter.ratelimit(
+ request.getClientIP(), time_now_s=self.hs.clock.time(),
+ rate_hz=self.hs.config.rc_login_address.per_second,
+ burst_count=self.hs.config.rc_login_address.burst_count,
+ update=True,
+ )
+
login_submission = parse_json_object_from_request(request)
try:
if self.jwt_enabled and (login_submission["type"] ==
@@ -192,6 +201,24 @@ class LoginRestServlet(ClientV1RestServlet):
# We store all email addresses as lowercase in the DB.
# (See add_threepid in synapse/handlers/auth.py)
address = address.lower()
+
+ # Check for login providers that support 3pid login types
+ canonical_user_id, callback_3pid = (
+ yield self.auth_handler.check_password_provider_3pid(
+ medium,
+ address,
+ login_submission["password"],
+ )
+ )
+ if canonical_user_id:
+ # Authentication through password provider and 3pid succeeded
+ result = yield self._register_device_with_callback(
+ canonical_user_id, login_submission, callback_3pid,
+ )
+ defer.returnValue(result)
+
+ # No password providers were able to handle this 3pid
+ # Check local store
user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
medium, address,
)
@@ -214,20 +241,43 @@ class LoginRestServlet(ClientV1RestServlet):
if "user" not in identifier:
raise SynapseError(400, "User identifier is missing 'user' key")
- auth_handler = self.auth_handler
- canonical_user_id, callback = yield auth_handler.validate_login(
+ canonical_user_id, callback = yield self.auth_handler.validate_login(
identifier["user"],
login_submission,
)
+ result = yield self._register_device_with_callback(
+ canonical_user_id, login_submission, callback,
+ )
+ defer.returnValue(result)
+
+ @defer.inlineCallbacks
+ def _register_device_with_callback(
+ self,
+ user_id,
+ login_submission,
+ callback=None,
+ ):
+ """ Registers a device with a given user_id. Optionally run a callback
+ function after registration has completed.
+
+ Args:
+ user_id (str): ID of the user to register.
+ login_submission (dict): Dictionary of login information.
+ callback (func|None): Callback function to run after registration.
+
+ Returns:
+ result (Dict[str,str]): Dictionary of account information after
+ successful registration.
+ """
device_id = login_submission.get("device_id")
initial_display_name = login_submission.get("initial_device_display_name")
device_id, access_token = yield self.registration_handler.register_device(
- canonical_user_id, device_id, initial_display_name,
+ user_id, device_id, initial_display_name,
)
result = {
- "user_id": canonical_user_id,
+ "user_id": user_id,
"access_token": access_token,
"home_server": self.hs.hostname,
"device_id": device_id,
@@ -285,6 +335,7 @@ class LoginRestServlet(ClientV1RestServlet):
raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED)
user_id = UserID(user, self.hs.hostname).to_string()
+
auth_handler = self.auth_handler
registered_user_id = yield auth_handler.check_user_exists(user_id)
if registered_user_id:
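The on_POST hunk above throttles login attempts per client IP before any
credentials are inspected. A sketch of the Ratelimiter semantics as used
there; the rate numbers are illustrative stand-ins for the homeserver's
rc_login.address settings, not authoritative defaults:

from synapse.api.ratelimiting import Ratelimiter

limiter = Ratelimiter()

def check_login_allowed(ip, now_s):
    # Records the attempt and raises LimitExceededError (surfaced to the
    # client as HTTP 429) once the per-IP burst allowance is exhausted.
    limiter.ratelimit(
        ip, time_now_s=now_s,
        rate_hz=0.17,     # illustrative per_second value
        burst_count=3,    # illustrative burst_count value
        update=True,
    )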
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
index b5a6d6a..045d5a2 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/v1/presence.py
@@ -93,72 +93,5 @@ class PresenceStatusRestServlet(ClientV1RestServlet):
return (200, {})
-class PresenceListRestServlet(ClientV1RestServlet):
- PATTERNS = client_path_patterns("/presence/list/(?P<user_id>[^/]*)")
-
- def __init__(self, hs):
- super(PresenceListRestServlet, self).__init__(hs)
- self.presence_handler = hs.get_presence_handler()
-
- @defer.inlineCallbacks
- def on_GET(self, request, user_id):
- requester = yield self.auth.get_user_by_req(request)
- user = UserID.from_string(user_id)
-
- if not self.hs.is_mine(user):
- raise SynapseError(400, "User not hosted on this Home Server")
-
- if requester.user != user:
- raise SynapseError(400, "Cannot get another user's presence list")
-
- presence = yield self.presence_handler.get_presence_list(
- observer_user=user, accepted=True
- )
-
- defer.returnValue((200, presence))
-
- @defer.inlineCallbacks
- def on_POST(self, request, user_id):
- requester = yield self.auth.get_user_by_req(request)
- user = UserID.from_string(user_id)
-
- if not self.hs.is_mine(user):
- raise SynapseError(400, "User not hosted on this Home Server")
-
- if requester.user != user:
- raise SynapseError(
- 400, "Cannot modify another user's presence list")
-
- content = parse_json_object_from_request(request)
-
- if "invite" in content:
- for u in content["invite"]:
- if not isinstance(u, string_types):
- raise SynapseError(400, "Bad invite value.")
- if len(u) == 0:
- continue
- invited_user = UserID.from_string(u)
- yield self.presence_handler.send_presence_invite(
- observer_user=user, observed_user=invited_user
- )
-
- if "drop" in content:
- for u in content["drop"]:
- if not isinstance(u, string_types):
- raise SynapseError(400, "Bad drop value.")
- if len(u) == 0:
- continue
- dropped_user = UserID.from_string(u)
- yield self.presence_handler.drop(
- observer_user=user, observed_user=dropped_user
- )
-
- defer.returnValue((200, {}))
-
- def on_OPTIONS(self, request):
- return (200, {})
-
-
def register_servlets(hs, http_server):
PresenceStatusRestServlet(hs).register(http_server)
- PresenceListRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index a23edd