author      Sean Quah <8349537+squahtx@users.noreply.github.com>  2023-05-03 13:09:20 +0100
committer   GitHub <noreply@github.com>                           2023-05-03 13:09:20 +0100
commit      3b837d856c4f867377d738eacb262cad28b14ad7 (patch)
tree        8f401560a4c93b84a50ced5a5486c3fc930db852 /synapse
parent      Update CHANGES.md (diff)
download    synapse-3b837d856c4f867377d738eacb262cad28b14ad7.tar.xz
Revert "Reduce the size of the HTTP connection pool for non-pushers" (#15530) v1.83.0rc1
#15514 introduced a regression where Synapse would encounter
`PartialDownloadError`s when fetching OpenID metadata for certain
providers on startup. Due to #8088, this prevents Synapse from starting
entirely.

Revert the change while we decide what to do about the regression.
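
For context, `PartialDownloadError` is the exception Twisted's HTTP client raises when it
cannot confirm that a response body arrived in full. The sketch below is not Synapse code,
just a minimal standalone illustration of a pooled fetch that handles it; the
`fetch_metadata` helper and the URL passed to it are hypothetical.

    from twisted.internet import defer, reactor
    from twisted.web.client import Agent, HTTPConnectionPool, PartialDownloadError, readBody

    @defer.inlineCallbacks
    def fetch_metadata(url: bytes):
        # Illustration only: fetch a document through a persistent connection
        # pool, the same kind of pooled request the reverted change applied to
        # every SimpleHttpClient call.
        pool = HTTPConnectionPool(reactor, persistent=True)
        agent = Agent(reactor, pool=pool)
        response = yield agent.request(b"GET", url)
        try:
            body = yield readBody(response)
        except PartialDownloadError as e:
            # Raised when Twisted cannot confirm the whole body was received;
            # e.response holds whatever bytes did arrive.
            body = e.response
        return body
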
Diffstat (limited to 'synapse')
-rw-r--r--  synapse/http/client.py       14
-rw-r--r--  synapse/push/httppusher.py    3
-rw-r--r--  synapse/server.py            21
3 files changed, 12 insertions, 26 deletions
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 164abe9fc7..91fe474f36 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -768,7 +768,6 @@ class SimpleHttpClient(BaseHttpClient):
            request if it were otherwise caught in a blacklist.
         use_proxy: Whether proxy settings should be discovered and used
             from conventional environment variables.
-        connection_pool: The connection pool to use for this client's agent.
     """
 
     def __init__(
@@ -778,7 +777,6 @@ class SimpleHttpClient(BaseHttpClient):
         ip_whitelist: Optional[IPSet] = None,
         ip_blacklist: Optional[IPSet] = None,
         use_proxy: bool = False,
-        connection_pool: Optional[HTTPConnectionPool] = None,
     ):
         super().__init__(hs, treq_args=treq_args)
         self._ip_whitelist = ip_whitelist
@@ -791,12 +789,22 @@ class SimpleHttpClient(BaseHttpClient):
                 self.reactor, self._ip_whitelist, self._ip_blacklist
             )
 
+        # the pusher makes lots of concurrent SSL connections to Sygnal, and tends to
+        # do so in batches, so we need to allow the pool to keep lots of idle
+        # connections around.
+        pool = HTTPConnectionPool(self.reactor)
+        # XXX: The justification for using the cache factor here is that larger
+        # instances will need both more cache and more connections.
+        # Still, this should probably be a separate dial
+        pool.maxPersistentPerHost = max(int(100 * hs.config.caches.global_factor), 5)
+        pool.cachedConnectionTimeout = 2 * 60
+
         self.agent: IAgent = ProxyAgent(
             self.reactor,
             hs.get_reactor(),
             connectTimeout=15,
             contextFactory=self.hs.get_http_client_context_factory(),
-            pool=connection_pool,
+            pool=pool,
             use_proxy=use_proxy,
         )
 
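
The hunk above moves the pool configuration back into SimpleHttpClient itself. As a rough
standalone sketch of the sizing logic only (the helper name is illustrative; 0.5 is
Synapse's usual default global cache factor):

    from twisted.internet import reactor
    from twisted.web.client import HTTPConnectionPool

    def build_pool(global_cache_factor: float) -> HTTPConnectionPool:
        pool = HTTPConnectionPool(reactor)
        # Scale the number of idle persistent connections per host with the
        # cache factor, with a floor of 5, and keep cached connections around
        # for two minutes.
        pool.maxPersistentPerHost = max(int(100 * global_cache_factor), 5)
        pool.cachedConnectionTimeout = 2 * 60
        return pool

    # With the default factor of 0.5 this allows 50 idle connections per host;
    # a very small factor such as 0.01 still gets the floor of 5.
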
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index a01445e374..4f8fa445d9 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -140,8 +140,7 @@ class HttpPusher(Pusher):
             )
 
         self.url = url
-        self.http_client = hs.get_pusher_http_client()
-
+        self.http_client = hs.get_proxied_blacklisted_http_client()
         self.data_minus_url = {}
         self.data_minus_url.update(self.data)
         del self.data_minus_url["url"]
diff --git a/synapse/server.py b/synapse/server.py
index 75a902d64d..08ad97b952 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -27,7 +27,6 @@ from typing_extensions import TypeAlias
 
 from twisted.internet.interfaces import IOpenSSLContextFactory
 from twisted.internet.tcp import Port
-from twisted.web.client import HTTPConnectionPool
 from twisted.web.iweb import IPolicyForHTTPS
 from twisted.web.resource import Resource
 
@@ -455,26 +454,6 @@ class HomeServer(metaclass=abc.ABCMeta):
         )
 
     @cache_in_self
-    def get_pusher_http_client(self) -> SimpleHttpClient:
-        # the pusher makes lots of concurrent SSL connections to Sygnal, and tends to
-        # do so in batches, so we need to allow the pool to keep lots of idle
-        # connections around.
-        pool = HTTPConnectionPool(self.get_reactor())
-        # XXX: The justification for using the cache factor here is that larger
-        # instances will need both more cache and more connections.
-        # Still, this should probably be a separate dial
-        pool.maxPersistentPerHost = max(int(100 * self.config.caches.global_factor), 5)
-        pool.cachedConnectionTimeout = 2 * 60
-
-        return SimpleHttpClient(
-            self,
-            ip_whitelist=self.config.server.ip_range_whitelist,
-            ip_blacklist=self.config.server.ip_range_blacklist,
-            use_proxy=True,
-            connection_pool=pool,
-        )
-
-    @cache_in_self
     def get_federation_http_client(self) -> MatrixFederationHttpClient:
         """
         An HTTP client for federation.