author     Kegan Dougal <kegan@matrix.org>  2015-03-06 11:50:27 +0000
committer  Kegan Dougal <kegan@matrix.org>  2015-03-06 11:50:27 +0000
commit     192e228a98f3700f48d7fd136f4dce2979ec7c90 (patch)
tree       e348168d50b63100629966e5dce7c38844ec3699
parent     Rejig structure given the appservice_handler already filters the correct ASes... (diff)
download   synapse-192e228a98f3700f48d7fd136f4dce2979ec7c90.tar.xz
Start adding some tests
-rw-r--r--  synapse/appservice/scheduler.py      23
-rw-r--r--  tests/appservice/test_scheduler.py  106
2 files changed, 119 insertions(+), 10 deletions(-)
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 19fe8e11e8..754f39381f 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -49,6 +49,8 @@ This is all tied together by the AppServiceScheduler which DIs the required
 components.
 """
 
+from twisted.internet import defer
+
 
 class AppServiceScheduler(object):
     """ Public facing API for this module. Does the required DI to tie the
@@ -105,11 +107,14 @@ class _EventGrouper(object):
         self.groups = {}  # dict of {service: [events]}
 
     def on_receive(self, service, event):
-        # TODO group this
-        pass
+        if service not in self.groups:
+            self.groups[service] = []
+        self.groups[service].append(event)
 
     def drain_groups(self):
-        return self.groups
+        groups = self.groups
+        self.groups = {}
+        return groups
 
 
 class _TransactionController(object):
@@ -135,7 +140,6 @@ class _TransactionController(object):
                     self._start_recoverer(service)
         self.clock.call_later(1000, self.start_polling)
 
-
     def on_recovered(self, service):
         # TODO mark AS as UP
         pass
@@ -172,26 +176,25 @@ class _Recoverer(object):
     def recover(self):
         self.clock.call_later(2000 ** self.backoff_counter, self.retry)
 
+    @defer.inlineCallbacks
     def retry(self):
-        txn = self._get_oldest_txn()
+        txn = yield self._get_oldest_txn()
         if txn:
             if txn.send(self.as_api):
                 txn.complete(self.store)
                 # reset the backoff counter and retry immediately
                 self.backoff_counter = 1
                 self.retry()
-                return
             else:
                 self.backoff_counter += 1
                 self.recover()
-                return
         else:
             self._set_service_recovered()
 
     def _set_service_recovered(self):
         self.callback(self.service)
 
+    @defer.inlineCallbacks
     def _get_oldest_txn(self):
-        pass  # returns AppServiceTransaction
-
-
+        txn = yield self.store.get_oldest_txn(self.service)
+        defer.returnValue(txn)
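
For illustration only (not part of the patch): a minimal sketch of the batching
behaviour _EventGrouper now implements, based on the on_receive/drain_groups code
above. The service key and event payloads are hypothetical placeholders; in Synapse
they would be ApplicationService and event objects.

    from synapse.appservice.scheduler import _EventGrouper

    grouper = _EventGrouper()

    # Events for the same service accumulate in one per-service list.
    grouper.on_receive("irc_bridge", {"type": "m.room.message"})  # placeholder values
    grouper.on_receive("irc_bridge", {"type": "m.room.member"})

    batches = grouper.drain_groups()
    # batches == {"irc_bridge": [<the two events above>]}

    # Draining swaps in a fresh dict, so a second drain yields nothing.
    assert grouper.drain_groups() == {}
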
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
new file mode 100644
index 0000000000..b41d4358cf
--- /dev/null
+++ b/tests/appservice/test_scheduler.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.appservice.scheduler import (
+    AppServiceScheduler, AppServiceTransaction, _EventGrouper,
+    _TransactionController, _Recoverer
+)
+from twisted.internet import defer
+from ..utils import MockClock
+from mock import Mock
+from tests import unittest
+
+class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.clock = MockClock()
+        self.as_api = Mock()
+        self.store = Mock()
+        self.service = Mock()
+        self.callback = Mock()
+        self.recoverer = _Recoverer(
+            clock=self.clock,
+            as_api=self.as_api,
+            store=self.store,
+            service=self.service,
+            callback=self.callback,
+        )
+
+    def test_recover_service_single_txn(self):
+        txns = self._mk_txns(1)
+        self.store.get_oldest_txn = Mock(return_value=defer.succeed(txns[0]))
+
+        self.recoverer.recover()
+        self.assertEquals(0, self.store.get_oldest_txn.call_count)
+        self.clock.advance_time(2000)
+        self.assertEquals(2, self.store.get_oldest_txn.call_count)
+
+    def _mk_txns(self, num_txns):
+        return [
+            Mock() for i in range(num_txns)
+        ]
+
+
+
+class ApplicationServiceSchedulerEventGrouperTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.grouper = _EventGrouper()
+
+    def test_drain_single_event(self):
+        service = Mock()
+        event = Mock()
+        self.grouper.on_receive(service, event)
+        groups = self.grouper.drain_groups()
+        self.assertTrue(service in groups)
+        self.assertEquals([event], groups[service])
+        self.assertEquals(1, len(groups.keys()))
+        # no more events
+        self.assertEquals(self.grouper.drain_groups(), {})
+
+    def test_drain_multiple_events(self):
+        service = Mock()
+        events = [Mock(), Mock(), Mock()]
+        for e in events:
+            self.grouper.on_receive(service, e)
+        groups = self.grouper.drain_groups()
+        self.assertTrue(service in groups)
+        self.assertEquals(events, groups[service])
+        # no more events
+        self.assertEquals(self.grouper.drain_groups(), {})
+
+    def test_drain_multiple_services(self):
+        services = [Mock(), Mock(), Mock()]
+        events_a = [Mock(), Mock()]
+        events_b = [Mock()]
+        events_c = [Mock(), Mock(), Mock(), Mock()]
+        mappings = {
+            services[0]: events_a,
+            services[1]: events_b,
+            services[2]: events_c
+        }
+        for e in events_b:
+            self.grouper.on_receive(services[1], e)
+        for e in events_c:
+            self.grouper.on_receive(services[2], e)
+        for e in events_a:
+            self.grouper.on_receive(services[0], e)
+
+        groups = self.grouper.drain_groups()
+        for service in services:
+            self.assertTrue(service in groups)
+            self.assertEquals(mappings[service], groups[service])
+        self.assertEquals(3, len(groups.keys()))
+        # no more events
+        self.assertEquals(self.grouper.drain_groups(), {})
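
For orientation only, a rough sketch (not in this commit) of the recovery-complete
path these fixtures set up: when the store reports no outstanding transaction,
retry() invokes the supplied callback with the service. This assumes the _Recoverer
constructor stores its arguments as attributes, as the code above implies, and that
tests.utils is the absolute path of the ..utils import used by the test module.

    from twisted.internet import defer
    from mock import Mock

    from synapse.appservice.scheduler import _Recoverer
    from tests.utils import MockClock  # assumed location of MockClock

    store = Mock()
    store.get_oldest_txn = Mock(return_value=defer.succeed(None))  # nothing left to resend
    callback = Mock()

    recoverer = _Recoverer(
        clock=MockClock(), as_api=Mock(), store=store,
        service=Mock(), callback=callback,
    )

    # The deferred from the store is already fired, so retry() runs straight through:
    # no txn is found, _set_service_recovered() runs, and the callback is notified.
    recoverer.retry()
    callback.assert_called_once_with(recoverer.service)
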