diff options
Diffstat (limited to 'synapse/storage')
-rw-r--r-- synapse/storage/deviceinbox.py     | 30
-rw-r--r-- synapse/storage/devices.py         |  8
-rw-r--r-- synapse/storage/end_to_end_keys.py | 54
3 files changed, 46 insertions(+), 46 deletions(-)
diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index c561d15829..23bf028689 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -22,7 +22,7 @@ from twisted.internet import defer from synapse.storage._base import SQLBaseStore from synapse.storage.background_updates import BackgroundUpdateStore from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.tracerutils import TracerUtil, trace_defered_function, trace_function +import synapse.util.tracerutils as tracerutils logger = logging.getLogger(__name__) @@ -73,7 +73,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): "get_new_messages_for_device", get_new_messages_for_device_txn ) - @trace_defered_function + @tracerutils.trace_defered_function @defer.inlineCallbacks def delete_messages_for_device(self, user_id, device_id, up_to_stream_id): """ @@ -90,14 +90,14 @@ class DeviceInboxWorkerStore(SQLBaseStore): (user_id, device_id), None ) - TracerUtil.set_tag("last_deleted_stream_id", last_deleted_stream_id) + tracerutils.set_tag("last_deleted_stream_id", last_deleted_stream_id) if last_deleted_stream_id: has_changed = self._device_inbox_stream_cache.has_entity_changed( user_id, last_deleted_stream_id ) if not has_changed: - TracerUtil.log_kv({"message": "No changes in cache since last check"}) + tracerutils.log_kv({"message": "No changes in cache since last check"}) defer.returnValue(0) def delete_messages_for_device_txn(txn): @@ -113,7 +113,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): "delete_messages_for_device", delete_messages_for_device_txn ) - TracerUtil.log_kv( + tracerutils.log_kv( {"message": "deleted {} messages for device".format(count), "count": count} ) @@ -127,7 +127,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): defer.returnValue(count) - @trace_defered_function + @tracerutils.trace_defered_function def get_new_device_msgs_for_remote( self, destination, last_stream_id, current_stream_id, limit ): @@ -143,23 +143,23 @@ 
class DeviceInboxWorkerStore(SQLBaseStore): in the stream the messages got to. """ - TracerUtil.set_tag("destination", destination) - TracerUtil.set_tag("last_stream_id", last_stream_id) - TracerUtil.set_tag("current_stream_id", current_stream_id) - TracerUtil.set_tag("limit", limit) + tracerutils.set_tag("destination", destination) + tracerutils.set_tag("last_stream_id", last_stream_id) + tracerutils.set_tag("current_stream_id", current_stream_id) + tracerutils.set_tag("limit", limit) has_changed = self._device_federation_outbox_stream_cache.has_entity_changed( destination, last_stream_id ) if not has_changed or last_stream_id == current_stream_id: - TracerUtil.log_kv({"message": "No new messages in stream"}) + tracerutils.log_kv({"message": "No new messages in stream"}) return defer.succeed(([], current_stream_id)) if limit <= 0: # This can happen if we run out of room for EDUs in the transaction. return defer.succeed(([], last_stream_id)) - @trace_function + @tracerutils.trace_function def get_new_messages_for_remote_destination_txn(txn): sql = ( "SELECT stream_id, messages_json FROM device_federation_outbox" @@ -174,7 +174,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): stream_pos = row[0] messages.append(json.loads(row[1])) if len(messages) < limit: - TracerUtil.log_kv( + tracerutils.log_kv( {"message": "Set stream position to current position"} ) stream_pos = current_stream_id @@ -185,7 +185,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): get_new_messages_for_remote_destination_txn, ) - @trace_defered_function + @tracerutils.trace_defered_function def delete_device_msgs_for_remote(self, destination, up_to_stream_id): """Used to delete messages when the remote destination acknowledges their receipt. 
@@ -236,7 +236,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore): expiry_ms=30 * 60 * 1000, ) - @trace_defered_function + @tracerutils.trace_defered_function @defer.inlineCallbacks def add_messages_to_device_inbox( self, local_messages_by_user_then_device, remote_messages_by_destination diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 7476c89f93..f193ea4ff2 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from synapse.util.tracerutils import TracerUtil, trace_defered_function +import synapse.util.tracerutils as tracerutils from six import iteritems @@ -300,7 +300,7 @@ class DeviceWorkerStore(SQLBaseStore): def get_device_stream_token(self): return self._device_list_id_gen.get_current_token() - @trace_defered_function + @tracerutils.trace_defered_function @defer.inlineCallbacks def get_user_devices_from_cache(self, query_list): """Get the devices (and keys if any) for remote users from the cache. 
@@ -331,8 +331,8 @@ class DeviceWorkerStore(SQLBaseStore): results.setdefault(user_id, {})[device_id] = device else: results[user_id] = yield self._get_cached_devices_for_user(user_id) - TracerUtil.set_tag("in_cache", results) - TracerUtil.set_tag("not_in_cache", user_ids_not_in_cache) + tracerutils.set_tag("in_cache", results) + tracerutils.set_tag("not_in_cache", user_ids_not_in_cache) defer.returnValue((user_ids_not_in_cache, results)) @cachedInlineCallbacks(num_args=2, tree=True) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index fc31d26f70..8308a1473a 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -22,11 +22,11 @@ from synapse.util.caches.descriptors import cached from ._base import SQLBaseStore, db_to_json -from synapse.util.tracerutils import TracerUtil, trace_defered_function, trace_function +import synapse.util.tracerutils as tracerutils class EndToEndKeyWorkerStore(SQLBaseStore): - @trace_defered_function + @tracerutils.trace_defered_function @defer.inlineCallbacks def get_e2e_device_keys( self, query_list, include_all_devices=False, include_deleted_devices=False @@ -43,7 +43,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): Dict mapping from user-id to dict mapping from device_id to dict containing "key_json", "device_display_name". 
""" - TracerUtil.set_tag("query_list", query_list) + tracerutils.set_tag("query_list", query_list) if not query_list: defer.returnValue({}) @@ -61,12 +61,12 @@ class EndToEndKeyWorkerStore(SQLBaseStore): defer.returnValue(results) - @trace_function + @tracerutils.trace_function def _get_e2e_device_keys_txn( self, txn, query_list, include_all_devices=False, include_deleted_devices=False ): - TracerUtil.set_tag("include_all_devices", include_all_devices) - TracerUtil.set_tag("include_deleted_devices", include_deleted_devices) + tracerutils.set_tag("include_all_devices", include_all_devices) + tracerutils.set_tag("include_deleted_devices", include_deleted_devices) query_clauses = [] query_params = [] @@ -112,10 +112,10 @@ class EndToEndKeyWorkerStore(SQLBaseStore): for user_id, device_id in deleted_devices: result.setdefault(user_id, {})[device_id] = None - TracerUtil.log_kv(result) + tracerutils.log_kv(result) return result - @trace_defered_function + @tracerutils.trace_defered_function @defer.inlineCallbacks def get_e2e_one_time_keys(self, user_id, device_id, key_ids): """Retrieve a number of one-time keys for a user @@ -131,9 +131,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore): key_id) to json string for key """ - TracerUtil.set_tag("user_id", user_id) - TracerUtil.set_tag("device_id", device_id) - TracerUtil.set_tag("key_ids", key_ids) + tracerutils.set_tag("user_id", user_id) + tracerutils.set_tag("device_id", device_id) + tracerutils.set_tag("key_ids", key_ids) rows = yield self._simple_select_many_batch( table="e2e_one_time_keys_json", @@ -161,11 +161,11 @@ class EndToEndKeyWorkerStore(SQLBaseStore): (algorithm, key_id, key json) """ - @trace_function + @tracerutils.trace_function def _add_e2e_one_time_keys(txn): - TracerUtil.set_tag("user_id", user_id) - TracerUtil.set_tag("device_id", device_id) - TracerUtil.set_tag("new_keys", new_keys) + tracerutils.set_tag("user_id", user_id) + tracerutils.set_tag("device_id", device_id) + tracerutils.set_tag("new_keys", 
new_keys) # We are protected from race between lookup and insertion due to # a unique constraint. If there is a race of two calls to # `add_e2e_one_time_keys` then they'll conflict and we will only @@ -221,12 +221,12 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): or the keys were already in the database. """ - @trace_function + @tracerutils.trace_function def _set_e2e_device_keys_txn(txn): - TracerUtil.set_tag("user_id", user_id) - TracerUtil.set_tag("device_id", device_id) - TracerUtil.set_tag("time_now", time_now) - TracerUtil.set_tag("device_keys", device_keys) + tracerutils.set_tag("user_id", user_id) + tracerutils.set_tag("device_id", device_id) + tracerutils.set_tag("time_now", time_now) + tracerutils.set_tag("device_keys", device_keys) old_key_json = self._simple_select_one_onecol_txn( txn, @@ -241,7 +241,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): new_key_json = encode_canonical_json(device_keys).decode("utf-8") if old_key_json == new_key_json: - TracerUtil.log_kv({"event", "key already stored"}) + tracerutils.log_kv({"event", "key already stored"}) return False self._simple_upsert_txn( @@ -258,7 +258,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): def claim_e2e_one_time_keys(self, query_list): """Take a list of one time keys out of the database""" - @trace_function + @tracerutils.trace_function def _claim_e2e_one_time_keys(txn): sql = ( "SELECT key_id, key_json FROM e2e_one_time_keys_json" @@ -280,11 +280,11 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): " AND key_id = ?" 
) for user_id, device_id, algorithm, key_id in delete: - TracerUtil.log_kv( + tracerutils.log_kv( {"message": "executing claim transaction on database"} ) txn.execute(sql, (user_id, device_id, algorithm, key_id)) - TracerUtil.log_kv( + tracerutils.log_kv( {"message": "finished executing and invalidating cache"} ) self._invalidate_cache_and_stream( @@ -295,10 +295,10 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): return self.runInteraction("claim_e2e_one_time_keys", _claim_e2e_one_time_keys) def delete_e2e_keys_by_device(self, user_id, device_id): - @trace_function + @tracerutils.trace_function def delete_e2e_keys_by_device_txn(txn): - TracerUtil.set_tag("user_id", user_id) - TracerUtil.set_tag("device_id", device_id) + tracerutils.set_tag("user_id", user_id) + tracerutils.set_tag("device_id", device_id) self._simple_delete_txn( txn, table="e2e_device_keys_json", |