diff --git a/changelog.d/16283.misc b/changelog.d/16283.misc
new file mode 100644
index 000000000000..4b9d6f76aef9
--- /dev/null
+++ b/changelog.d/16283.misc
@@ -0,0 +1 @@
+Enable additional linting checks.
diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py
index 1310f078e3ac..508de5dcbd2f 100644
--- a/contrib/cmdclient/http.py
+++ b/contrib/cmdclient/http.py
@@ -37,7 +37,6 @@ def put_json(self, url, data):
             Deferred: Succeeds when we get a 2xx HTTP response. The result
             will be the decoded JSON body.
         """
-        pass
 
     def get_json(self, url, args=None):
         """Gets some json from the given host homeserver and path
@@ -53,7 +52,6 @@ def get_json(self, url, args=None):
             Deferred: Succeeds when we get a 2xx HTTP response. The result
             will be the decoded JSON body.
         """
-        pass
 
 
 class TwistedHttpClient(HttpClient):
diff --git a/docker/start.py b/docker/start.py
index aebc7e4aaa52..12c444da9a39 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -239,7 +239,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
         log("Could not find %s, will not use" % (jemallocpath,))
 
     # if there are no config files passed to synapse, try adding the default file
-    if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
+    if not any(p.startswith(("--config-path", "-c")) for p in args):
         config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
         config_path = environ.get(
             "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
diff --git a/pyproject.toml b/pyproject.toml
index c17f4da72d4d..098cd743c2b5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,33 +43,39 @@ target-version = ['py38', 'py39', 'py310', 'py311']
 [tool.ruff]
 line-length = 88
 
-# See https://github.com/charliermarsh/ruff/#pycodestyle
+# See https://beta.ruff.rs/docs/rules/#error-e
 # for error codes. The ones we ignore are:
-# E731: do not assign a lambda expression, use a def
 # E501: Line too long (black enforces this for us)
+# E731: do not assign a lambda expression, use a def
 #
 # flake8-bugbear compatible checks. Its error codes are described at
-# https://github.com/charliermarsh/ruff/#flake8-bugbear
-# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
+# https://beta.ruff.rs/docs/rules/#flake8-bugbear-b
 # B023: Functions defined inside a loop must not use variables redefined in the loop
-# B024: Abstract base class with no abstract method.
 ignore = [
-    "B019",
     "B023",
-    "B024",
     "E501",
     "E731",
 ]
 select = [
-    # pycodestyle checks.
+    # pycodestyle
     "E",
     "W",
-    # pyflakes checks.
+    # pyflakes
     "F",
-    # flake8-bugbear checks.
+    # flake8-bugbear
     "B0",
-    # flake8-comprehensions checks.
+    # flake8-comprehensions
     "C4",
+    # flake8-2020
+    "YTT",
+    # flake8-slots
+    "SLOT",
+    # flake8-debugger
+    "T10",
+    # flake8-pie
+    "PIE",
+    # flake8-executable
+    "EXE",
 ]
 
 [tool.isort]
diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py
index 8058e9c993b1..a0b3854f1b3f 100644
--- a/scripts-dev/mypy_synapse_plugin.py
+++ b/scripts-dev/mypy_synapse_plugin.py
@@ -30,9 +30,10 @@ def get_method_signature_hook(
         self, fullname: str
     ) -> Optional[Callable[[MethodSigContext], CallableType]]:
         if fullname.startswith(
-            "synapse.util.caches.descriptors.CachedFunction.__call__"
-        ) or fullname.startswith(
-            "synapse.util.caches.descriptors._LruCachedFunction.__call__"
+            (
+                "synapse.util.caches.descriptors.CachedFunction.__call__",
+                "synapse.util.caches.descriptors._LruCachedFunction.__call__",
+            )
         ):
             return cached_function_method_signature
         return None
diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py
index f97aecf8d5cd..992ae4388124 100644
--- a/synapse/_scripts/update_synapse_database.py
+++ b/synapse/_scripts/update_synapse_database.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index a9e3d4e55689..5bdfa3a8aced 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -55,7 +55,6 @@ async def persist(
         A method to convert an UnpersistedEventContext to an EventContext, suitable for
         sending to the database with the associated event.
         """
-        pass
 
     @abstractmethod
     async def get_prev_state_ids(
@@ -69,7 +68,6 @@ async def get_prev_state_ids(
             state_filter: specifies the type of state event to fetch from DB, example:
             EventTypes.JoinRules
         """
-        pass
 
 
 @attr.s(slots=True, auto_attribs=True)
diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py
index 70b32cee1794..9b5a3dd5f405 100644
--- a/synapse/media/url_previewer.py
+++ b/synapse/media/url_previewer.py
@@ -846,9 +846,7 @@ def _is_media(content_type: str) -> bool:
 
 def _is_html(content_type: str) -> bool:
     content_type = content_type.lower()
-    return content_type.startswith("text/html") or content_type.startswith(
-        "application/xhtml"
-    )
+    return content_type.startswith(("text/html", "application/xhtml"))
 
 
 def _is_json(content_type: str) -> bool:
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 7619f405fa09..99ebd96f8426 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -62,7 +62,6 @@ class Constraint(metaclass=abc.ABCMeta):
     @abc.abstractmethod
     def make_check_clause(self, table: str) -> str:
         """Returns an SQL expression that checks the row passes the constraint."""
-        pass
 
     @abc.abstractmethod
     def make_constraint_clause_postgres(self) -> str:
@@ -70,7 +69,6 @@ def make_constraint_clause_postgres(self) -> str:
 
         Only used on Postgres DBs
         """
-        pass
 
 
 @attr.s(auto_attribs=True)
diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py
index 8beb077e0a33..04e5b29dc95d 100644
--- a/synmark/suites/logging.py
+++ b/synmark/suites/logging.py
@@ -112,7 +112,7 @@ class Config:
     start = perf_counter()
 
     # Send a bunch of useful messages
-    for i in range(0, loops):
+    for i in range(loops):
         logger.info("test message %s", i)
 
         if len(handler._buffer) == handler.maximum_buffer:
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 9659a4a3553b..79d327499baa 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -223,7 +223,7 @@ def test_delete_device_and_big_device_inbox(self) -> None:
 
         # queue a bunch of messages in the inbox
         requester = create_requester(sender, device_id=DEVICE_ID)
-        for i in range(0, DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10):
+        for i in range(DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10):
             self.get_success(
                 self.device_message_handler.send_device_message(
                     requester, "message_type", {receiver: {"*": {"val": i}}}
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 21d63ab1f297..4fc074241341 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -262,7 +262,7 @@ def test_backfill_with_many_backward_extremities(self) -> None:
             if (ev.type, ev.state_key)
             in {("m.room.create", ""), ("m.room.member", remote_server_user_id)}
         ]
-        for _ in range(0, 8):
+        for _ in range(8):
             event = make_event_from_dict(
                 self.add_hashes_and_signatures_from_other_server(
                     {
diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py
index 5191e31a8ae8..45eac100bf02 100644
--- a/tests/logging/test_remote_handler.py
+++ b/tests/logging/test_remote_handler.py
@@ -78,11 +78,11 @@ def test_log_backpressure_debug(self) -> None:
         logger = self.get_logger(handler)
 
         # Send some debug messages
-        for i in range(0, 3):
+        for i in range(3):
             logger.debug("debug %s" % (i,))
 
         # Send a bunch of useful messages
-        for i in range(0, 7):
+        for i in range(7):
             logger.info("info %s" % (i,))
 
         # The last debug message pushes it past the maximum buffer
@@ -108,15 +108,15 @@ def test_log_backpressure_info(self) -> None:
         logger = self.get_logger(handler)
 
         # Send some debug messages
-        for i in range(0, 3):
+        for i in range(3):
             logger.debug("debug %s" % (i,))
 
         # Send a bunch of useful messages
-        for i in range(0, 10):
+        for i in range(10):
             logger.warning("warn %s" % (i,))
 
         # Send a bunch of info messages
-        for i in range(0, 3):
+        for i in range(3):
             logger.info("info %s" % (i,))
 
         # The last debug message pushes it past the maximum buffer
@@ -144,7 +144,7 @@ def test_log_backpressure_cut_middle(self) -> None:
         logger = self.get_logger(handler)
 
         # Send a bunch of useful messages
-        for i in range(0, 20):
+        for i in range(20):
             logger.warning("warn %s" % (i,))
 
         # Allow the reconnection
diff --git a/tests/replication/tcp/streams/test_to_device.py b/tests/replication/tcp/streams/test_to_device.py
index fb9eac668f19..ab379e8cf1eb 100644
--- a/tests/replication/tcp/streams/test_to_device.py
+++ b/tests/replication/tcp/streams/test_to_device.py
@@ -49,7 +49,7 @@ def test_to_device_stream(self) -> None:
 
         # add messages to the device inbox for user1 up until the
         # limit defined for a stream update batch
-        for i in range(0, _STREAM_UPDATE_TARGET_ROW_COUNT):
+        for i in range(_STREAM_UPDATE_TARGET_ROW_COUNT):
             msg["content"] = {"device": {}}
             messages = {user1: {"device": msg}}
 
diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py
index 4c7864c629f3..0e2824d1b532 100644
--- a/tests/rest/admin/test_federation.py
+++ b/tests/rest/admin/test_federation.py
@@ -510,7 +510,7 @@ def _create_destinations(self, number_destinations: int) -> None:
         Args:
             number_destinations: Number of destinations to be created
         """
-        for i in range(0, number_destinations):
+        for i in range(number_destinations):
             dest = f"sub{i}.example.com"
             self._create_destination(dest, 50, 50, 50, 100)
 
@@ -690,7 +690,7 @@ def test_order_direction(self) -> None:
         self._check_fields(channel_desc.json_body["rooms"])
 
         # test that both lists have different directions
-        for i in range(0, number_rooms):
+        for i in range(number_rooms):
             self.assertEqual(
                 channel_asc.json_body["rooms"][i]["room_id"],
                 channel_desc.json_body["rooms"][number_rooms - 1 - i]["room_id"],
@@ -777,7 +777,7 @@ def _create_destination_rooms(self, number_rooms: int) -> None:
         Args:
             number_rooms: Number of rooms to be created
         """
-        for _ in range(0, number_rooms):
+        for _ in range(number_rooms):
             room_id = self.helper.create_room_as(
                 self.admin_user, tok=self.admin_user_tok
             )
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index e9f495e20671..a090b91ac821 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -562,7 +562,7 @@ def test_background_update_deletes_deactivated_users_server_side_backup_keys(
 
         # create a bunch of users and add keys for them
         users = []
-        for i in range(0, 20):
+        for i in range(20):
             user_id = self.register_user("missPiggy" + str(i), "test")
             users.append((user_id,))
 
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index a2a65895647f..768d7ad4c217 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -176,10 +176,10 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
     def test_POST_ratelimiting_per_address(self) -> None:
         # Create different users so we're sure not to be bothered by the per-user
         # ratelimiter.
-        for i in range(0, 6):
+        for i in range(6):
             self.register_user("kermit" + str(i), "monkey")
 
-        for i in range(0, 6):
+        for i in range(6):
             params = {
                 "type": "m.login.password",
                 "identifier": {"type": "m.id.user", "user": "kermit" + str(i)},
@@ -228,7 +228,7 @@ def test_POST_ratelimiting_per_address(self) -> None:
     def test_POST_ratelimiting_per_account(self) -> None:
         self.register_user("kermit", "monkey")
 
-        for i in range(0, 6):
+        for i in range(6):
             params = {
                 "type": "m.login.password",
                 "identifier": {"type": "m.id.user", "user": "kermit"},
@@ -277,7 +277,7 @@ def test_POST_ratelimiting_per_account(self) -> None:
     def test_POST_ratelimiting_per_account_failed_attempts(self) -> None:
         self.register_user("kermit", "monkey")
 
-        for i in range(0, 6):
+        for i in range(6):
             params = {
                 "type": "m.login.password",
                 "identifier": {"type": "m.id.user", "user": "kermit"},
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index c33393dc284b..ba4e017a0e80 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -169,7 +169,7 @@ def test_POST_disabled_guest_registration(self) -> None:
 
     @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}})
     def test_POST_ratelimiting_guest(self) -> None:
-        for i in range(0, 6):
+        for i in range(6):
             url = self.url + b"?kind=guest"
             channel = self.make_request(b"POST", url, b"{}")
 
@@ -187,7 +187,7 @@ def test_POST_ratelimiting_guest(self) -> None:
 
     @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}})
     def test_POST_ratelimiting(self) -> None:
-        for i in range(0, 6):
+        for i in range(6):
             request_data = {
                 "username": "kermit" + str(i),
                 "password": "monkey",
@@ -1223,7 +1223,7 @@ def test_GET_token_invalid(self) -> None:
     def test_GET_ratelimiting(self) -> None:
         token = "1234"
 
-        for i in range(0, 6):
+        for i in range(6):
             channel = self.make_request(
                 b"GET",
                 f"{self.url}?token={token}",
diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py
index 650b4941bab6..35f77052a729 100644
--- a/tests/storage/databases/main/test_lock.py
+++ b/tests/storage/databases/main/test_lock.py
@@ -382,7 +382,7 @@ def test_maintain_lock(self) -> None:
         self.get_success(lock.__aenter__())
 
         # Wait for ages with the lock, we should not be able to get the lock.
-        for _ in range(0, 10):
+        for _ in range(10):
             self.reactor.advance((_RENEWAL_INTERVAL_MS / 1000))
 
         lock2 = self.get_success(
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 48ebfadaab31..b55dd07f1496 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -664,7 +664,7 @@ def test_background_update_single_large_room(self) -> None:
 
         # Add a bunch of state so that it takes multiple iterations of the
         # background update to process the room.
-        for i in range(0, 150):
+        for i in range(150):
             self.helper.send_state(
                 room_id, event_type="m.test", body={"index": i}, tok=self.token
             )
@@ -718,12 +718,12 @@ def test_background_update_multiple_large_room(self) -> None:
 
         # Add a bunch of state so that it takes multiple iterations of the
         # background update to process the room.
-        for i in range(0, 150):
+        for i in range(150):
             self.helper.send_state(
                 room_id1, event_type="m.test", body={"index": i}, tok=self.token
             )
 
-        for i in range(0, 150):
+        for i in range(150):
             self.helper.send_state(
                 room_id2, event_type="m.test", body={"index": i}, tok=self.token
             )
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 7a4ecab2d534..d3e20f44b29a 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -227,7 +227,7 @@ def insert_event(txn: Cursor, i: int) -> None:
                 (room_id, event_id),
             )
 
-        for i in range(0, 20):
+        for i in range(20):
             self.get_success(
                 self.store.db_pool.runInteraction("insert", insert_event, i)
             )
@@ -235,7 +235,7 @@ def insert_event(txn: Cursor, i: int) -> None:
         # this should get the last ten
         r = self.get_success(self.store.get_prev_events_for_room(room_id))
         self.assertEqual(10, len(r))
-        for i in range(0, 10):
+        for i in range(10):
             self.assertEqual("$event_%i:local" % (19 - i), r[i])
 
     def test_get_rooms_with_many_extremities(self) -> None:
@@ -277,7 +277,7 @@ def insert_event(txn: LoggingTransaction, i: int, room_id: str) -> None:
                 (room_id, event_id),
             )
 
-        for i in range(0, 20):
+        for i in range(20):
             self.get_success(
                 self.store.db_pool.runInteraction("insert", insert_event, i, room1)
             )
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index fe5bb7791336..95f99f413011 100644
--- a/tests/storage/test_profile.py
+++ b/tests/storage/test_profile.py
@@ -82,7 +82,7 @@ def f(txn: LoggingTransaction) -> None:
 
         self.get_success(self.store.db_pool.runInteraction("", f))
 
-        for i in range(0, 70):
+        for i in range(70):
             self.get_success(
                 self.store.db_pool.simple_insert(
                     "profiles",
@@ -115,7 +115,7 @@ def f(txn: LoggingTransaction) -> None:
         )
 
         expected_values = []
-        for i in range(0, 70):
+        for i in range(70):
             expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))
 
         res = self.get_success(
diff --git a/tests/storage/test_txn_limit.py b/tests/storage/test_txn_limit.py
index 15ea4770bd7e..22f074982f98 100644
--- a/tests/storage/test_txn_limit.py
+++ b/tests/storage/test_txn_limit.py
@@ -38,5 +38,5 @@ def do_select(txn: Cursor) -> None:
         db_pool = self.hs.get_datastores().databases[0]
 
         # force txn limit to roll over at least once
-        for _ in range(0, 1001):
+        for _ in range(1001):
             self.get_success_or_raise(db_pool.runInteraction("test_select", do_select))
diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py
index bab802f56ec6..d4637d9d1ebb 100644
--- a/tests/storage/test_user_filters.py
+++ b/tests/storage/test_user_filters.py
@@ -45,7 +45,7 @@ def f(txn: LoggingTransaction) -> None:
 
         self.get_success(self.store.db_pool.runInteraction("", f))
 
-        for i in range(0, 70):
+        for i in range(70):
             self.get_success(
                 self.store.db_pool.simple_insert(
                     "user_filters",
@@ -82,7 +82,7 @@ def f(txn: LoggingTransaction) -> None:
         )
 
         expected_values = []
-        for i in range(0, 70):
+        for i in range(70):
             expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))
 
         res = self.get_success(
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index a46c29ddf4e4..434902c3f096 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -51,12 +51,12 @@ def test_filtering(self) -> None:
         # before we do that, we persist some other events to act as state.
        self._inject_visibility("@admin:hs", "joined")
 
-        for i in range(0, 10):
+        for i in range(10):
            self._inject_room_member("@resident%i:hs" % i)
 
         events_to_filter = []
 
-        for i in range(0, 10):
+        for i in range(10):
             user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
             evt = self._inject_room_member(user, extra_content={"a": "b"})
             events_to_filter.append(evt)
@@ -74,7 +74,7 @@ def test_filtering(self) -> None:
         )
 
         # the result should be 5 redacted events, and 5 unredacted events.
-        for i in range(0, 5):
+        for i in range(5):
             self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
             self.assertNotIn("a", filtered[i].content)
 
@@ -177,7 +177,7 @@ def test_erased_user(self) -> None:
             )
         )
 
-        for i in range(0, len(events_to_filter)):
+        for i in range(len(events_to_filter)):
             self.assertEqual(
                 events_to_filter[i].event_id,
                 filtered[i].event_id,
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 064f4987dfeb..168419f440fb 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -623,14 +623,14 @@ def func(self, key: int) -> int:
 
         a = A()
 
-        for k in range(0, 12):
+        for k in range(12):
             yield a.func(k)
 
         self.assertEqual(callcount[0], 12)
 
         # There must have been at least 2 evictions, meaning if we calculate
         # all 12 values again, we must get called at least 2 more times
-        for k in range(0, 12):
+        for k in range(12):
             yield a.func(k)
 
         self.assertTrue(