
Commit 6fa257b

Authored by Sébastien Han <seb@redhat.com>
chore(lint): update Ruff ignores for project conventions and maintainability (#1184)
- Added new ignores from flake8-bugbear (`B007`, `B008`)
- Ignored `C901` (high function complexity) for now, pending review
- Maintained PyTorch conventions (`N812`, `N817`)
- Allowed `E731` (lambda assignments) for flexibility
- Consolidated existing ignores (`E402`, `E501`, `F405`, `C408`, `N812`)
- Documented rationale for each ignored rule

This keeps our linting aligned with project needs while tracking potential fixes.

Signed-off-by: Sébastien Han <seb@redhat.com>
1 parent 3b57d8e commit 6fa257b
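The two new flake8-bugbear ignores mentioned above (`B007`, `B008`) target common Python patterns. As a rough illustration only, with invented names and code that is not part of llama_stack, the sketch below shows what each rule normally flags:

# Hypothetical illustrations of the newly ignored bugbear rules; not repository code.
from datetime import datetime


def count_values(buckets: dict) -> int:
    total = 0
    for name, values in buckets.items():  # B007: loop variable `name` is never used in the body
        total += len(values)
    return total


def log_event(message: str, when: datetime = datetime.now()):  # B008: function call in a default argument
    # The default is evaluated once at definition time rather than per call,
    # which is why B008 normally warns; ignoring it accepts this style.
    print(when.isoformat(), message)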

File tree: 33 files changed, +113 / -145 lines

llama_stack/cli/stack/_build.py

Lines changed: 1 addition & 1 deletion
@@ -141,7 +141,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
             completer=WordCompleter(available_providers),
             complete_while_typing=True,
             validator=Validator.from_callable(
-                lambda x: x in available_providers,
+                lambda x: x in available_providers,  # noqa: B023 - see https://github.com/astral-sh/ruff/issues/7847
                 error_message="Invalid provider, use <TAB> to see options",
             ),
         )
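The `# noqa: B023` added here points at a known Ruff false positive (the linked issue). For orientation only, and with made-up names, B023 is meant to catch closures created in a loop that capture the loop variable and are called later, after the variable has moved on:

# Hypothetical sketch of the late-binding pattern B023 targets; not repository code.
def make_checkers(option_sets):
    checkers = []
    for options in option_sets:
        # B023: the lambda closes over `options`, so every checker sees the *last*
        # value of the loop variable once the loop has finished.
        checkers.append(lambda x: x in options)
    return checkers


# Binding the current value explicitly avoids the surprise:
#     checkers.append(lambda x, options=options: x in options)

In `_build.py` the lambda appears to be consumed immediately within the same iteration, so the warning is treated as a false positive and silenced with a pointer to the upstream issue rather than restructured.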

llama_stack/cli/tests/test_stack_config.py

Lines changed: 1 addition & 1 deletion
@@ -112,7 +112,7 @@ def test_parse_and_maybe_upgrade_config_old_format(old_config):
 
     inference_providers = result.providers["inference"]
     assert len(inference_providers) == 2
-    assert set(x.provider_id for x in inference_providers) == {
+    assert {x.provider_id for x in inference_providers} == {
         "remote::ollama-00",
         "meta-reference-01",
     }

llama_stack/distribution/distribution.py

Lines changed: 2 additions & 2 deletions
@@ -13,7 +13,7 @@
 
 
 def stack_apis() -> List[Api]:
-    return [v for v in Api]
+    return list(Api)
 
 
 class AutoRoutedApiInfo(BaseModel):

@@ -55,7 +55,7 @@ def builtin_automatically_routed_apis() -> List[AutoRoutedApiInfo]:
 
 
 def providable_apis() -> List[Api]:
-    routing_table_apis = set(x.routing_table_api for x in builtin_automatically_routed_apis())
+    routing_table_apis = {x.routing_table_api for x in builtin_automatically_routed_apis()}
     return [api for api in Api if api not in routing_table_apis and api != Api.inspect]
llama_stack/distribution/resolver.py

Lines changed: 2 additions & 2 deletions
@@ -115,8 +115,8 @@ async def resolve_impls(
     - flatmaps, sorts and resolves the providers in dependency order
     - for each API, produces either a (local, passthrough or router) implementation
     """
-    routing_table_apis = set(x.routing_table_api for x in builtin_automatically_routed_apis())
-    router_apis = set(x.router_api for x in builtin_automatically_routed_apis())
+    routing_table_apis = {x.routing_table_api for x in builtin_automatically_routed_apis()}
+    router_apis = {x.router_api for x in builtin_automatically_routed_apis()}
 
     providers_with_specs = {}
llama_stack/distribution/ui/page/playground/rag.py

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ def rag_chat_page():
             dict(
                 name="builtin::rag/knowledge_search",
                 args={
-                    "vector_db_ids": [vector_db_id for vector_db_id in selected_vector_dbs],
+                    "vector_db_ids": list(selected_vector_dbs),
                 },
             )
         ],
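This change, together with the ones in `distribution.py`, `resolver.py`, and the `agent_instance.py` diff below, applies the comprehension idioms that Ruff's flake8-comprehensions checks encourage. A minimal sketch with made-up data rather than the project's types:

# Equivalent spellings; the second form in each pair is the one Ruff prefers.
from enum import Enum


class Color(Enum):
    RED = 1
    GREEN = 2


providers = [{"provider_id": "a"}, {"provider_id": "b"}]

colors = [v for v in Color]                       # unnecessary comprehension over an iterable
colors = list(Color)                              # preferred: call list() directly

ids = set(p["provider_id"] for p in providers)    # generator expression passed to set()
ids = {p["provider_id"] for p in providers}       # preferred: set comprehension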

llama_stack/providers/inline/agents/meta_reference/agent_instance.py

Lines changed: 3 additions & 3 deletions
@@ -797,10 +797,10 @@ async def _get_tool_defs(
         self, toolgroups_for_turn: Optional[List[AgentToolGroup]] = None
     ) -> Tuple[List[ToolDefinition], Dict[str, str]]:
         # Determine which tools to include
-        agent_config_toolgroups = set(
-            (toolgroup.name if isinstance(toolgroup, AgentToolGroupWithArgs) else toolgroup)
+        agent_config_toolgroups = {
+            toolgroup.name if isinstance(toolgroup, AgentToolGroupWithArgs) else toolgroup
             for toolgroup in self.agent_config.toolgroups
-        )
+        }
         toolgroups_for_turn_set = (
             agent_config_toolgroups
             if toolgroups_for_turn is None

llama_stack/providers/inline/eval/meta_reference/eval.py

Lines changed: 0 additions & 1 deletion
@@ -86,7 +86,6 @@ async def run_eval(
     ) -> Job:
         task_def = self.benchmarks[benchmark_id]
         dataset_id = task_def.dataset_id
-        candidate = task_config.eval_candidate
         scoring_functions = task_def.scoring_functions
         dataset_def = await self.datasets_api.get_dataset(dataset_id=dataset_id)
         validate_dataset_schema(dataset_def.dataset_schema, get_valid_schemas(Api.eval.value))

llama_stack/providers/inline/inference/meta_reference/inference.py

Lines changed: 0 additions & 1 deletion
@@ -208,7 +208,6 @@ def impl():
         logprobs = []
         stop_reason = None
 
-        tokenizer = self.generator.formatter.tokenizer
         for token_result in self.generator.completion(request):
             tokens.append(token_result.token)
             if token_result.text == "<|eot_id|>":

llama_stack/providers/inline/inference/meta_reference/parallel_utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -207,7 +207,7 @@ def maybe_parse_message(maybe_json: Optional[str]) -> Optional[ProcessingMessage
207207
return parse_message(maybe_json)
208208
except json.JSONDecodeError:
209209
return None
210-
except ValueError as e:
210+
except ValueError:
211211
return None
212212

213213

@@ -352,7 +352,7 @@ def run_inference(
352352
if isinstance(obj, TaskResponse):
353353
yield obj.result
354354

355-
except GeneratorExit as e:
355+
except GeneratorExit:
356356
self.request_socket.send(encode_msg(CancelSentinel()))
357357
while True:
358358
obj_json = self.request_socket.send()
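The removals in `eval.py`, `inference.py`, and the two `except` clauses above all drop names that are bound but never read, which Ruff's pyflakes-derived unused-variable checks flag. A small, hypothetical sketch of both shapes of the problem:

# Invented examples of assigned-but-unused bindings; not repository code.
import json


def load_settings(raw: str) -> dict:
    defaults = {"retries": 3}  # flagged: `defaults` is never read below
    return json.loads(raw)


def load_or_none(raw: str):
    try:
        return json.loads(raw)
    except ValueError as exc:  # flagged: `exc` is never used; a bare `except ValueError:` suffices
        return None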

llama_stack/providers/inline/inference/meta_reference/quantization/fp8_txest_disabled.py

Lines changed: 3 additions & 0 deletions
@@ -7,6 +7,9 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
 # This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
 
+# The file gets a special treatment for now?
+# ruff: noqa: N803
+
 import unittest
 
 import torch
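The file-level `# ruff: noqa: N803` directive added above disables the pep8-naming check on argument names for the whole file, which fits numeric code written in PyTorch's capitalized-matrix style (the commit message keeps `N812` and `N817` ignored for the same reason). A hypothetical sketch of what N803 would otherwise flag and how the directive is scoped:

# ruff: noqa: N803  -- file-scoped: silences "argument name should be lowercase" for everything below
import torch


def scale(W: torch.Tensor, X: torch.Tensor) -> torch.Tensor:
    # Without the directive, N803 would flag the uppercase parameters `W` and `X`,
    # even though single capital letters are conventional for matrices.
    return W @ X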
