Replace list() with []
Signed-off-by: SdgJlbl <sarah.diot-girard@owkin.com>
SdgJlbl committed Mar 22, 2023
1 parent 7d5589d commit 94f1c4d
Showing 7 changed files with 33 additions and 35 deletions.
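The change itself is mechanical: every `list()` call used to build an empty list becomes the literal `[]`. As a quick illustration of why the literal form is generally preferred (my note, not part of the commit): `[]` compiles to a single `BUILD_LIST` opcode, while `list()` goes through a global name lookup plus a call, making it slightly slower and vulnerable to the `list` name being shadowed.

```python
# Illustrative sketch (not from the commit): comparing the two spellings.
import dis
import timeit

dis.dis(lambda: [])      # one BUILD_LIST opcode
dis.dis(lambda: list())  # a LOAD_GLOBAL for `list`, then a call

# The literal is consistently faster by a small constant factor:
print(timeit.timeit("[]", number=1_000_000))
print(timeit.timeit("list()", number=1_000_000))
```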
22 changes: 10 additions & 12 deletions substra/sdk/backends/local/backend.py
@@ -160,7 +160,7 @@ def _check_same_data_manager(self, data_manager_key, data_samples):
         # linked to each sample, so no check (already done in the backend).
         if self._db.is_local(data_manager_key, schemas.Type.Dataset):
             same_data_manager = all(
-                [data_manager_key in data_sample.data_manager_keys for data_sample in (data_samples or list())]
+                [data_manager_key in data_sample.data_manager_keys for data_sample in (data_samples or [])]
             )
             if not same_data_manager:
                 raise substra.exceptions.InvalidRequest("A data_sample does not belong to the same dataManager", 400)
@@ -174,7 +174,7 @@ def __compute_permissions(self, permissions):
         updated_permissions = copy.deepcopy(permissions)
         owner = self._org_id
         if updated_permissions.public:
-            updated_permissions.authorized_ids = list()
+            updated_permissions.authorized_ids = []
         elif not updated_permissions.public and owner not in updated_permissions.authorized_ids:
             updated_permissions.authorized_ids.append(owner)
         return updated_permissions
@@ -274,8 +274,8 @@ def _add_function(self, key, spec, spec_options=None):
                 "storage_address": function_description_path,
             },
             metadata=spec.metadata if spec.metadata else dict(),
-            inputs=spec.inputs or list(),
-            outputs=spec.outputs or list(),
+            inputs=spec.inputs or [],
+            outputs=spec.outputs or [],
         )
         return self._db.add(function)

@@ -299,7 +299,7 @@ def _add_dataset(self, key, spec, spec_options=None):
                 },
             },
             type=spec.type,
-            data_sample_keys=list(),
+            data_sample_keys=[],
             opener={"checksum": fs.hash_file(dataset_file_path), "storage_address": dataset_file_path},
             description={
                 "checksum": fs.hash_file(dataset_description_path),
@@ -333,7 +333,7 @@ def _add_data_sample(self, key, spec, spec_options=None):
         return data_sample

     def _add_data_samples(self, spec, spec_options=None):
-        data_samples = list()
+        data_samples = []
         for path in spec.paths:
             data_sample_spec = schemas.DataSampleSpec(
                 path=path,
@@ -386,9 +386,7 @@ def _add_task(self, key, spec, spec_options=None):

         _warn_on_transient_outputs(spec.outputs)

-        in_task_keys = list(
-            {inputref.parent_task_key for inputref in (spec.inputs or list()) if inputref.parent_task_key}
-        )
+        in_task_keys = list({inputref.parent_task_key for inputref in (spec.inputs or []) if inputref.parent_task_key})
         in_tasks = [self._db.get(schemas.Type.Task, in_task_key) for in_task_key in in_task_keys]
         compute_plan_key, rank = self.__create_compute_plan_from_task(spec=spec, in_tasks=in_tasks)

@@ -413,7 +411,7 @@ def _add_task(self, key, spec, spec_options=None):

     def _check_inputs_outputs(self, spec, function_key):
         function = self._db.get(schemas.Type.Function, function_key)
-        spec_inputs = spec.inputs or list()
+        spec_inputs = spec.inputs or []
         spec_outputs = spec.outputs or dict()

         error_msg = ""
@@ -467,7 +465,7 @@ def add(self, spec, spec_options=None, key=None):

     def link_dataset_with_data_samples(self, dataset_key, data_sample_keys) -> List[str]:
         dataset = self._db.get(schemas.Type.Dataset, dataset_key)
-        data_samples = list()
+        data_samples = []
         for key in data_sample_keys:
             data_sample = self._db.get(schemas.Type.DataSample, key)
             if dataset_key not in data_sample.data_manager_keys:
@@ -558,7 +556,7 @@ def add_compute_plan_tasks(self, spec: schemas.UpdateComputePlanTasksSpec, spec_
             visited[task.key] = task.rank

         # Update the task graph with the tasks already in the CP
-        task_graph.update({k: list() for k in visited})
+        task_graph.update({k: [] for k in visited})
         visited = graph.compute_ranks(node_graph=task_graph, ranks=visited)

         compute_plan = self.__execute_compute_plan(spec, compute_plan, visited, tasks, spec_options)
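Most of the call sites above rely on the `x or []` fallback idiom rather than an explicit `is None` check. A standalone sketch of the pattern as `_add_task` uses it (the names below are illustrative, not the SDK's); note that `or` also replaces an explicitly empty list with a fresh one, which is harmless when the value is only iterated:

```python
# Hypothetical, self-contained version of the parent-key collection in
# _add_task: deduplicate parent task keys from an optional list of refs.
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class InputRef:
    parent_task_key: Optional[str] = None

def parent_task_keys(inputs: Optional[List[InputRef]]) -> List[str]:
    # `inputs or []` treats None and an empty list identically.
    return list({ref.parent_task_key for ref in (inputs or []) if ref.parent_task_key})

assert parent_task_keys(None) == []
assert parent_task_keys([InputRef("a"), InputRef("a"), InputRef()]) == ["a"]
```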
6 changes: 3 additions & 3 deletions substra/sdk/backends/local/compute/worker.py
@@ -159,7 +159,7 @@ def _prepare_dataset_input(
     def _prepare_datasample_input(
         self, datasample_input_refs: List[models.InputRef], datasamples: List[models.DataSample], multiple: bool
     ) -> Tuple[List[TaskResource], Dict[str, str]]:
-        task_resources = list()
+        task_resources = []
         data_sample_paths = dict()
         for datasample_input, datasample in zip(datasample_input_refs, datasamples):
             datasample_path_arg = f"{TPL_VOLUME_INPUTS}/{datasample_input.asset_key}"
@@ -263,8 +263,8 @@ def schedule_task(self, task: models.Task):
         dataset: Optional[models.Dataset] = None
         data_sample_paths: Optional[Dict[str, str]] = None

-        datasample_input_refs: List[models.InputRef] = list()
-        datasamples: List[models.DataSample] = list()
+        datasample_input_refs: List[models.InputRef] = []
+        datasamples: List[models.DataSample] = []

         # Prepare inputs
         for task_input in task.inputs:
2 changes: 1 addition & 1 deletion substra/sdk/backends/local/dal.py
@@ -155,7 +155,7 @@ def list(
         else:
             local_assets = self._db.list(type_=type_, filters=filters, order_by=order_by, ascending=ascending)

-        remote_assets = list()
+        remote_assets = []
         if self._remote:
             try:
                 remote_assets = self._remote.list(
2 changes: 1 addition & 1 deletion substra/sdk/compute_plan.py
@@ -20,7 +20,7 @@ def get_dependency_graph(spec: schemas._BaseComputePlanSpec):
             task_graph=task_graph,
             task_id=task.task_id,
             in_model_ids=[
-                input_ref.parent_task_key for input_ref in (task.inputs or list()) if input_ref.parent_task_key
+                input_ref.parent_task_key for input_ref in (task.inputs or []) if input_ref.parent_task_key
             ],
         )
         tasks[task.task_id] = schemas.TaskSpec.from_compute_plan(
4 changes: 2 additions & 2 deletions substra/sdk/graph.py
@@ -12,7 +12,7 @@ def _get_inverted_node_graph(node_graph, node_to_ignore):
         if node not in node_to_ignore:
             for dependency in dependencies:
                 if dependency not in node_to_ignore:
-                    inverted.setdefault(dependency, list())
+                    inverted.setdefault(dependency, [])
                     inverted[dependency].append(node)
     return inverted

@@ -29,7 +29,7 @@ def _breadth_first_traversal_rank(

     while len(queue) > 0:
         current_node = queue.pop(0)
-        for child in inverted_node_graph.get(current_node, list()):
+        for child in inverted_node_graph.get(current_node, []):
             new_child_rank = max(ranks[current_node] + 1, ranks.get(child, -1))

             if new_child_rank != ranks.get(child, -1):
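A side note on `_get_inverted_node_graph` (mine, not the commit's): `dict.setdefault(dependency, [])` still evaluates the `[]` argument on every call, even when the key already exists. With a literal that cost is negligible, but `collections.defaultdict(list)` sidesteps it entirely. A minimal sketch of the same inversion under that assumption:

```python
# Hypothetical rewrite of the inversion using defaultdict: the default
# list is only created when a key is actually missing.
from collections import defaultdict

def invert_graph(node_graph, nodes_to_ignore=frozenset()):
    inverted = defaultdict(list)
    for node, dependencies in node_graph.items():
        if node in nodes_to_ignore:
            continue
        for dependency in dependencies:
            if dependency not in nodes_to_ignore:
                inverted[dependency].append(node)
    return dict(inverted)

# "b" depends on "a", so the inverted graph points "a" at "b".
assert invert_graph({"a": [], "b": ["a"]}) == {"a": ["b"]}
```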
28 changes: 14 additions & 14 deletions substra/sdk/models.py
@@ -105,7 +105,7 @@ def __repr__(self):
     @staticmethod
     def allowed_filters() -> List[str]:
         """allowed fields to filter on"""
-        return list()
+        return []


 class DataSample(_Model):
@@ -139,7 +139,7 @@ class Dataset(_Model):
     owner: str
     permissions: Permissions
     type: str
-    data_sample_keys: List[str] = list()
+    data_sample_keys: List[str] = []
     opener: _File
     description: _File
     metadata: Dict[str, str]
@@ -350,18 +350,18 @@ def allowed_filters() -> List[str]:
 class Performances(_Model):
     """Performances of the different compute tasks of a compute plan"""

-    compute_plan_key: List[str] = list()
-    compute_plan_tag: List[str] = list()
-    compute_plan_status: List[str] = list()
-    compute_plan_start_date: List[datetime] = list()
-    compute_plan_end_date: List[datetime] = list()
-    compute_plan_metadata: List[dict] = list()
-    worker: List[str] = list()
-    task_key: List[str] = list()
-    function_name: List[str] = list()
-    task_rank: List[int] = list()
-    round_idx: List[int] = list()
-    performance: List[float] = list()
+    compute_plan_key: List[str] = []
+    compute_plan_tag: List[str] = []
+    compute_plan_status: List[str] = []
+    compute_plan_start_date: List[datetime] = []
+    compute_plan_end_date: List[datetime] = []
+    compute_plan_metadata: List[dict] = []
+    worker: List[str] = []
+    task_key: List[str] = []
+    function_name: List[str] = []
+    task_rank: List[int] = []
+    round_idx: List[int] = []
+    performance: List[float] = []


 class Organization(schemas._PydanticConfig):
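The class-level defaults in `models.py` (such as `data_sample_keys: List[str] = []`) look like the classic shared-mutable-default pitfall, but these are pydantic models, and pydantic copies field defaults per instance, so the literal is just as safe here as `list()` was. A small sketch demonstrating that behaviour (assuming pydantic, which the SDK already depends on):

```python
# Sketch: pydantic gives every instance its own copy of a mutable default.
from typing import List
from pydantic import BaseModel

class Performances(BaseModel):
    worker: List[str] = []

a = Performances()
b = Performances()
a.worker.append("org-1")
assert b.worker == []  # the default list is not shared between instances
```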
4 changes: 2 additions & 2 deletions substra/sdk/schemas.py
@@ -346,15 +346,15 @@ class FunctionSpec(_Spec):

     @pydantic.validator("inputs")
     def _check_inputs(cls, v):  # noqa: N805
-        inputs = v or list()
+        inputs = v or []
         identifiers = {value.identifier for value in inputs}
         if len(identifiers) != len(inputs):
             raise ValueError("Several function inputs cannot have the same identifier.")
         return v

     @pydantic.validator("outputs")
     def _check_outputs(cls, v):  # noqa: N805
-        outputs = v or list()
+        outputs = v or []
         identifiers = {value.identifier for value in outputs}
         if len(identifiers) != len(outputs):
             raise ValueError("Several function outputs cannot have the same identifier.")
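The two validators above share one idiom: collect the identifiers into a set and compare its size with the input's length; any duplicate shrinks the set and trips the check. A stripped-down sketch of the same logic (names are illustrative, not the SDK's):

```python
# Standalone version of the duplicate-identifier check in FunctionSpec.
from typing import List, Optional

def check_unique_identifiers(identifiers: Optional[List[str]]) -> None:
    values = identifiers or []
    # A set drops duplicates, so a size mismatch means a collision.
    if len(set(values)) != len(values):
        raise ValueError("Several entries cannot have the same identifier.")

check_unique_identifiers(["x", "y"])   # passes silently
check_unique_identifiers(None)         # also fine: treated as empty
# check_unique_identifiers(["x", "x"]) # would raise ValueError
```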
