Refactor task normalization
Fixes: #4177
ssbarnea committed May 31, 2024
1 parent fc87a86 commit d1aff67
Showing 3 changed files with 65 additions and 55 deletions.
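In short, this commit removes the module-level normalize_task(task, filename) helper, folds its behaviour into a private Task._normalize_task() method, and changes normalize_task_v2() to accept a Task wrapper instead of a raw task dict. A minimal before/after sketch of the calling convention, based only on the signatures visible in this diff (the snippet itself is illustrative and not part of the commit):

from ansiblelint.utils import Task, normalize_task_v2

raw = {"name": "hello", "pip": "name=df editable=false"}

# Before this commit, callers passed the raw dict plus a filename:
#   normalized = normalize_task(raw, "tasks.yml")

# After this commit, the raw dict is wrapped in a Task first:
task = Task(raw, filename="tasks.yml")
normalized = normalize_task_v2(task)  # the v2 helper now expects a Task
# ...or, as the library does internally, via the lazy property that
# delegates to Task._normalize_task():
normalized = task.normalized_task
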
10 changes: 5 additions & 5 deletions src/ansiblelint/runner.py
@@ -499,7 +499,7 @@ def find_children(self, lintable: Lintable) -> list[Lintable]:
for item in ansiblelint.utils.playbook_items(playbook_ds):
# if lintable.kind not in ["playbook"]:
for child in self.play_children(
lintable.path.parent,
lintable,
item,
lintable.kind,
playbook_dir,
@@ -525,19 +525,19 @@ def find_children(self, lintable: Lintable) -> list[Lintable]:

def play_children(
self,
basedir: Path,
lintable: Lintable,
item: tuple[str, Any],
parent_type: FileType,
playbook_dir: str,
) -> list[Lintable]:
"""Flatten the traversed play tasks."""
# pylint: disable=unused-argument

basedir = lintable.path.parent
handlers = HandleChildren(self.rules, app=self.app)

delegate_map: dict[
str,
Callable[[str, Any, Any, FileType], list[Lintable]],
Callable[[Lintable, Any, Any, FileType], list[Lintable]],
] = {
"tasks": handlers.taskshandlers_children,
"pre_tasks": handlers.taskshandlers_children,
@@ -565,7 +565,7 @@ def play_children(
{"playbook_dir": PLAYBOOK_DIR or str(basedir.resolve())},
fail_on_undefined=False,
)
return delegate_map[k](str(basedir), k, v, parent_type)
return delegate_map[k](lintable, k, v, parent_type)
return []

def plugin_children(self, lintable: Lintable) -> list[Lintable]:
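The net effect in runner.py: play_children() and every handler wired into delegate_map now receive the Lintable itself rather than a pre-computed basedir string, and each handler derives the base directory from lintable.path.parent on its own. A self-contained sketch of that convention follows; the Lintable class below is a hypothetical stand-in for ansible-lint's real one, used only so the example runs on its own:

from pathlib import Path
from typing import Any


class Lintable:  # hypothetical stand-in for ansible-lint's Lintable
    def __init__(self, path: str, kind: str = "playbook") -> None:
        self.path = Path(path)
        self.kind = kind


def taskshandlers_children(
    lintable: Lintable,  # previously: basedir: str
    k: str,
    v: Any,
    parent_type: str,
) -> list[Lintable]:
    # New convention: the handler resolves the base directory itself
    # instead of being handed a basedir string by the caller.
    basedir = str(lintable.path.parent)
    print(f"resolving {k!r} children relative to {basedir}")
    return []


delegate_map = {
    "tasks": taskshandlers_children,
    "pre_tasks": taskshandlers_children,
}

lintable = Lintable("examples/playbooks/site.yml")
delegate_map["tasks"](lintable, "tasks", [{"name": "demo"}], "playbook")
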
57 changes: 29 additions & 28 deletions src/ansiblelint/utils.py
@@ -287,12 +287,13 @@ class HandleChildren:

def include_children(
self,
basedir: str,
lintable: Lintable,
k: str,
v: Any,
parent_type: FileType,
) -> list[Lintable]:
"""Include children."""
basedir = str(lintable.path.parent)
# import_playbook only accepts a string as argument (no dict syntax)
if k in (
"import_playbook",
@@ -332,12 +333,13 @@ def include_children(

def taskshandlers_children(
self,
basedir: str,
lintable: Lintable,
k: str,
v: None | Any,
parent_type: FileType,
) -> list[Lintable]:
"""TasksHandlers Children."""
basedir = str(lintable.path.parent)
results: list[Lintable] = []
if v is None or isinstance(v, int | str):
raise MatchError(
@@ -360,14 +362,14 @@
continue

if any(x in task_handler for x in ROLE_IMPORT_ACTION_NAMES):
task_handler = normalize_task_v2(task_handler)
task_handler = normalize_task_v2(Task(task_handler))
self._validate_task_handler_action_for_role(task_handler["action"])
name = task_handler["action"].get("name")
if has_jinja(name):
# we cannot deal with dynamic imports
continue
results.extend(
self.roles_children(basedir, k, [name], parent_type),
self.roles_children(lintable, k, [name], parent_type),
)
continue

@@ -378,7 +380,7 @@
if elem in task_handler:
results.extend(
self.taskshandlers_children(
basedir,
lintable,
k,
task_handler[elem],
parent_type,
@@ -410,12 +412,13 @@ def _validate_task_handler_action_for_role(self, th_action: dict[str, Any]) -> None:

def roles_children(
self,
basedir: str,
lintable: Lintable,
k: str,
v: Sequence[Any],
parent_type: FileType,
) -> list[Lintable]:
"""Roles children."""
basedir = str(lintable.path.parent)
# pylint: disable=unused-argument # parent_type)
results: list[Lintable] = []
if not v:
@@ -549,13 +552,14 @@ def _extract_ansible_parsed_keys_from_task(
return result


def normalize_task_v2(task: dict[str, Any]) -> dict[str, Any]:
def normalize_task_v2(task: Task) -> dict[str, Any]:
"""Ensure tasks have a normalized action key and strings are converted to python objects."""
raw_task = task.raw_task
result: dict[str, Any] = {}
ansible_parsed_keys = ("action", "local_action", "args", "delegate_to")

if is_nested_task(task):
_extract_ansible_parsed_keys_from_task(result, task, ansible_parsed_keys)
if is_nested_task(raw_task):
_extract_ansible_parsed_keys_from_task(result, raw_task, ansible_parsed_keys)
# Add dummy action for block/always/rescue statements
result["action"] = {
"__ansible_module__": "block/always/rescue",
@@ -564,19 +568,20 @@ def normalize_task_v2(task: dict[str, Any]) -> dict[str, Any]:

return result

sanitized_task = _sanitize_task(task)
sanitized_task = _sanitize_task(raw_task)
mod_arg_parser = ModuleArgsParser(sanitized_task)

try:
action, arguments, result["delegate_to"] = mod_arg_parser.parse(
skip_action_validation=options.skip_action_validation,
)
except AnsibleParserError as exc:
# breakpoint()
raise MatchError(
rule=AnsibleParserErrorRule(),
message=exc.message,
filename=task.get(FILENAME_KEY, "Unknown"),
lineno=task.get(LINE_NUMBER_KEY, 0),
filename=task.filename or "Unknown",
lineno=raw_task.get(LINE_NUMBER_KEY, 1),
) from exc

# denormalize shell -> command conversion
@@ -586,7 +591,7 @@ def normalize_task_v2(task: dict[str, Any]) -> dict[str, Any]:

_extract_ansible_parsed_keys_from_task(
result,
task,
raw_task,
(*ansible_parsed_keys, action),
)

@@ -608,17 +613,6 @@ def normalize_task_v2(task: dict[str, Any]) -> dict[str, Any]:
return result


def normalize_task(task: dict[str, Any], filename: str) -> dict[str, Any]:
"""Unify task-like object structures."""
ansible_action_type = task.get("__ansible_action_type__", "task")
if "__ansible_action_type__" in task:
del task["__ansible_action_type__"]
task = normalize_task_v2(task)
task[FILENAME_KEY] = filename
task["__ansible_action_type__"] = ansible_action_type
return task


def task_to_str(task: dict[str, Any]) -> str:
"""Make a string identifier for the given task."""
name = task.get("name")
@@ -742,10 +736,7 @@ def normalized_task(self) -> dict[str, Any]:
"""Return the name of the task."""
if not hasattr(self, "_normalized_task"):
try:
self._normalized_task = normalize_task(
self.raw_task,
filename=self.filename,
)
self._normalized_task = self._normalize_task()
except MatchError as err:
self.error = err
# When we cannot normalize it, we just use the raw task instead
@@ -756,6 +747,16 @@ def normalized_task(self) -> dict[str, Any]:
raise TypeError(msg)
return self._normalized_task

def _normalize_task(self) -> dict[str, Any]:
"""Unify task-like object structures."""
ansible_action_type = self.raw_task.get("__ansible_action_type__", "task")
if "__ansible_action_type__" in self.raw_task:
del self.raw_task["__ansible_action_type__"]

Check warning (Codecov / codecov/patch): added line src/ansiblelint/utils.py#L754 was not covered by tests.

task = normalize_task_v2(self)
task[FILENAME_KEY] = self.filename
task["__ansible_action_type__"] = ansible_action_type
return task

@property
def skip_tags(self) -> list[str]:
"""Return the list of tags to skip."""
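For orientation, a rough sketch of the normalized form that the new code path produces for a simple task. Key names are taken from the hunks above (FILENAME_KEY, __ansible_action_type__, the __ansible_module__ action key); the real output may carry additional bookkeeping keys, so treat the shape as approximate:

from ansiblelint.utils import Task

task = Task(
    {"name": "hello", "pip": "name=df editable=false"},
    filename="tasks.yml",
)
normalized = task.normalized_task
# Roughly:
# {
#     "name": "hello",
#     "action": {"__ansible_module__": "pip", "name": "df", "editable": "false"},
#     "delegate_to": None,
#     FILENAME_KEY: "tasks.yml",           # set by Task._normalize_task()
#     "__ansible_action_type__": "task",   # restored by Task._normalize_task()
# }
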
53 changes: 31 additions & 22 deletions test/test_utils.py
@@ -114,37 +114,42 @@ def test_normalize(
alternate_forms: tuple[dict[str, Any]],
) -> None:
"""Test that tasks specified differently are normalized same way."""
normal_form = utils.normalize_task(reference_form, "tasks.yml")
task = utils.Task(reference_form, filename="tasks.yml")
normal_form = task._normalize_task() # noqa: SLF001

for form in alternate_forms:
assert normal_form == utils.normalize_task(form, "tasks.yml")
task2 = utils.Task(form, filename="tasks.yml")
assert normal_form == task2._normalize_task() # noqa: SLF001


def test_normalize_complex_command() -> None:
"""Test that tasks specified differently are normalized same way."""
task1 = {
"name": "hello",
"action": {"module": "pip", "name": "df", "editable": "false"},
}
task2 = {"name": "hello", "pip": {"name": "df", "editable": "false"}}
task3 = {"name": "hello", "pip": "name=df editable=false"}
task4 = {"name": "hello", "action": "pip name=df editable=false"}
assert utils.normalize_task(task1, "tasks.yml") == utils.normalize_task(
task2,
"tasks.yml",
task1 = utils.Task(
{
"name": "hello",
"action": {"module": "pip", "name": "df", "editable": "false"},
},
filename="tasks.yml",
)
task2 = utils.Task(
{"name": "hello", "pip": {"name": "df", "editable": "false"}},
filename="tasks.yml",
)
assert utils.normalize_task(task2, "tasks.yml") == utils.normalize_task(
task3,
"tasks.yml",
task3 = utils.Task(
{"name": "hello", "pip": "name=df editable=false"},
filename="tasks.yml",
)
assert utils.normalize_task(task3, "tasks.yml") == utils.normalize_task(
task4,
"tasks.yml",
task4 = utils.Task(
{"name": "hello", "action": "pip name=df editable=false"},
filename="tasks.yml",
)
assert task1._normalize_task() == task2._normalize_task() # noqa: SLF001
assert task2._normalize_task() == task3._normalize_task() # noqa: SLF001
assert task3._normalize_task() == task4._normalize_task() # noqa: SLF001


@pytest.mark.parametrize(
("task", "expected_form"),
("task_raw", "expected_form"),
(
pytest.param(
{
@@ -192,8 +197,12 @@ def test_normalize_complex_command() -> None:
),
),
)
def test_normalize_task_v2(task: dict[str, Any], expected_form: dict[str, Any]) -> None:
def test_normalize_task_v2(
task_raw: dict[str, Any],
expected_form: dict[str, Any],
) -> None:
"""Check that it normalizes task and returns the expected form."""
task = utils.Task(task_raw)
assert utils.normalize_task_v2(task) == expected_form


@@ -263,8 +272,8 @@ def test_template(template: str, output: str) -> None:

def test_task_to_str_unicode() -> None:
"""Ensure that extracting messages from tasks preserves Unicode."""
task = {"fail": {"msg": "unicode é ô à"}}
result = utils.task_to_str(utils.normalize_task(task, "filename.yml"))
task = utils.Task({"fail": {"msg": "unicode é ô à"}}, filename="filename.yml")
result = utils.task_to_str(task._normalize_task()) # noqa: SLF001
assert result == "fail msg=unicode é ô à"


