From 5c65200e6160cde4e2873aa3fdfce1adb912b734 Mon Sep 17 00:00:00 2001 From: Fabio Zadrozny Date: Wed, 25 May 2022 16:04:55 -0300 Subject: [PATCH] Properly support constructs with nested usage of `Run Keyword` inside `Run Keyword`. Fixes #686 --- .../.settings/org.python.pydev.yaml | 4 +- robotframework-ls/docs/changelog.md | 1 + .../src/robotframework_ls/impl/ast_utils.py | 436 +++--------------- .../impl/ast_utils_keyword_usage.py | 432 +++++++++++++++++ .../robotframework_ls/impl/find_definition.py | 1 - .../src/robotframework_ls/impl/protocols.py | 2 +- .../robotframework_ls/impl/semantic_tokens.py | 20 +- .../robotframework_ls_tests/test_ast_utils.py | 202 ++++---- ...est_run_keyword_in_run_keyword_2_types.yml | 8 + ...st_run_keyword_in_run_keyword_2_usages.yml | 6 + .../test_run_keyword_in_run_keyword_types.yml | 5 + ...test_run_keyword_in_run_keyword_usages.yml | 5 + .../test_semantic_highlighting.py | 20 + 13 files changed, 665 insertions(+), 477 deletions(-) create mode 100644 robotframework-ls/src/robotframework_ls/impl/ast_utils_keyword_usage.py create mode 100644 robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_2_types.yml create mode 100644 robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_2_usages.yml create mode 100644 robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_types.yml create mode 100644 robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_usages.yml diff --git a/robotframework-ls/.settings/org.python.pydev.yaml b/robotframework-ls/.settings/org.python.pydev.yaml index a0ec7e5536..c8bac11908 100644 --- a/robotframework-ls/.settings/org.python.pydev.yaml +++ b/robotframework-ls/.settings/org.python.pydev.yaml @@ -16,9 +16,9 @@ MULTI_BLOCK_COMMENT_SHOW_ONLY_CLASS_NAME: true MULTI_BLOCK_COMMENT_SHOW_ONLY_FUNCTION_NAME: true PYDEV_TEST_RUNNER: '2' # PYDEV_TEST_RUNNER_DEFAULT_PARAMETERS: --capture=no -W ignore::DeprecationWarning -n auto --tb=native -vv --force-regen -# PYDEV_TEST_RUNNER_DEFAULT_PARAMETERS: --capture=no -W ignore::DeprecationWarning -n auto --tb=native -vv +PYDEV_TEST_RUNNER_DEFAULT_PARAMETERS: --capture=no -W ignore::DeprecationWarning -n auto --tb=native -vv # PYDEV_TEST_RUNNER_DEFAULT_PARAMETERS: --capture=no -W ignore::DeprecationWarning -n 0 --tb=native -vv -PYDEV_TEST_RUNNER_DEFAULT_PARAMETERS: --capture=no -W ignore::DeprecationWarning -n 0 --tb=native -vv --force-regen +# PYDEV_TEST_RUNNER_DEFAULT_PARAMETERS: --capture=no -W ignore::DeprecationWarning -n 0 --tb=native -vv --force-regen PYDEV_USE_PYUNIT_VIEW: true SAVE_ACTIONS_ONLY_ON_WORKSPACE_FILES: true SINGLE_BLOCK_COMMENT_ALIGN_RIGHT: true diff --git a/robotframework-ls/docs/changelog.md b/robotframework-ls/docs/changelog.md index 283fa2a11b..615ad76bac 100644 --- a/robotframework-ls/docs/changelog.md +++ b/robotframework-ls/docs/changelog.md @@ -7,6 +7,7 @@ New in 0.48.0 (NEXT) - Properly consider that `For each input work item` from `rpaframework` receives a keyword as the first parameter. [#684](https://github.com/robocorp/robotframework-lsp/issues/684) - Added support for `Import Library` keyword. [#675](https://github.com/robocorp/robotframework-lsp/issues/675) - Implemented expand and shrink selection (use `Shift+Alt+Right` to select outer scope and `Shift+Alt+left` to deselect). +- Properly support constructs with nested usage of `Run Keyword` inside `Run Keyword`. 
[#686](https://github.com/robocorp/robotframework-lsp/issues/686) - A simpler TextMate grammar is now used to work better with bracket pair colorization (which is enabled by default in VSCode `1.67`). - Note that initially very little code will be highlighted until semantic highlighting takes over. - Updated `robotframework-tidy` to `2.2.0`. diff --git a/robotframework-ls/src/robotframework_ls/impl/ast_utils.py b/robotframework-ls/src/robotframework_ls/impl/ast_utils.py index 4561100aaf..022395d529 100644 --- a/robotframework-ls/src/robotframework_ls/impl/ast_utils.py +++ b/robotframework-ls/src/robotframework_ls/impl/ast_utils.py @@ -31,10 +31,6 @@ ) from robotframework_ls.impl.text_utilities import normalize_robot_name from robocorp_ls_core.basic import isinstance_name -from robotframework_ls.impl.keywords_in_args import ( - KEYWORD_NAME_TO_KEYWORD_INDEX, - KEYWORD_NAME_TO_CONDITION_INDEX, -) import functools import weakref import threading @@ -770,9 +766,9 @@ def iter_library_imports(ast) -> Iterator[NodeInfo[ILibraryImportNode]]: def _iter_library_imports_uncached(ast): try: - from robot.api.parsing import LibraryImport + from robot.api.parsing import LibraryImport # noqa except ImportError: - from robot.parsing.model.statements import LibraryImport + from robot.parsing.model.statements import LibraryImport # noqa yield from ast.iter_indexed("LibraryImport") for keyword_usage_info in iter_keyword_usage_tokens( @@ -1177,6 +1173,10 @@ def _add_match(found: set, tok: IRobotToken) -> bool: @_convert_ast_to_indexer def iter_variable_references(ast) -> Iterator[VarTokenInfo]: + from robotframework_ls.impl.ast_utils_keyword_usage import ( + obtain_keyword_usage_handler, + ) + # TODO: This right now makes everything globally, we should have 2 versions, # one to resolve references which are global and another to resolve references # just inside some scope when dealing with local variables. 
@@ -1223,28 +1223,30 @@ def iter_variable_references(ast) -> Iterator[VarTokenInfo]: except: log.exception("Unable to tokenize: %s", token) - for usage_info in _iter_keyword_usage_tokens_first_level_uncached(ast): - args_as_keywords_handler = get_args_as_keywords_handler(usage_info.node) - if args_as_keywords_handler is None: - continue + for node_info in _iter_node_info_which_may_have_usage_info(ast): + stack = node_info.stack + node = node_info.node + keyword_usage_handler = obtain_keyword_usage_handler(stack, node) + if keyword_usage_handler is not None: + for usage_info in keyword_usage_handler.iter_keyword_usages_from_node(): - stack = usage_info.stack - node = usage_info.node - arg_i = 0 - for token in usage_info.node.tokens: - if token.type == token.ARGUMENT: - arg_i += 1 - if arg_i == 1: - if _is_store_keyword(usage_info.node): - continue - - next_tok_type = args_as_keywords_handler.next_tok_type(token) - if next_tok_type == args_as_keywords_handler.EXPRESSION: - for tok in iter_expression_variables(token): - if tok.type == token.VARIABLE: - if not _add_match(found, tok): + arg_i = 0 + for token in usage_info.node.tokens: + if token.type == token.ARGUMENT: + arg_i += 1 + if arg_i == 1: + if _is_store_keyword(usage_info.node): continue - yield VarTokenInfo(stack, node, tok, AdditionalVarInfo("$")) + + next_tok_type = keyword_usage_handler.get_token_type(token) + if next_tok_type == keyword_usage_handler.EXPRESSION: + for tok in iter_expression_variables(token): + if tok.type == token.VARIABLE: + if not _add_match(found, tok): + continue + yield VarTokenInfo( + stack, node, tok, AdditionalVarInfo("$") + ) for clsname in CLASSES_WTH_EXPRESSION_ARGUMENTS: for node_info in ast.iter_indexed(clsname): @@ -1313,157 +1315,27 @@ def _same_line_col(tok1: IRobotToken, tok2: IRobotToken): return tok1.lineno == tok2.lineno and tok1.col_offset == tok2.col_offset -def _build_keyword_usage( - stack, node, yield_only_for_token, current_tokens, yield_only_over_keyword_name -) -> Optional[KeywordUsageInfo]: - # Note: just check for line/col because the token could be changed - # (for instance, an EOL ' ' could be added to the token). 
- if not current_tokens: - return None - - keyword_at_index = 0 - keyword_token = current_tokens[keyword_at_index] - - if yield_only_for_token is not None: - if yield_only_over_keyword_name: - if not _same_line_col(yield_only_for_token, keyword_token): - return None - else: - for tok in current_tokens: - if _same_line_col(yield_only_for_token, tok): - break - else: - return None - - keyword_token = copy_token_replacing(keyword_token, type=keyword_token.KEYWORD) - new_tokens = [keyword_token] - new_tokens.extend(current_tokens[keyword_at_index + 1 :]) - - return KeywordUsageInfo( - stack, - node.__class__(new_tokens), - keyword_token, - keyword_token.value, - True, - ) - - -def _iter_keyword_usage_tokens_uncached_from_args( - stack, - node, - args_as_keywords_handler, - yield_only_for_token: Optional[IRobotToken] = None, - yield_only_over_keyword_name: bool = True, -): - # We may have multiple matches, so, we need to setup the appropriate book-keeping - current_tokens = [] - - iter_in = iter(node.tokens) - - for token in iter_in: - if token.type == token.ARGUMENT: - next_tok_type = args_as_keywords_handler.next_tok_type(token) - if next_tok_type == args_as_keywords_handler.KEYWORD: - current_tokens.append(token) - break - - for token in iter_in: - if token.type == token.ARGUMENT: - next_tok_type = args_as_keywords_handler.next_tok_type(token) - - if next_tok_type in ( - args_as_keywords_handler.CONTROL, - args_as_keywords_handler.EXPRESSION, - args_as_keywords_handler.IGNORE, - ): - # Don't add IF/ELSE IF/AND nor the condition. - continue - - if next_tok_type != args_as_keywords_handler.KEYWORD: - # Argument was now added to current_tokens. - current_tokens.append(token) - continue - - if current_tokens: - # Starting a new one (build for the previous). - usage_info = _build_keyword_usage( - stack, - node, - yield_only_for_token, - current_tokens, - yield_only_over_keyword_name, - ) - if usage_info is not None: - yield usage_info - - current_tokens = [token] - - else: - # Do one last iteration at the end to deal with the last one. - if current_tokens: - usage_info = _build_keyword_usage( - stack, - node, - yield_only_for_token, - current_tokens, - yield_only_over_keyword_name, - ) - if usage_info is not None: - yield usage_info - - -def _iter_keyword_usage_tokens_first_level_uncached(ast) -> Iterator[KeywordUsageInfo]: +@_convert_ast_to_indexer +def _iter_node_info_which_may_have_usage_info(ast): for clsname in ("KeywordCall",) + _CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_TUPLE: - for node_info in ast.iter_indexed(clsname): - stack = node_info.stack - node = node_info.node - usage_info = _create_keyword_usage_info(stack, node) - if usage_info is not None: - yield usage_info + yield from ast.iter_indexed(clsname) def _iter_keyword_usage_tokens_uncached( ast, collect_args_as_keywords: bool ) -> Iterator[KeywordUsageInfo]: - for usage_info in _iter_keyword_usage_tokens_first_level_uncached(ast): - yield usage_info - - if collect_args_as_keywords: - args_as_keywords_handler = get_args_as_keywords_handler(usage_info.node) - if args_as_keywords_handler is None: - continue - - yield from _iter_keyword_usage_tokens_uncached_from_args( - usage_info.stack, usage_info.node, args_as_keywords_handler - ) - - -def _create_keyword_usage_info(stack, node) -> Optional[KeywordUsageInfo]: - """ - If this is a keyword usage node, return information on it, otherwise, - returns None. - - :note: this goes hand-in-hand with get_keyword_name_token. 
- """ - from robot.api import Token - - if node.__class__.__name__ == "KeywordCall": - token_type = Token.KEYWORD - - elif node.__class__.__name__ in CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET: - token_type = Token.NAME - - else: - return None - - node, token = _strip_node_and_token_bdd_prefix(node, token_type) - if token is None: - return None + from robotframework_ls.impl.ast_utils_keyword_usage import ( + obtain_keyword_usage_handler, + ) - keyword_name = token.value - if keyword_name.lower() == "none": - return None - return KeywordUsageInfo(tuple(stack), node, token, keyword_name) + for node_info in _iter_node_info_which_may_have_usage_info(ast): + stack = node_info.stack + node = node_info.node + keyword_usage_handler = obtain_keyword_usage_handler( + stack, node, recursive=collect_args_as_keywords + ) + if keyword_usage_handler is not None: + yield from keyword_usage_handler.iter_keyword_usages_from_node() def create_keyword_usage_info_from_token( @@ -1477,204 +1349,17 @@ def create_keyword_usage_info_from_token( if we're in an argument that isn't itself a keyword call. """ if token.type == token.ARGUMENT: - args_as_keywords_handler = get_args_as_keywords_handler(node) - if args_as_keywords_handler is not None: - for v in _iter_keyword_usage_tokens_uncached_from_args( - stack, - node, - args_as_keywords_handler, - yield_only_for_token=token, - yield_only_over_keyword_name=False, - ): - return v - - return _create_keyword_usage_info(stack, node) - - -class _ConsiderArgsAsKeywordNames: - NONE = 0 - KEYWORD = 1 - EXPRESSION = 2 - CONTROL = 3 - IGNORE = 4 - - def __init__( - self, - node, - normalized_keyword_name, - consider_keyword_at_index, - consider_condition_at_index, - ): - self._node = node - self._normalized_keyword_name = normalized_keyword_name - self._consider_keyword_at_index = consider_keyword_at_index - self._consider_condition_at_index = consider_condition_at_index - self._current_arg = 0 - - # Run Keyword If is special because it has 'ELSE IF' / 'ELSE' - # which will then be be (cond, keyword) or just (keyword), so - # we need to provide keyword usages as needed. 
- if self._normalized_keyword_name == "runkeywordif": - self.next_tok_type = self._next_tok_type_run_keyword_if - elif self._normalized_keyword_name == "foreachinputworkitem": - self.next_tok_type = self._next_tok_type_for_each_input_work_item - elif self._normalized_keyword_name == "runkeywords": - found = False - for token in node.tokens: - if "AND" == token.value: - found = True - break - if found: - self.next_tok_type = self._next_tok_type_run_keywords - else: - self.next_tok_type = self._consider_each_arg_as_keyword - - self._stack_kind = None - self._stack = None - self._started_match = False - - def next_tok_type_as_str(self, token) -> str: - tok_type = self.next_tok_type(token) - if tok_type == self.NONE: - return "" - if tok_type == self.EXPRESSION: - return "" - if tok_type == self.KEYWORD: - return "" - if tok_type == self.CONTROL: - return "" - if tok_type == self.IGNORE: - return "" - raise AssertionError(f"Unexpected: {tok_type}") - - def next_tok_type(self, token) -> int: # pylint: disable=method-hidden - assert token.type == token.ARGUMENT - self._current_arg += 1 - - if self._current_arg == self._consider_condition_at_index: - return self.EXPRESSION - - if self._current_arg == self._consider_keyword_at_index: - return self.KEYWORD - - return self.NONE - - def _next_tok_type_for_each_input_work_item(self, token): - from robotframework_ls.impl.variable_resolve import find_split_index - - assert token.type == token.ARGUMENT - self._current_arg += 1 - - if self._current_arg == self._consider_keyword_at_index: - return self.KEYWORD - - i = find_split_index(token.value) - if i > 0: - v = normalize_robot_name(token.value[:i]) - if v in ("itemslimit", "returnresults"): - return self.IGNORE - - return self.NONE - - def _next_tok_type_run_keyword_if(self, token): - assert token.type == token.ARGUMENT - - self._current_arg += 1 - - if token.value == "ELSE IF": - self._started_match = True - self._stack = [] - self._stack_kind = token.value - return self.CONTROL - elif token.value == "ELSE": - self._started_match = True - self._stack = [] - self._stack_kind = token.value - return self.CONTROL - - else: - self._started_match = False - if self._stack is not None: - self._stack.append(token) - - if self._stack is not None: - if self._stack_kind == "ELSE IF": - if len(self._stack) == 1: - return self.EXPRESSION - return self.KEYWORD if len(self._stack) == 2 else self.NONE - - if self._stack_kind == "ELSE": - return self.KEYWORD if len(self._stack) == 1 else self.NONE - - if self._current_arg == self._consider_condition_at_index: - return self.EXPRESSION - - if self._current_arg == self._consider_keyword_at_index: - return self.KEYWORD - - return self.NONE - - def _consider_each_arg_as_keyword(self, token): - assert token.type == token.ARGUMENT - return self.KEYWORD - - def _next_tok_type_run_keywords(self, token): - assert token.type == token.ARGUMENT - - self._current_arg += 1 - - if token.value == "AND": - self._started_match = True - self._stack = [] - self._stack_kind = token.value - return self.CONTROL - - else: - self._started_match = False - if self._stack is not None: - self._stack.append(token) - - if self._stack is not None: - if self._stack_kind == "AND": - return self.KEYWORD if len(self._stack) == 1 else self.NONE - - if self._current_arg == self._consider_keyword_at_index: - return self.KEYWORD - return self.NONE - - -def get_args_as_keywords_handler(node) -> Optional[_ConsiderArgsAsKeywordNames]: - from robot.api import Token - - if node.__class__.__name__ == "KeywordCall": 
- token_type = Token.KEYWORD + from robotframework_ls.impl.ast_utils_keyword_usage import ( + obtain_keyword_usage_for_token, + ) - elif node.__class__.__name__ in CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET: - token_type = Token.NAME + return obtain_keyword_usage_for_token(stack, node, token) - else: - return None + from robotframework_ls.impl.ast_utils_keyword_usage import ( + _create_root_keyword_usage_info, + ) - node_keyword_name = node.get_token(token_type) - if node_keyword_name and node_keyword_name.value: - normalized_keyword_name = normalize_robot_name(node_keyword_name.value) - consider_keyword_at_index = KEYWORD_NAME_TO_KEYWORD_INDEX.get( - normalized_keyword_name - ) - consider_condition_at_index = KEYWORD_NAME_TO_CONDITION_INDEX.get( - normalized_keyword_name - ) - if ( - consider_keyword_at_index is not None - or consider_condition_at_index is not None - ): - return _ConsiderArgsAsKeywordNames( - node, - normalized_keyword_name, - consider_keyword_at_index, - consider_condition_at_index, - ) - return None + return _create_root_keyword_usage_info(stack, node) def get_keyword_name_token( @@ -1699,16 +1384,17 @@ def get_keyword_name_token( return _strip_token_bdd_prefix(token) if token.type == token.ARGUMENT and not token.value.strip().endswith("}"): - args_as_keywords_handler = get_args_as_keywords_handler(node) - if args_as_keywords_handler is not None: - for _ in _iter_keyword_usage_tokens_uncached_from_args( - stack, - node, - args_as_keywords_handler, - yield_only_for_token=token, - yield_only_over_keyword_name=accept_only_over_keyword_name, - ): - return token + from robotframework_ls.impl.ast_utils_keyword_usage import ( + obtain_keyword_usage_for_token, + ) + + keyword_usage = obtain_keyword_usage_for_token(stack, node, token) + if keyword_usage is not None: + if accept_only_over_keyword_name: + if _same_line_col(keyword_usage.token, token): + return token + else: + return keyword_usage.token return None diff --git a/robotframework-ls/src/robotframework_ls/impl/ast_utils_keyword_usage.py b/robotframework-ls/src/robotframework_ls/impl/ast_utils_keyword_usage.py new file mode 100644 index 0000000000..70af252964 --- /dev/null +++ b/robotframework-ls/src/robotframework_ls/impl/ast_utils_keyword_usage.py @@ -0,0 +1,432 @@ +from typing import Optional, Iterator, Tuple + +from robotframework_ls.impl.protocols import KeywordUsageInfo, IRobotToken +from robotframework_ls.impl.keywords_in_args import KEYWORD_NAME_TO_KEYWORD_INDEX +from robotframework_ls.impl.keywords_in_args import KEYWORD_NAME_TO_CONDITION_INDEX + +TOK_TYPE_NONE = 0 +TOK_TYPE_KEYWORD = 1 +TOK_TYPE_EXPRESSION = 2 +TOK_TYPE_CONTROL = 3 +TOK_TYPE_IGNORE = 4 + + +def _tok_type_as_str(tok_type) -> str: + if tok_type == TOK_TYPE_NONE: + return "" + if tok_type == TOK_TYPE_EXPRESSION: + return "" + if tok_type == TOK_TYPE_KEYWORD: + return "" + if tok_type == TOK_TYPE_CONTROL: + return "" + if tok_type == TOK_TYPE_IGNORE: + return "" + raise AssertionError(f"Unexpected: {tok_type}") + + +class _ConsiderArgsAsKeywordNames: + NONE = TOK_TYPE_NONE + KEYWORD = TOK_TYPE_KEYWORD + EXPRESSION = TOK_TYPE_EXPRESSION + CONTROL = TOK_TYPE_CONTROL + IGNORE = TOK_TYPE_IGNORE + + def __init__( + self, + node, + normalized_keyword_name, + consider_keyword_at_index, + consider_condition_at_index, + ): + self._normalized_keyword_name = normalized_keyword_name + self._consider_keyword_at_index = consider_keyword_at_index + self._consider_condition_at_index = consider_condition_at_index + self._current_arg = 0 + + # Run Keyword If is 
special because it has 'ELSE IF' / 'ELSE' + # which will then be be (cond, keyword) or just (keyword), so + # we need to provide keyword usages as needed. + if self._normalized_keyword_name == "runkeywordif": + self.next_tok_type = self._next_tok_type_run_keyword_if + elif self._normalized_keyword_name == "foreachinputworkitem": + self.next_tok_type = self._next_tok_type_for_each_input_work_item + elif self._normalized_keyword_name == "runkeywords": + found = False + for token in node.tokens: + if "AND" == token.value: + found = True + break + if found: + self.next_tok_type = self._next_tok_type_run_keywords + else: + self.next_tok_type = self._consider_each_arg_as_keyword + + self._stack_kind = None + self._stack = None + self._started_match = False + + def next_tok_type(self, token) -> int: # pylint: disable=method-hidden + assert token.type == token.ARGUMENT + self._current_arg += 1 + + if self._current_arg == self._consider_condition_at_index: + return self.EXPRESSION + + if self._current_arg == self._consider_keyword_at_index: + return self.KEYWORD + + return self.NONE + + def _next_tok_type_for_each_input_work_item(self, token): + from robotframework_ls.impl.variable_resolve import find_split_index + from robotframework_ls.impl.text_utilities import normalize_robot_name + + assert token.type == token.ARGUMENT + self._current_arg += 1 + + if self._current_arg == self._consider_keyword_at_index: + return self.KEYWORD + + i = find_split_index(token.value) + if i > 0: + v = normalize_robot_name(token.value[:i]) + if v in ("itemslimit", "returnresults"): + return self.IGNORE + + return self.NONE + + def _next_tok_type_run_keyword_if(self, token): + assert token.type == token.ARGUMENT + + self._current_arg += 1 + + if token.value == "ELSE IF": + self._started_match = True + self._stack = [] + self._stack_kind = token.value + return self.CONTROL + elif token.value == "ELSE": + self._started_match = True + self._stack = [] + self._stack_kind = token.value + return self.CONTROL + + else: + self._started_match = False + if self._stack is not None: + self._stack.append(token) + + if self._stack is not None: + if self._stack_kind == "ELSE IF": + if len(self._stack) == 1: + return self.EXPRESSION + return self.KEYWORD if len(self._stack) == 2 else self.NONE + + if self._stack_kind == "ELSE": + return self.KEYWORD if len(self._stack) == 1 else self.NONE + + if self._current_arg == self._consider_condition_at_index: + return self.EXPRESSION + + if self._current_arg == self._consider_keyword_at_index: + return self.KEYWORD + + return self.NONE + + def _consider_each_arg_as_keyword(self, token): + assert token.type == token.ARGUMENT + return self.KEYWORD + + def _next_tok_type_run_keywords(self, token): + assert token.type == token.ARGUMENT + + self._current_arg += 1 + + if token.value == "AND": + self._started_match = True + self._stack = [] + self._stack_kind = token.value + return self.CONTROL + + else: + self._started_match = False + if self._stack is not None: + self._stack.append(token) + + if self._stack is not None: + if self._stack_kind == "AND": + return self.KEYWORD if len(self._stack) == 1 else self.NONE + + if self._current_arg == self._consider_keyword_at_index: + return self.KEYWORD + return self.NONE + + +def _create_root_keyword_usage_info(stack, node) -> Optional[KeywordUsageInfo]: + """ + If this is a keyword usage node, return information on it, otherwise, + returns None. + + :note: this goes hand-in-hand with get_keyword_name_token. 
+ """ + from robot.api import Token + from robotframework_ls.impl.ast_utils import ( + CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET, + ) + from robotframework_ls.impl.ast_utils import _strip_node_and_token_bdd_prefix + + if node.__class__.__name__ == "KeywordCall": + token_type = Token.KEYWORD + + elif node.__class__.__name__ in CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET: + token_type = Token.NAME + + else: + return None + + node, token = _strip_node_and_token_bdd_prefix(node, token_type) + if token is None: + return None + + keyword_name = token.value + if keyword_name.lower() == "none": + return None + return KeywordUsageInfo(tuple(stack), node, token, keyword_name) + + +def _build_keyword_usage(stack, node, current_tokens) -> Optional[KeywordUsageInfo]: + from robotframework_ls.impl.ast_utils import copy_token_replacing + + # Note: just check for line/col because the token could be changed + # (for instance, an EOL ' ' could be added to the token). + if not current_tokens: + return None + + keyword_at_index = 0 + keyword_token = current_tokens[keyword_at_index] + + keyword_token = copy_token_replacing(keyword_token, type=keyword_token.KEYWORD) + new_tokens = [keyword_token] + new_tokens.extend(current_tokens[keyword_at_index + 1 :]) + + return KeywordUsageInfo( + stack, + node.__class__(new_tokens), + keyword_token, + keyword_token.value, + True, + ) + + +def _iter_keyword_usage_info_uncached_from_args( + stack, node, args_as_keywords_handler, token_line_col_to_type +) -> Iterator[KeywordUsageInfo]: + # We may have multiple matches, so, we need to setup the appropriate book-keeping + current_tokens = [] + + iter_in = iter(node.tokens) + + for token in iter_in: + if token.type == token.ARGUMENT: + next_tok_type = args_as_keywords_handler.next_tok_type(token) + token_line_col_to_type[(token.lineno, token.col_offset)] = next_tok_type + if next_tok_type == args_as_keywords_handler.KEYWORD: + current_tokens.append(token) + break + + for token in iter_in: + if token.type == token.ARGUMENT: + next_tok_type = args_as_keywords_handler.next_tok_type(token) + token_line_col_to_type[(token.lineno, token.col_offset)] = next_tok_type + + if next_tok_type in ( + args_as_keywords_handler.CONTROL, + args_as_keywords_handler.EXPRESSION, + args_as_keywords_handler.IGNORE, + ): + # Don't add IF/ELSE IF/AND nor the condition. + continue + + if next_tok_type != args_as_keywords_handler.KEYWORD: + # Argument was now added to current_tokens. + current_tokens.append(token) + continue + + if current_tokens: + # Starting a new one (build for the previous). + usage_info = _build_keyword_usage( + stack, + node, + current_tokens, + ) + if usage_info is not None: + yield usage_info + + current_tokens = [token] + + else: + # Do one last iteration at the end to deal with the last one. + if current_tokens: + usage_info = _build_keyword_usage( + stack, + node, + current_tokens, + ) + if usage_info is not None: + yield usage_info + + +class _KeywordUsageHandler: + """ + We have the following main use-cases when dealing with keyword usages (also + known as keyword references): + + 1. Obtain the usages (keyword call/arguments) for code-analysis. + 2. For each token in a keyword usage, know what it maps to ( + keyword name, expression, control, regular argument, ...) + + Also, it needs to be considered that a given keyword usage may have + other usages within it, so, the _KeywordUsageHandler is an API to help + make things more streamlined for each use-case. 
+ """ + + NONE = TOK_TYPE_NONE + KEYWORD = TOK_TYPE_KEYWORD + EXPRESSION = TOK_TYPE_EXPRESSION + CONTROL = TOK_TYPE_CONTROL + IGNORE = TOK_TYPE_IGNORE + + def __init__(self, stack, node, recursive): + self.node = node + self.stack = stack + self._recursive = recursive + + # We store as line/col the type info and not the actual token because we + # may create dummy tokens along the way and in this case we're + # interested in the positions. + self._token_line_col_to_type = {} + self._keyword_usages_from_node_cache = None + + def _ensure_cached(self): + if self._keyword_usages_from_node_cache is None: + self._keyword_usages_from_node_cache = tuple( + self._iter_keyword_usages_from_node() + ) + + def iter_keyword_usages_from_node(self) -> Iterator[KeywordUsageInfo]: + self._ensure_cached() + yield from iter(self._keyword_usages_from_node_cache) + + def _iter_keyword_usages_from_node(self) -> Iterator[KeywordUsageInfo]: + """ + Note: the iteration order is guaranteed and it's from the inside to + the outside (because when matching tokens we want to match more + specific ones before outer ones). + """ + + root_keyword_usage_info = _create_root_keyword_usage_info(self.stack, self.node) + if root_keyword_usage_info is None: + return + + # Ok, we have the root one, now, we need to recursively detect others. + if self._recursive: + yield from self._iter_keyword_usages_inside_keyword_usage( + root_keyword_usage_info + ) + + yield root_keyword_usage_info + + def _iter_keyword_usages_inside_keyword_usage( + self, root_keyword_usage_info: KeywordUsageInfo + ) -> Iterator[KeywordUsageInfo]: + from robotframework_ls.impl.text_utilities import normalize_robot_name + + # Now, we have the root, determine if it can have other usages inside itself... + normalized_keyword_name = normalize_robot_name(root_keyword_usage_info.name) + consider_keyword_at_index = KEYWORD_NAME_TO_KEYWORD_INDEX.get( + normalized_keyword_name + ) + consider_condition_at_index = KEYWORD_NAME_TO_CONDITION_INDEX.get( + normalized_keyword_name + ) + if ( + consider_keyword_at_index is not None + or consider_condition_at_index is not None + ): + args_as_keywords_handler = _ConsiderArgsAsKeywordNames( + root_keyword_usage_info.node, + normalized_keyword_name, + consider_keyword_at_index, + consider_condition_at_index, + ) + + for kw_usage in _iter_keyword_usage_info_uncached_from_args( + self.stack, + root_keyword_usage_info.node, + args_as_keywords_handler, + self._token_line_col_to_type, + ): + yield from self._iter_keyword_usages_inside_keyword_usage(kw_usage) + yield kw_usage + + def get_token_type(self, tok: IRobotToken) -> int: + """ + :return: + TOK_TYPE_NONE = 0 + TOK_TYPE_KEYWORD = 1 + TOK_TYPE_EXPRESSION = 2 + TOK_TYPE_CONTROL = 3 + TOK_TYPE_IGNORE = 4 + """ + self._ensure_cached() + return self._token_line_col_to_type.get( + (tok.lineno, tok.col_offset), TOK_TYPE_NONE + ) + + def get_token_type_as_str(self, token: IRobotToken) -> str: + return _tok_type_as_str(self.get_token_type(token)) + + def iter_tokens_with_type(self) -> Iterator[Tuple[IRobotToken, int]]: + self._ensure_cached() + for tok in self.node.tokens: + yield tok, self._token_line_col_to_type.get( + (tok.lineno, tok.col_offset), TOK_TYPE_NONE + ) + + def get_keyword_usage_for_token_line_col( + self, line, col + ) -> Optional[KeywordUsageInfo]: + self._ensure_cached() + for kw_usage in self.iter_keyword_usages_from_node(): + for token in kw_usage.node.tokens: + if token.lineno == line and token.col_offset == col: + return kw_usage + return None + + +def 
obtain_keyword_usage_handler( + stack, node, recursive=True +) -> Optional[_KeywordUsageHandler]: + from robotframework_ls.impl.ast_utils import ( + CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET, + ) + + if ( + node.__class__.__name__ != "KeywordCall" + and node.__class__.__name__ + not in CLASSES_WITH_ARGUMENTS_AS_KEYWORD_CALLS_AS_SET + ): + return None + + return _KeywordUsageHandler(stack, node, recursive=recursive) + + +def obtain_keyword_usage_for_token(stack, node, token) -> Optional[KeywordUsageInfo]: + keyword_usage_handler = obtain_keyword_usage_handler(stack, node) + if keyword_usage_handler is not None: + keyword_usage = keyword_usage_handler.get_keyword_usage_for_token_line_col( + token.lineno, token.col_offset + ) + return keyword_usage + return None diff --git a/robotframework-ls/src/robotframework_ls/impl/find_definition.py b/robotframework-ls/src/robotframework_ls/impl/find_definition.py index 46f0a91525..02de7d82a5 100644 --- a/robotframework-ls/src/robotframework_ls/impl/find_definition.py +++ b/robotframework-ls/src/robotframework_ls/impl/find_definition.py @@ -388,7 +388,6 @@ def find_keyword_definition( token_info.stack, token_info.node, token_info.token, - accept_only_over_keyword_name=False, ) if token is None: if token_info.token.type == token_info.token.KEYWORD_NAME: diff --git a/robotframework-ls/src/robotframework_ls/impl/protocols.py b/robotframework-ls/src/robotframework_ls/impl/protocols.py index 4dd5977e41..4ec03c6a58 100644 --- a/robotframework-ls/src/robotframework_ls/impl/protocols.py +++ b/robotframework-ls/src/robotframework_ls/impl/protocols.py @@ -291,7 +291,7 @@ def __init__( ): self.stack = stack self.node = node - self.token = token + self.token = token # This is actually the keyword name token. self.name = name self._is_argument_usage = is_argument_usage diff --git a/robotframework-ls/src/robotframework_ls/impl/semantic_tokens.py b/robotframework-ls/src/robotframework_ls/impl/semantic_tokens.py index b788d96531..32c7ba367c 100644 --- a/robotframework-ls/src/robotframework_ls/impl/semantic_tokens.py +++ b/robotframework-ls/src/robotframework_ls/impl/semantic_tokens.py @@ -259,15 +259,15 @@ def _tokenize_token( if not in_documentation: in_expression = is_node_with_expression_argument(node) - if scope.args_as_keywords_handler is not None: - tok_type = scope.args_as_keywords_handler.next_tok_type(use_token) - if tok_type == scope.args_as_keywords_handler.KEYWORD: + if scope.keyword_usage_handler is not None: + tok_type = scope.keyword_usage_handler.get_token_type(use_token) + if tok_type == scope.keyword_usage_handler.KEYWORD: use_token_type = KEYWORD - elif tok_type == scope.args_as_keywords_handler.EXPRESSION: + elif tok_type == scope.keyword_usage_handler.EXPRESSION: in_expression = True - elif tok_type == scope.args_as_keywords_handler.CONTROL: + elif tok_type == scope.keyword_usage_handler.CONTROL: yield use_token, CONTROL_INDEX return @@ -513,13 +513,15 @@ def __init__(self, context: ICompletionContext): self.imported_libraries = set(_iter_dependent_names(context)) # Note: it's set for the node and then reused for all the tokens in that same node. 
- self.args_as_keywords_handler: Any = None + self.keyword_usage_handler: Any = None self.get_index_from_rf_token_type = RF_TOKEN_TYPE_TO_TOKEN_TYPE_INDEX.get self.get_index_from_internal_token_type = TOKEN_TYPE_TO_INDEX.__getitem__ def semantic_tokens_full(context: ICompletionContext): + from robotframework_ls.impl import ast_utils_keyword_usage + try: ast = context.doc.get_ast() except: @@ -536,13 +538,13 @@ def semantic_tokens_full(context: ICompletionContext): last_column = 0 scope = _SemanticTokensScope(context) - for _stack, node in ast_utils.iter_all_nodes_recursive(ast): + for stack, node in ast_utils.iter_all_nodes_recursive(ast): if monitor: monitor.check_cancelled() tokens = getattr(node, "tokens", None) if tokens: - scope.args_as_keywords_handler = ast_utils.get_args_as_keywords_handler( - node + scope.keyword_usage_handler = ( + ast_utils_keyword_usage.obtain_keyword_usage_handler(stack, node) ) for token in tokens: diff --git a/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils.py b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils.py index 438f4bef00..34819ba84b 100644 --- a/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils.py +++ b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils.py @@ -205,146 +205,170 @@ def test_ast_extract_expression_tokens(data_regression): data_regression.check(collected) -def test_in_expression(): +def check_value_and_tok_type(text, expected): from robotframework_ls.impl.robot_workspace import RobotDocument from robotframework_ls.impl import ast_utils + from robotframework_ls.impl.ast_utils_keyword_usage import ( + obtain_keyword_usage_handler, + ) document = RobotDocument( "uri", - """ -*** Keyword ** -my - Run Keyword If $v1 == ${v2} CallThis""", + text, ) ast = document.get_ast() found = [] for node_info in ast_utils.iter_indexed(ast, "KeywordCall"): - handler = ast_utils.get_args_as_keywords_handler(node_info.node) + keyword_usage_handler = obtain_keyword_usage_handler( + node_info.stack, node_info.node + ) for tok in node_info.node.tokens: if tok.type == tok.ARGUMENT: found.append( - f"{tok.value} = {handler.next_tok_type_as_str(tok)}", + f"{tok.value} = {keyword_usage_handler.get_token_type_as_str(tok)}", ) - assert found == [ - "$v1 == ${v2} = ", - "CallThis = ", - ] + assert found == expected -def test_in_expression_else_if(): - from robotframework_ls.impl.robot_workspace import RobotDocument - from robotframework_ls.impl import ast_utils +def test_in_expression(): + check_value_and_tok_type( + """ +*** Keyword ** +my + Run Keyword If $v1 == ${v2} CallThis + """, + [ + "$v1 == ${v2} = ", + "CallThis = ", + ], + ) - document = RobotDocument( - "uri", + +def test_in_expression_else_if(): + check_value_and_tok_type( """ *** Keyword ** my Run Keyword If $v1 == ${v2} CallThis ELSE IF $v1 == ${v2} CallThis""", + [ + "$v1 == ${v2} = ", + "CallThis = ", + "ELSE IF = ", + "$v1 == ${v2} = ", + "CallThis = ", + ], ) - ast = document.get_ast() - found = [] - for node_info in ast_utils.iter_indexed(ast, "KeywordCall"): - handler = ast_utils.get_args_as_keywords_handler(node_info.node) - for tok in node_info.node.tokens: - if tok.type == tok.ARGUMENT: - found.append( - f"{tok.value} = {handler.next_tok_type_as_str(tok)}", - ) - - assert found == [ - "$v1 == ${v2} = ", - "CallThis = ", - "ELSE IF = ", - "$v1 == ${v2} = ", - "CallThis = ", - ] def test_for_each_input_work_item(): - from robotframework_ls.impl.robot_workspace import RobotDocument - from robotframework_ls.impl import ast_utils - - document = 
RobotDocument( - "uri", + check_value_and_tok_type( """ *** Keyword ** my For each input work item No operation items_limit=0 return_results=False invalid_arg=True""", + [ + "No operation = ", + "items_limit=0 = ", + "return_results=False = ", + "invalid_arg=True = ", + ], ) - ast = document.get_ast() - found = [] - for node_info in ast_utils.iter_indexed(ast, "KeywordCall"): - handler = ast_utils.get_args_as_keywords_handler(node_info.node) - for tok in node_info.node.tokens: - if tok.type == tok.ARGUMENT: - found.append( - f"{tok.value} = {handler.next_tok_type_as_str(tok)}", - ) - - assert found == [ - "No operation = ", - "items_limit=0 = ", - "return_results=False = ", - "invalid_arg=True = ", - ] def test_run_keywords_1(): - from robotframework_ls.impl.robot_workspace import RobotDocument - from robotframework_ls.impl import ast_utils - - document = RobotDocument( - "uri", + check_value_and_tok_type( """ *** Keyword ** my Run Keywords CallThis Arg AND CallThis2""", + [ + "CallThis = ", + "Arg = ", + "AND = ", + "CallThis2 = ", + ], ) - ast = document.get_ast() - found = [] - for node_info in ast_utils.iter_indexed(ast, "KeywordCall"): - handler = ast_utils.get_args_as_keywords_handler(node_info.node) - for tok in node_info.node.tokens: - if tok.type == tok.ARGUMENT: - found.append( - f"{tok.value} = {handler.next_tok_type_as_str(tok)}", - ) - - assert found == [ - "CallThis = ", - "Arg = ", - "AND = ", - "CallThis2 = ", - ] def test_run_keywords_2(): + check_value_and_tok_type( + """ +*** Keyword ** +my + Run Keywords CallThis CallThis2""", + [ + "CallThis = ", + "CallThis2 = ", + ], + ) + + +def check_keyword_usage_handler(text, data_regression, prefix_basename): from robotframework_ls.impl.robot_workspace import RobotDocument from robotframework_ls.impl import ast_utils + from robotframework_ls.impl.ast_utils_keyword_usage import ( + obtain_keyword_usage_handler, + ) document = RobotDocument( "uri", - """ -*** Keyword ** -my - Run Keywords CallThis CallThis2""", + text, ) ast = document.get_ast() found = [] + found_types = [] + line_col_to_usage = {} for node_info in ast_utils.iter_indexed(ast, "KeywordCall"): - handler = ast_utils.get_args_as_keywords_handler(node_info.node) - for tok in node_info.node.tokens: - if tok.type == tok.ARGUMENT: - found.append( - f"{tok.value} = {handler.next_tok_type_as_str(tok)}", - ) + keyword_usage_handler = obtain_keyword_usage_handler( + node_info.stack, node_info.node + ) - assert found == [ - "CallThis = ", - "CallThis2 = ", - ] + for keyword_usage in keyword_usage_handler.iter_keyword_usages_from_node(): + toks = [] + for t in keyword_usage.node.tokens: + key = (t.lineno, t.col_offset) + if key not in line_col_to_usage: + line_col_to_usage[key] = keyword_usage + toks.append(t.value) + found.append( + f"name:{keyword_usage.name}, is_arg_usage: {keyword_usage._is_argument_usage}, tokens: {', '.join(toks)}" + ) + + for token, token_type in keyword_usage_handler.iter_tokens_with_type(): + if token.value.strip(): + found_types.append(f"{token.value.strip()}, {token.type}, {token_type}") + + data_regression.check(found, basename=prefix_basename + "_usages") + data_regression.check(found_types, basename=prefix_basename + "_types") + + for (line, col), kw_usage in line_col_to_usage.items(): + assert ( + keyword_usage_handler.get_keyword_usage_for_token_line_col(line, col) + == kw_usage + ) + + +def test_run_keyword_in_run_keyword(data_regression): + check_keyword_usage_handler( + """ +*** Keyword ** +my + Run keyword Run keyword Run Keyword Log 
foo""", + data_regression, + "test_run_keyword_in_run_keyword", + ) + + +def test_run_keyword_in_run_keyword_2(data_regression): + check_keyword_usage_handler( + """ +*** Keyword ** +my + Run keywords Run keyword Log1 bar AND Run Keyword Log2 foo""", + data_regression, + "test_run_keyword_in_run_keyword_2", + ) def test_keyword_usage_stack(): diff --git a/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_2_types.yml b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_2_types.yml new file mode 100644 index 0000000000..5de00b8664 --- /dev/null +++ b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_2_types.yml @@ -0,0 +1,8 @@ +- Run keywords, KEYWORD, 0 +- Run keyword, ARGUMENT, 1 +- Log1, ARGUMENT, 1 +- bar, ARGUMENT, 0 +- AND, ARGUMENT, 3 +- Run Keyword, ARGUMENT, 1 +- Log2, ARGUMENT, 1 +- foo, ARGUMENT, 0 diff --git a/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_2_usages.yml b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_2_usages.yml new file mode 100644 index 0000000000..b5098f1f4f --- /dev/null +++ b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_2_usages.yml @@ -0,0 +1,6 @@ +- 'name:Log1, is_arg_usage: True, tokens: Log1, bar' +- 'name:Run keyword, is_arg_usage: True, tokens: Run keyword, Log1, bar' +- 'name:Log2, is_arg_usage: True, tokens: Log2, foo' +- 'name:Run Keyword, is_arg_usage: True, tokens: Run Keyword, Log2, foo' +- 'name:Run keywords, is_arg_usage: False, tokens: , Run keywords, , Run keyword, , + Log1, , bar, , AND, , Run Keyword, , Log2, , foo, ' diff --git a/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_types.yml b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_types.yml new file mode 100644 index 0000000000..34fe8554b5 --- /dev/null +++ b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_types.yml @@ -0,0 +1,5 @@ +- Run keyword, KEYWORD, 0 +- Run keyword, ARGUMENT, 1 +- Run Keyword, ARGUMENT, 1 +- Log, ARGUMENT, 1 +- foo, ARGUMENT, 0 diff --git a/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_usages.yml b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_usages.yml new file mode 100644 index 0000000000..6985e087d1 --- /dev/null +++ b/robotframework-ls/tests/robotframework_ls_tests/test_ast_utils/test_run_keyword_in_run_keyword_usages.yml @@ -0,0 +1,5 @@ +- 'name:Log, is_arg_usage: True, tokens: Log, foo' +- 'name:Run Keyword, is_arg_usage: True, tokens: Run Keyword, Log, foo' +- 'name:Run keyword, is_arg_usage: True, tokens: Run keyword, Run Keyword, Log, foo' +- 'name:Run keyword, is_arg_usage: False, tokens: , Run keyword, , Run keyword, , + Run Keyword, , Log, , foo, ' diff --git a/robotframework-ls/tests/robotframework_ls_tests/test_semantic_highlighting.py b/robotframework-ls/tests/robotframework_ls_tests/test_semantic_highlighting.py index 1692cad124..ff45f0ac23 100644 --- a/robotframework-ls/tests/robotframework_ls_tests/test_semantic_highlighting.py +++ b/robotframework-ls/tests/robotframework_ls_tests/test_semantic_highlighting.py @@ -1327,3 +1327,23 @@ def test_semantic_import_library(workspace): ("Col", "argumentValue"), ], ) + + +def 
test_semantic_run_keyword_in_run_keyword(workspace): + check_simple( + workspace, + """ +*** Test Cases *** +Test case 1 + Run keyword Run keyword Run Keyword Log foo +""", + [ + ("*** Test Cases ***", "header"), + ("Test case 1", "testCaseName"), + ("Run keyword", "keywordNameCall"), + ("Run keyword", "keywordNameCall"), + ("Run Keyword", "keywordNameCall"), + ("Log", "keywordNameCall"), + ("foo", "argumentValue"), + ], + )
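
Usage sketch (not part of the patch itself; assumes the patch is applied). The `_KeywordUsageHandler` docstring names two main use-cases. The first, obtaining all keyword usages of a node including the nested ones, can be exercised much like the `check_keyword_usage_handler` test helper above; the document "uri" and the Robot Framework source here are illustrative:

from robotframework_ls.impl.robot_workspace import RobotDocument
from robotframework_ls.impl import ast_utils
from robotframework_ls.impl.ast_utils_keyword_usage import (
    obtain_keyword_usage_handler,
)

document = RobotDocument(
    "uri",
    """
*** Keywords ***
My Keyword
    Run Keyword    Run Keyword    Log    message
""",
)
ast = document.get_ast()

for node_info in ast_utils.iter_indexed(ast, "KeywordCall"):
    handler = obtain_keyword_usage_handler(node_info.stack, node_info.node)
    if handler is None:
        continue
    # Usages are yielded inside-out (most specific first): `Log`, then the
    # inner `Run Keyword`, then the outer `Run Keyword` (the root usage,
    # built from the KeywordCall node itself).
    for usage in handler.iter_keyword_usages_from_node():
        print(usage.name)

For the triple-nested `Run keyword    Run keyword    Run Keyword    Log    foo` case, the regression data above shows the same inside-out ordering with four usages.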
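
The second use-case, classifying each token of the original node, is what the `semantic_tokens.py` change consumes through `scope.keyword_usage_handler.get_token_type(use_token)`. Continuing from the sketch above, with the same `handler`:

# ARGUMENT tokens that are really nested keyword names classify as
# handler.KEYWORD (the root keyword name token is already a KEYWORD token and
# stays handler.NONE here, matching the regression data above). Control
# tokens such as AND / ELSE / ELSE IF classify as handler.CONTROL; the
# remaining possibilities are handler.EXPRESSION, handler.IGNORE and
# handler.NONE for plain arguments.
for token, tok_type in handler.iter_tokens_with_type():
    if tok_type == handler.KEYWORD:
        print("nested keyword name:", token.value)
    elif tok_type == handler.CONTROL:
        print("control:", token.value)

Keying the classification by `(lineno, col_offset)` rather than by token identity is what lets the handler answer for both the original tokens and the copies created for the nested usage nodes, as the comment in `_KeywordUsageHandler.__init__` points out.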