Skip to content

Commit

Permalink
Add support for Import Library keyword. Fixes #675
Browse files Browse the repository at this point in the history
  • Loading branch information
fabioz committed May 18, 2022
1 parent f869bee commit 478ebf8
Show file tree
Hide file tree
Showing 5 changed files with 106 additions and 4 deletions.
56 changes: 52 additions & 4 deletions robotframework-ls/src/robotframework_ls/impl/ast_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -287,10 +287,13 @@ def _obtain_ast_indexer(ast):
def _convert_ast_to_indexer(func):
    # Decorator: guarantees that *func* is called with an object providing the
    # indexer API (e.g. iter_indexed) rather than a plain AST node.
    @functools.wraps(func)
    def new_func(ast, *args, **kwargs):
        if hasattr(ast, "iter_indexed"):
            # Already indexer-like (duck-typed check) — pass it through as-is.
            indexer = ast
        else:
            try:
                # Reuse a previously-built indexer cached on the AST node.
                indexer = ast.__ast_indexer__
            except:
                # NOTE(review): bare except — presumably this only guards the
                # AttributeError on first access; consider narrowing it.
                indexer = ast.__ast_indexer__ = _ASTIndexer(ast)

        return func(indexer, *args, **kwargs)

Expand Down Expand Up @@ -761,7 +764,52 @@ def is_setting_section_node_info(node_info: NodeInfo) -> bool:

@_convert_ast_to_indexer
def iter_library_imports(ast) -> Iterator[NodeInfo[ILibraryImportNode]]:
    """Yield all library imports found in the AST.

    Results are memoized on the indexer under a fixed cache key, so repeated
    calls over the same AST do not recompute the imports.
    """
    yield from ast.iter_cached("iter_library_imports", _iter_library_imports_uncached)


def _iter_library_imports_uncached(ast):
    """Yield library-import nodes from the AST.

    Yields the regular ``LibraryImport`` nodes first, then synthesizes
    ``LibraryImport`` nodes for dynamic imports done through the
    'Import Library' keyword so that both look the same to callers.
    """
    from robot.api.parsing import LibraryImport

    yield from ast.iter_indexed("LibraryImport")
    for keyword_usage_info in iter_keyword_usage_tokens(
        ast, collect_args_as_keywords=True
    ):
        if normalize_robot_name(keyword_usage_info.name) == "importlibrary":
            # Create a LibraryImport node based on the keyword usage.
            use_tokens = []
            iter_in = iter(keyword_usage_info.node.tokens)
            for token in iter_in:
                if token.type == token.KEYWORD:
                    # Skip the 'Import Library' keyword name.
                    break
                else:
                    continue

            # Get the first non-separator token
            # (it becomes the library NAME of the synthesized import).
            for token in iter_in:
                if token.type == token.SEPARATOR:
                    continue
                use_tokens.append(copy_token_replacing(token, type=token.NAME))
                break

            # Remaining tokens: once a literal 'WITH NAME' argument is seen,
            # retype it as WITH_NAME and retype the following arguments as
            # NAME tokens (the alias); everything else is kept unchanged.
            for token in iter_in:
                if token.type == token.ARGUMENT and token.value == "WITH NAME":
                    use_tokens.append(copy_token_replacing(token, type=token.WITH_NAME))
                    for token in iter_in:
                        if token.type == token.ARGUMENT:
                            use_tokens.append(
                                copy_token_replacing(token, type=token.NAME)
                            )
                        else:
                            # Separators/EOL after WITH NAME are preserved as-is.
                            use_tokens.append(token)

                else:
                    use_tokens.append(token)

            if use_tokens:
                # Wrap the collected tokens in a real LibraryImport node so
                # downstream consumers need no special-casing.
                node = LibraryImport(use_tokens)
                yield NodeInfo(keyword_usage_info.stack, node)


@_convert_ast_to_indexer
Expand Down
1 change: 1 addition & 0 deletions robotframework-ls/src/robotframework_ls/impl/protocols.py
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,7 @@ def get_tokens(self, name: str) -> List[IRobotToken]:
class ILibraryImportNode(INode, Protocol):
    """Structural type for a library-import AST node."""

    # Library name as written in the import (e.g. "Collections").
    name: str
    # Alias given via 'WITH NAME', or None when no alias was used.
    alias: Optional[str]
    # Arguments passed to the library import, or None when there are none.
    args: Optional[Sequence[str]]


class IKeywordNode(INode, Protocol):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,8 @@ def semantic_tokens_range(context, range):
def _tokenize_token(
node, use_token, scope: "_SemanticTokensScope"
) -> Iterator[Tuple[IRobotToken, int]]:
from robotframework_ls.impl.text_utilities import normalize_robot_name

if use_token.type in (use_token.EOL, use_token.SEPARATOR):
# Fast way out for the most common tokens (which have no special handling).
return
Expand Down Expand Up @@ -321,6 +323,12 @@ def _tokenize_token(
if in_documentation:
equals_pos = -1
else:
if first_token.value == "WITH NAME":
value = node.get_value(use_token.KEYWORD)
if value and normalize_robot_name(value) == "importlibrary":
yield first_token, CONTROL_INDEX
return

equals_pos = find_split_index(first_token.value)
if equals_pos != -1:
# Found an equals... let's check if it's not a 'catenate', which
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1936,3 +1936,29 @@ def get_variables(*args):
Should Be Equal ${LIST} ${EXP LIST}
"""
_collect_errors(workspace, doc, data_regression, basename="no_error")


def test_import_library_keyword(workspace, libspec_manager, data_regression):
    # A library imported dynamically via the 'Import Library' keyword should
    # make its keywords (here: Collections.Append to list) resolvable, so the
    # analysis must report no errors.
    # NOTE(review): robot cells below need 2+ space separators — verify they
    # survived copy/paste.
    workspace.set_root("case2", libspec_manager=libspec_manager)
    doc = workspace.put_doc("case2.robot")
    doc.source = """
*** Test Cases ***
Test case 1
    ${lst}=    Evaluate    []
    Import library    Collections
    Append to list    ${lst}    1    2
"""
    _collect_errors(workspace, doc, data_regression, basename="no_error")


def test_import_library_keyword_with_name(workspace, libspec_manager, data_regression):
    # Same as test_import_library_keyword, but the dynamic import uses
    # 'WITH NAME' — keywords must resolve through the alias ('Col.').
    # NOTE(review): robot cells below need 2+ space separators — verify they
    # survived copy/paste.
    workspace.set_root("case2", libspec_manager=libspec_manager)
    doc = workspace.put_doc("case2.robot")
    doc.source = """
*** Test Cases ***
Test case 1
    ${lst}=    Evaluate    []
    Import library    Collections    WITH NAME    Col
    Col.Append to list    ${lst}    1    2
"""
    _collect_errors(workspace, doc, data_regression, basename="no_error")
Original file line number Diff line number Diff line change
Expand Up @@ -1308,3 +1308,22 @@ def test_semantic_var_in_expr(workspace):
("END", "control"),
],
)


def test_semantic_import_library(workspace):
    # Semantic highlighting for an 'Import Library' keyword usage: the library
    # and alias are argumentValue tokens and 'WITH NAME' is highlighted as
    # control (matching a regular settings-section library import).
    # NOTE(review): robot cells in the source need 2+ space separators —
    # verify they survived copy/paste.
    check_simple(
        workspace,
        """
*** Test Cases ***
Test case 1
    Import library    Collections    WITH NAME    Col
""",
        [
            ("*** Test Cases ***", "header"),
            ("Test case 1", "testCaseName"),
            ("Import library", "keywordNameCall"),
            ("Collections", "argumentValue"),
            ("WITH NAME", "control"),
            ("Col", "argumentValue"),
        ],
    )

0 comments on commit 478ebf8

Please sign in to comment.