robot.parsing.lexer package — Robot Framework 7.0.1 documentation

Submodules

robot.parsing.lexer.blocklexers module

class robot.parsing.lexer.blocklexers.BlockLexer(ctx: LexingContext)[source]

Bases: Lexer, ABC

accepts_more(statement: List[Token]) → bool[source]

input(statement: List[Token])[source]

lexer_for(statement: List[Token]) → Lexer[source]

lexer_classes() → tuple[type[Lexer], ...][source]

lex()[source]

class robot.parsing.lexer.blocklexers.FileLexer(ctx: LexingContext)[source]

Bases: BlockLexer

lex()[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.SectionLexer(ctx: LexingContext)[source]

Bases: BlockLexer, ABC

ctx: FileContext

accepts_more(statement: List[Token]) → bool[source]

class robot.parsing.lexer.blocklexers.SettingSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.VariableSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.TestCaseSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.TaskSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.KeywordSectionLexer(ctx: LexingContext)[source]

Bases: SettingSectionLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

Bases: SectionLexer

handles(statement: List[Token]) → bool[source]

Bases: SectionLexer

handles(statement: List[Token]) → bool[source]

class robot.parsing.lexer.blocklexers.InvalidSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.TestOrKeywordLexer(ctx: LexingContext)[source]

Bases: BlockLexer, ABC

name_type: str

accepts_more(statement: List[Token]) → bool[source]

input(statement: List[Token])[source]

class robot.parsing.lexer.blocklexers.TestCaseLexer(ctx: SuiteFileContext)[source]

Bases: TestOrKeywordLexer

name_type: str = 'TESTCASE NAME'

lex()[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.KeywordLexer(ctx: FileContext)[source]

Bases: TestOrKeywordLexer

name_type: str = 'KEYWORD NAME'

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.NestedBlockLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: BlockLexer, ABC

ctx: TestCaseContext | KeywordContext

accepts_more(statement: List[Token]) → bool[source]

input(statement: List[Token])[source]

class robot.parsing.lexer.blocklexers.ForLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.WhileLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.TryLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.IfLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.InlineIfLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) → bool[source]

accepts_more(statement: List[Token]) → bool[source]

lexer_classes() → tuple[type[Lexer], ...][source]

input(statement: List[Token])[source]

robot.parsing.lexer.context module

class robot.parsing.lexer.context.LexingContext(settings: Settings, languages: Languages)[source]

Bases: object

lex_setting(statement: List[Token])[source]

class robot.parsing.lexer.context.FileContext(lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None)[source]

Bases: LexingContext

settings: FileSettings

add_language(lang: Language | str | Path)[source]

keyword_context() → KeywordContext[source]

setting_section(statement: List[Token]) → bool[source]

variable_section(statement: List[Token]) → bool[source]

test_case_section(statement: List[Token]) → bool[source]

task_section(statement: List[Token]) → bool[source]

keyword_section(statement: List[Token]) → bool[source]

lex_invalid_section(statement: List[Token])[source]

class robot.parsing.lexer.context.SuiteFileContext(lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None)[source]

Bases: FileContext

settings: SuiteFileSettings

test_case_context() → TestCaseContext[source]

test_case_section(statement: List[Token]) → bool[source]

task_section(statement: List[Token]) → bool[source]

class robot.parsing.lexer.context.ResourceFileContext(lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None)[source]

Bases: FileContext

settings: ResourceFileSettings

class robot.parsing.lexer.context.InitFileContext(lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None)[source]

Bases: FileContext

settings: InitFileSettings

class robot.parsing.lexer.context.TestCaseContext(settings: TestCaseSettings)[source]

Bases: LexingContext

settings: TestCaseSettings

property template_set: bool

class robot.parsing.lexer.context.KeywordContext(settings: KeywordSettings)[source]

Bases: LexingContext

settings: KeywordSettings

property template_set: bool

robot.parsing.lexer.lexer module

robot.parsing.lexer.lexer.get_tokens(source: Path | str | TextIO, data_only: bool = False, tokenize_variables: bool = False, lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None) → Iterator[Token][source]

Parses the given source to tokens.

Returns a generator that yields Token instances.
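As a rough usage sketch (assuming a Robot Framework 7.x installation; the function is also exposed as robot.api.get_tokens), the source can be an open text stream and the output filtered down to data tokens:

    from io import StringIO

    from robot.parsing.lexer.lexer import get_tokens

    data = StringIO(
        "*** Test Cases ***\n"
        "Example\n"
        "    Log    Hello, world!\n"
    )

    # data_only=True omits separators, EOLs and other non-data tokens.
    for token in get_tokens(data, data_only=True):
        print(token.type, repr(token.value))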

robot.parsing.lexer.lexer.get_resource_tokens(source: Path | str | TextIO, data_only: bool = False, tokenize_variables: bool = False, lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None) → Iterator[Token][source]

Parses the given source to resource file tokens.

Same as get_tokens() otherwise, but the source is considered to be a resource file. This affects, for example, what settings are valid.
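A similar sketch for an in-memory resource file, under the same assumptions as the get_tokens() example above:

    from io import StringIO

    from robot.parsing.lexer.lexer import get_resource_tokens

    resource = StringIO(
        "*** Keywords ***\n"
        "Greet\n"
        "    [Arguments]    ${name}\n"
        "    Log    Hello, ${name}!\n"
    )

    for token in get_resource_tokens(resource, data_only=True):
        print(token.type, repr(token.value))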

robot.parsing.lexer.lexer.get_init_tokens(source: Path | str | TextIO, data_only: bool = False, tokenize_variables: bool = False, lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None) → Iterator[Token][source]

Parses the given source to init file tokens.

Same as get_tokens() otherwise, but the source is considered to be a suite initialization file. This affects, for example, what settings are valid.
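And a corresponding sketch for a suite initialization file; the path used here is purely hypothetical:

    from pathlib import Path

    from robot.parsing.lexer.lexer import get_init_tokens

    # Hypothetical __init__.robot location; any existing init file works the same way.
    init_file = Path("tests/__init__.robot")
    for token in get_init_tokens(init_file, data_only=True):
        print(token.type, repr(token.value))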

class robot.parsing.lexer.lexer.Lexer(ctx: LexingContext, data_only: bool = False, tokenize_variables: bool = False)[source]

Bases: object

input(source: Path | str | TextIO)[source]

get_tokens() → Iterator[Token][source]

robot.parsing.lexer.settings module

class robot.parsing.lexer.settings.Settings(languages: Languages)[source]

Bases: ABC

names: tuple[str, ...] = ()

aliases: dict[str, str] = {}

multi_use = ('Metadata', 'Library', 'Resource', 'Variables')

single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template', 'Name')

name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')

name_arguments_and_with_name = ('Library',)

lex(statement: List[Token])[source]

class robot.parsing.lexer.settings.FileSettings(languages: Languages)[source]

Bases: Settings, ABC

class robot.parsing.lexer.settings.SuiteFileSettings(languages: Languages)[source]

Bases: FileSettings

names: tuple[str, ...] = ('Documentation', 'Metadata', 'Name', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Test Timeout', 'Test Tags', 'Default Tags', 'Keyword Tags', 'Library', 'Resource', 'Variables')

aliases: dict[str, str] = {'Force Tags': 'Test Tags', 'Task Setup': 'Test Setup', 'Task Tags': 'Test Tags', 'Task Teardown': 'Test Teardown', 'Task Template': 'Test Template', 'Task Timeout': 'Test Timeout'}

class robot.parsing.lexer.settings.InitFileSettings(languages: Languages)[source]

Bases: FileSettings

names: tuple[str, ...] = ('Documentation', 'Metadata', 'Name', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Timeout', 'Test Tags', 'Keyword Tags', 'Library', 'Resource', 'Variables')

aliases: dict[str, str] = {'Force Tags': 'Test Tags', 'Task Setup': 'Test Setup', 'Task Tags': 'Test Tags', 'Task Teardown': 'Test Teardown', 'Task Timeout': 'Test Timeout'}

class robot.parsing.lexer.settings.ResourceFileSettings(languages: Languages)[source]

Bases: FileSettings

names: tuple[str, ...] = ('Documentation', 'Keyword Tags', 'Library', 'Resource', 'Variables')

class robot.parsing.lexer.settings.TestCaseSettings(parent: SuiteFileSettings)[source]

Bases: Settings

names: tuple[str, ...] = ('Documentation', 'Tags', 'Setup', 'Teardown', 'Template', 'Timeout')

property template_set: bool

class robot.parsing.lexer.settings.KeywordSettings(parent: FileSettings)[source]

Bases: Settings

names: tuple[str, ...] = ('Documentation', 'Arguments', 'Setup', 'Teardown', 'Timeout', 'Tags', 'Return')

robot.parsing.lexer.statementlexers module

class robot.parsing.lexer.statementlexers.Lexer(ctx: LexingContext)[source]

Bases: ABC

handles(statement: List[Token]) → bool[source]

abstract accepts_more(statement: List[Token]) → bool[source]

abstract input(statement: List[Token])[source]

abstract lex()[source]

class robot.parsing.lexer.statementlexers.StatementLexer(ctx: LexingContext)[source]

Bases: Lexer, ABC

token_type: str

accepts_more(statement: List[Token]) → bool[source]

input(statement: List[Token])[source]

abstract lex()[source]

class robot.parsing.lexer.statementlexers.SingleType(ctx: LexingContext)[source]

Bases: StatementLexer, ABC

lex()[source]

class robot.parsing.lexer.statementlexers.TypeAndArguments(ctx: LexingContext)[source]

Bases: StatementLexer, ABC

lex()[source]

Bases: SingleType, ABC

handles(statement: List[Token]) → bool[source]

Bases: SectionHeaderLexer

Bases: SectionHeaderLexer

Bases: SectionHeaderLexer

Bases: SectionHeaderLexer

Bases: SectionHeaderLexer

Bases: SectionHeaderLexer

Bases: SectionHeaderLexer

Bases: SingleType

Bases: CommentLexer

class robot.parsing.lexer.statementlexers.SettingLexer(ctx: LexingContext)[source]

Bases: StatementLexer

ctx: FileContext

lex()[source]

class robot.parsing.lexer.statementlexers.TestCaseSettingLexer(ctx: LexingContext)[source]

Bases: StatementLexer

ctx: TestCaseContext

lex()[source]

handles(statement: List[Token]) → bool[source]

class robot.parsing.lexer.statementlexers.KeywordSettingLexer(ctx: LexingContext)[source]

Bases: StatementLexer

ctx: KeywordContext

lex()[source]

handles(statement: List[Token]) → bool[source]

class robot.parsing.lexer.statementlexers.VariableLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

ctx: FileContext

token_type: str = 'VARIABLE'

lex()[source]

class robot.parsing.lexer.statementlexers.KeywordCallLexer(ctx: LexingContext)[source]

Bases: StatementLexer

ctx: TestCaseContext | KeywordContext

lex()[source]

Bases: StatementLexer

handles(statement: List[Token]) → bool[source]

Bases: TypeAndArguments

handles(statement: List[Token]) → bool[source]

Bases: StatementLexer

handles(statement: List[Token]) → bool[source]

Bases: TypeAndArguments

handles(statement: List[Token]) → bool[source]

Bases: TypeAndArguments

handles(statement: List[Token]) → bool[source]

Bases: TypeAndArguments

handles(statement: List[Token]) → bool[source]

Bases: StatementLexer

handles(statement: List[Token]) → bool[source]

Bases: TypeAndArguments

handles(statement: List[Token]) → bool[source]

Bases: StatementLexer

handles(statement: List[Token]) → bool[source]

class robot.parsing.lexer.statementlexers.EndLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'END'

handles(statement: List[Token]) → bool[source]

class robot.parsing.lexer.statementlexers.VarLexer(ctx: LexingContext)[source]

Bases: StatementLexer

token_type: str = 'VAR'

handles(statement: List[Token]) → bool[source]

lex()[source]

class robot.parsing.lexer.statementlexers.ReturnLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'RETURN STATEMENT'

handles(statement: List[Token]) → bool[source]

class robot.parsing.lexer.statementlexers.ContinueLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'CONTINUE'

handles(statement: List[Token]) → bool[source]

class robot.parsing.lexer.statementlexers.BreakLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'BREAK'

handles(statement: List[Token]) → bool[source]

class robot.parsing.lexer.statementlexers.SyntaxErrorLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'ERROR'

handles(statement: List[Token]) → bool[source]

lex()[source]

robot.parsing.lexer.tokenizer module

class robot.parsing.lexer.tokenizer.Tokenizer[source]

Bases: object

tokenize(data: str, data_only: bool = False) → Iterator[list[Token]][source]

robot.parsing.lexer.tokens module

class robot.parsing.lexer.tokens.Token(type: str | None = None, value: str | None = None, lineno: int = -1, col_offset: int = -1, error: str | None = None)[source]

Bases: object

Token representing a piece of Robot Framework data.

Each token has type, value, line number, column offset and end column offset in the type, value, lineno, col_offset and end_col_offset attributes, respectively. Tokens representing an error also have their error message in the error attribute.

Token types are declared as class attributes such as SETTING_HEADER and EOL. Values of these constants have changed slightly in Robot Framework 4.0, and they may change again in the future. It is thus safer to use the constants, not their values, when types are needed. For example, use Token(Token.EOL) instead of Token('EOL') and token.type == Token.EOL instead of token.type == 'EOL'.

If value is not given and type is a special marker like IF or EOL, the value is set automatically.
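For illustration, a minimal sketch of constructing tokens with the constants; the automatically filled EOL value shown in the comment is an assumption of this example:

    from robot.parsing.lexer.tokens import Token

    # Prefer the constants over their literal string values.
    eol = Token(Token.EOL)                            # value filled in automatically (a newline)
    keyword = Token(Token.KEYWORD, 'Log', lineno=3, col_offset=4)

    assert eol.type == Token.EOL
    assert keyword.end_col_offset == keyword.col_offset + len(keyword.value)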

TESTCASE_NAME = 'TESTCASE NAME'

KEYWORD_NAME = 'KEYWORD NAME'

SUITE_NAME = 'SUITE NAME'

DOCUMENTATION = 'DOCUMENTATION'

SUITE_SETUP = 'SUITE SETUP'

SUITE_TEARDOWN = 'SUITE TEARDOWN'

METADATA = 'METADATA'

TEST_SETUP = 'TEST SETUP'

TEST_TEARDOWN = 'TEST TEARDOWN'

TEST_TEMPLATE = 'TEST TEMPLATE'

TEST_TIMEOUT = 'TEST TIMEOUT'

TEST_TAGS = 'TEST TAGS'

FORCE_TAGS = 'TEST TAGS'

DEFAULT_TAGS = 'DEFAULT TAGS'

KEYWORD_TAGS = 'KEYWORD TAGS'

LIBRARY = 'LIBRARY'

RESOURCE = 'RESOURCE'

VARIABLES = 'VARIABLES'

SETUP = 'SETUP'

TEARDOWN = 'TEARDOWN'

TEMPLATE = 'TEMPLATE'

TIMEOUT = 'TIMEOUT'

TAGS = 'TAGS'

ARGUMENTS = 'ARGUMENTS'

RETURN = 'RETURN'

RETURN_SETTING = 'RETURN'

AS = 'AS'

WITH_NAME = 'AS'

NAME = 'NAME'

VARIABLE = 'VARIABLE'

ARGUMENT = 'ARGUMENT'

ASSIGN = 'ASSIGN'

KEYWORD = 'KEYWORD'

FOR = 'FOR'

FOR_SEPARATOR = 'FOR SEPARATOR'

END = 'END'

IF = 'IF'

INLINE_IF = 'INLINE IF'

ELSE_IF = 'ELSE IF'

ELSE = 'ELSE'

TRY = 'TRY'

EXCEPT = 'EXCEPT'

FINALLY = 'FINALLY'

WHILE = 'WHILE'

VAR = 'VAR'

RETURN_STATEMENT = 'RETURN STATEMENT'

CONTINUE = 'CONTINUE'

BREAK = 'BREAK'

OPTION = 'OPTION'

SEPARATOR = 'SEPARATOR'

CONTINUATION = 'CONTINUATION'

CONFIG = 'CONFIG'

EOL = 'EOL'

EOS = 'EOS'

ERROR = 'ERROR'

FATAL_ERROR = 'FATAL ERROR'

NON_DATA_TOKENS = frozenset({'COMMENT', 'CONTINUATION', 'EOL', 'EOS', 'SEPARATOR'})

SETTING_TOKENS = frozenset({'ARGUMENTS', 'DEFAULT TAGS', 'DOCUMENTATION', 'KEYWORD TAGS', 'LIBRARY', 'METADATA', 'RESOURCE', 'RETURN', 'SETUP', 'SUITE NAME', 'SUITE SETUP', 'SUITE TEARDOWN', 'TAGS', 'TEARDOWN', 'TEMPLATE', 'TEST SETUP', 'TEST TAGS', 'TEST TEARDOWN', 'TEST TEMPLATE', 'TEST TIMEOUT', 'TIMEOUT', 'VARIABLES'})

ALLOW_VARIABLES = frozenset({'ARGUMENT', 'KEYWORD NAME', 'NAME', 'TESTCASE NAME'})

type

value

lineno

col_offset

error

property end_col_offset: int

set_error(error: str)[source]

tokenize_variables() → Iterator[Token][source]

Tokenizes possible variables in token value.

Yields the token itself if the token does not allow variables (see Token.ALLOW_VARIABLES) or its value does not contain variables. Otherwise, yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.
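A small sketch of the behaviour described above; the split shown in the trailing comment is an expectation, not verified output:

    from robot.parsing.lexer.tokens import Token

    token = Token(Token.ARGUMENT, 'Hello, ${name}!', lineno=3, col_offset=11)
    for part in token.tokenize_variables():
        print(part.type, repr(part.value))
    # Expected roughly: ARGUMENT 'Hello, ', VARIABLE '${name}', ARGUMENT '!'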

class robot.parsing.lexer.tokens.EOS(lineno: int = -1, col_offset: int = -1)[source]

Bases: Token

Token representing end of a statement.

classmethod from_token(token: Token, before: bool = False) → EOS[source]
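A hedged sketch of from_token(); the positioning noted in the comment assumes the EOS marker is placed right after the given token:

    from robot.parsing.lexer.tokens import Token, EOS

    keyword = Token(Token.KEYWORD, 'Log', lineno=3, col_offset=4)
    eos = EOS.from_token(keyword)
    print(eos.lineno, eos.col_offset)   # presumably 3 and keyword.end_col_offset (7)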

class robot.parsing.lexer.tokens.END(lineno: int = -1, col_offset: int = -1, virtual: bool = False)[source]

Bases: Token

Token representing the END marker used to signify block ending.

Virtual END tokens have an empty string as their value, whereas real END tokens have the value 'END'.

classmethod from_token(token: Token, virtual: bool = False) → END[source]
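A sketch contrasting real and virtual END tokens, following the value convention described above:

    from robot.parsing.lexer.tokens import Token, END

    header = Token(Token.IF, 'IF', lineno=5, col_offset=4)

    real = END.from_token(header)                    # value 'END'
    virtual = END.from_token(header, virtual=True)   # value '' (virtual marker)
    print(repr(real.value), repr(virtual.value))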