diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 4d42e6a9..1f7e952d 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -9,6 +9,22 @@ on:
     branches: [ main ]
 
 jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install ruff
+      - name: Ruff linter
+        run: ruff check --output-format=github .
+      - name: Ruff formatter
+        run: ruff format --check --diff .
   test:
     strategy:
       fail-fast: false
@@ -16,9 +32,6 @@ jobs:
         include:
           - os: 'windows-latest'
             python-version: '3.9'
-            rf-version: 'rf5'
-          - os: 'ubuntu-latest'
-            python-version: '3.8'
            rf-version: 'rf4'
          - os: 'ubuntu-latest'
            python-version: '3.9'
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 265454b6..1c694440 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,11 +1,7 @@
 repos:
-  - repo: https://github.com/pycqa/isort
-    rev: 5.11.5
-    hooks:
-      - id: isort
-        name: isort (python)
-
-  - repo: https://github.com/psf/black
-    rev: 22.3.0
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    # Ruff version.
+    rev: v0.6.9
     hooks:
-      - id: black
+      #- id: ruff
+      - id: ruff-format
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 3985b327..9a587610 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,55 @@
-[tool.black]
+[tool.ruff]
 line-length = 120
-[tool.isort]
-profile = "black"
-line_length = 120
\ No newline at end of file
+show-fixes = true
+target-version = "py39" # TODO verify min Python version
+
+lint.select = [
+    "ALL", # include all the rules, including new ones
+]
+exclude = [
+    "tests/utest/testdata/"
+]
+lint.ignore = [
+    #### modules
+    "ANN", # flake8-annotations
+    "COM", # flake8-commas
+    "C90", # mccabe complexity
+    "DJ", # django
+    "EXE", # flake8-executable
+    "PTH", # flake8-use-pathlib
+    "T10", # debugger
+    "TID", # flake8-tidy-imports
+    #### specific rules
+    "D100", # ignore missing docs
+    "D101",
+    "D102",
+    "D103",
+    "D104",
+    "D105",
+    "D106",
+    "D107",
+    "D200",
+    "D203", # blank line before class body
+    "D205",
+    "D212",
+    "D400",
+    "D401",
+    "D415",
+    "E402", # false positives for local imports
+    "E722", # bare except
+    "B904", # Raise from None
+    "EM101", # No string-literal exceptions !?
+    "EM102", # No Exceptions with f-strings
+    "PLR0913", # too many arguments to function call
+    "TRY003", # external messages in exceptions are too verbose
+    "TD002",
+    "TD003",
+    "FIX002", # too verbose descriptions of todos
+    "PLE1205", # too many arguments for logging
+    "T201", # print in code
+]
+
+[tool.ruff.format]
+quote-style = "double"
+skip-magic-trailing-comma = false
+line-ending = "auto"
diff --git a/robotidy/api.py b/robotidy/api.py
index b24623b1..5b18236d 100644
--- a/robotidy/api.py
+++ b/robotidy/api.py
@@ -1,6 +1,7 @@
 """
 Methods for transforming Robot Framework ast model programmatically.
""" + from __future__ import annotations from pathlib import Path @@ -37,7 +38,8 @@ def transform_model(model, root_dir: str, output: str | None = None, **kwargs) - """ robotidy_class = get_robotidy(root_dir, output, **kwargs) disabler_finder = disablers.RegisterDisablers( - robotidy_class.config.formatting.start_line, robotidy_class.config.formatting.end_line + robotidy_class.config.formatting.start_line, + robotidy_class.config.formatting.end_line, ) disabler_finder.visit(model) if disabler_finder.is_disabled_in_file(disablers.ALL_TRANSFORMERS): diff --git a/robotidy/app.py b/robotidy/app.py index e26b8981..ae14a9f8 100644 --- a/robotidy/app.py +++ b/robotidy/app.py @@ -61,7 +61,10 @@ def transform_files(self): self.output_diff(model_path, old_model, new_model) changed_files += 1 except DataError as err: - click.echo(f"Failed to decode {source} with an error: {err}\nSkipping file", err=True) + click.echo( + f"Failed to decode {source} with an error: {err}\nSkipping file", + err=True, + ) changed_files = previous_changed_files skipped_files += 1 return self.formatting_result(all_files, changed_files, skipped_files, stdin) @@ -109,7 +112,7 @@ def transform_until_stable(self, model, disabler_finder): def transform(self, model, disablers): old_model = misc.StatementLinesCollector(model) for transformer in self.config.transformers: - setattr(transformer, "disablers", disablers) # set dynamically to allow using external transformers + transformer.disablers = disablers # set dynamically to allow using external transformers if disablers.is_disabled_in_file(transformer.__class__.__name__): continue transformer.visit(model) @@ -137,11 +140,15 @@ def get_line_ending(self, path: str): return os.linesep if isinstance(f.newlines, str): return f.newlines - else: - return f.newlines[0] + return f.newlines[0] return self.config.formatting.line_sep - def output_diff(self, path: str, old_model: misc.StatementLinesCollector, new_model: misc.StatementLinesCollector): + def output_diff( + self, + path: str, + old_model: misc.StatementLinesCollector, + new_model: misc.StatementLinesCollector, + ): if not self.config.show_diff: return old = [l + "\n" for l in old_model.text.splitlines()] diff --git a/robotidy/cli.py b/robotidy/cli.py index eafad1a3..8c542212 100644 --- a/robotidy/cli.py +++ b/robotidy/cli.py @@ -2,7 +2,7 @@ import sys from pathlib import Path -from typing import Pattern +from re import Pattern try: import rich_click as click @@ -13,12 +13,15 @@ RICH_PRESENT = False -from robotidy import app +from robotidy import app, decorators, exceptions, files, skip, version from robotidy import config as config_module -from robotidy import decorators, exceptions, files, skip, version from robotidy.config import RawConfig, csv_list_type, validate_target_version from robotidy.rich_console import console -from robotidy.transformers import TransformConfigMap, TransformConfigParameter, load_transformers +from robotidy.transformers import ( + TransformConfigMap, + TransformConfigParameter, + load_transformers, +) from robotidy.utils import misc CLI_OPTIONS_LIST = [ @@ -55,7 +58,10 @@ "--endline", ], }, - {"name": "File exclusion", "options": ["--exclude", "--extend-exclude", "--skip-gitignore"]}, + { + "name": "File exclusion", + "options": ["--exclude", "--extend-exclude", "--skip-gitignore"], + }, skip.option_group, { "name": "Other", @@ -124,7 +130,11 @@ def print_transformer_docs(transformer): @decorators.optional_rich def print_description(name: str, target_version: int): # TODO: --desc works only for 
default transformers, it should also print custom transformer desc - transformers = load_transformers(TransformConfigMap([], [], []), allow_disabled=True, target_version=target_version) + transformers = load_transformers( + TransformConfigMap([], [], []), + allow_disabled=True, + target_version=target_version, + ) transformer_by_names = {transformer.name: transformer for transformer in transformers} if name == "all": for transformer in transformers: @@ -159,7 +169,11 @@ def print_transformers_list(global_config: config_module.MainConfig): table = Table(title="Transformers", header_style="bold red") table.add_column("Name", justify="left", no_wrap=True) table.add_column("Enabled") - transformers = load_transformers(TransformConfigMap([], [], []), allow_disabled=True, target_version=target_version) + transformers = load_transformers( + TransformConfigMap([], [], []), + allow_disabled=True, + target_version=target_version, + ) transformers.extend(_load_external_transformers(transformers, config.transformers_config, target_version)) for transformer in transformers: @@ -194,7 +208,11 @@ def generate_config(global_config: config_module.MainConfig): raise exceptions.MissingOptionalTomliWDependencyError() target_version = global_config.default.target_version config = global_config.default_loaded - transformers = load_transformers(TransformConfigMap([], [], []), allow_disabled=True, target_version=target_version) + transformers = load_transformers( + TransformConfigMap([], [], []), + allow_disabled=True, + target_version=target_version, + ) transformers.extend(_load_external_transformers(transformers, config.transformers_config, target_version)) toml_config = { diff --git a/robotidy/config.py b/robotidy/config.py index 59740546..4aa8e649 100644 --- a/robotidy/config.py +++ b/robotidy/config.py @@ -8,7 +8,7 @@ from collections import namedtuple from dataclasses import dataclass, field from pathlib import Path -from typing import Pattern +from re import Pattern try: from robot.api import Languages # RF 6.0 @@ -19,7 +19,12 @@ from click.core import ParameterSource from robotidy import exceptions, files, skip -from robotidy.transformers import TransformConfig, TransformConfigMap, convert_transform_config, load_transformers +from robotidy.transformers import ( + TransformConfig, + TransformConfigMap, + convert_transform_config, + load_transformers, +) from robotidy.utils import misc @@ -60,12 +65,11 @@ def __init__( def get_line_sep(line_sep): if line_sep == "windows": return "\r\n" - elif line_sep == "unix": + if line_sep == "unix": return "\n" - elif line_sep == "auto": + if line_sep == "auto": return "auto" - else: - return os.linesep + return os.linesep def validate_target_version(value: str | None) -> int | None: @@ -98,7 +102,12 @@ def convert_transformers_config( is_config: bool = False, ) -> list[TransformConfig]: return [ - TransformConfig(tr, force_include=force_included, custom_transformer=custom_transformer, is_config=is_config) + TransformConfig( + tr, + force_include=force_included, + custom_transformer=custom_transformer, + is_config=is_config, + ) for tr in config.get(param_name, ()) ] @@ -180,13 +189,17 @@ def from_cli(cls, ctx: click.Context, **kwargs): defined_in_cli.add(option) return cls(**kwargs, defined_in_cli=defined_in_cli) - def from_config_file(self, config: dict, config_path: Path) -> "RawConfig": - """Creates new RawConfig instance from dictionary. 
+ def from_config_file(self, config: dict, config_path: Path) -> RawConfig: + """ + Creates new RawConfig instance from dictionary. Dictionary key:values needs to be normalized and parsed to correct types. """ options_map = map_class_fields_with_their_types(self) - parsed_config = {"defined_in_config": {"defined_in_config", "config_path"}, "config_path": config_path} + parsed_config = { + "defined_in_config": {"defined_in_config", "config_path"}, + "config_path": config_path, + } for key, value in config.items(): # workaround to be able to use two option names for same action - backward compatibility change if key == "load_transformers": @@ -206,7 +219,10 @@ def from_config_file(self, config: dict, config_path: Path) -> "RawConfig": parsed_config[key] = [convert_transform_config(val, key) for val in value] elif key == "src": parsed_config[key] = tuple(value) - elif value_type in ("Pattern", Pattern): # future typing for 3.8 provides type as str + elif value_type in ( + "Pattern", + Pattern, + ): # future typing for 3.8 provides type as str parsed_config[key] = misc.validate_regex(value) else: parsed_config[key] = value @@ -214,8 +230,9 @@ def from_config_file(self, config: dict, config_path: Path) -> "RawConfig": from_config = RawConfig(**parsed_config) return self.merge_with_config_file(from_config) - def merge_with_config_file(self, config: "RawConfig") -> "RawConfig": - """Merge cli config with the configuration file config. + def merge_with_config_file(self, config: RawConfig) -> RawConfig: + """ + Merge cli config with the configuration file config. Use configuration file parameter value only if it was not defined in the cli already. """ @@ -253,7 +270,8 @@ def load_config_from_option(cli_config: RawConfig) -> RawConfig: return cli_config def get_sources(self, sources: tuple[str, ...]) -> tuple[str, ...] | None: - """Get list of sources to be transformed by Robotidy. + """ + Get list of sources to be transformed by Robotidy. If the sources tuple is empty, look for most common configuration file and load sources from there. """ @@ -273,7 +291,10 @@ def get_sources(self, sources: tuple[str, ...]) -> tuple[str, ...] 
| None: def get_sources_with_configs(self): sources = files.get_paths( - self.sources, self.default.exclude, self.default.extend_exclude, self.default.skip_gitignore + self.sources, + self.default.exclude, + self.default.extend_exclude, + self.default.skip_gitignore, ) for source in sources: if self.default.config: @@ -350,7 +371,7 @@ def set_color_mode(color: bool) -> bool: return "NO_COLOR" not in os.environ @classmethod - def from_raw_config(cls, raw_config: "RawConfig"): + def from_raw_config(cls, raw_config: RawConfig): skip_config = skip.SkipConfig( documentation=raw_config.skip_documentation, return_values=raw_config.skip_return_values, @@ -416,8 +437,8 @@ def load_transformers(self, transformers_config: TransformConfigMap, force_order ) for transformer in transformers: # inject global settings TODO: handle it better - setattr(transformer.instance, "formatting_config", self.formatting) - setattr(transformer.instance, "transformers", self.transformers_lookup) - setattr(transformer.instance, "languages", self.language) + transformer.instance.formatting_config = self.formatting + transformer.instance.transformers = self.transformers_lookup + transformer.instance.languages = self.language self.transformers.append(transformer.instance) self.transformers_lookup[transformer.name] = transformer.instance diff --git a/robotidy/disablers.py b/robotidy/disablers.py index 547b2566..5e568d2a 100644 --- a/robotidy/disablers.py +++ b/robotidy/disablers.py @@ -66,7 +66,12 @@ def is_line_start(node): class DisablersInFile: - def __init__(self, start_line: Optional[int], end_line: Optional[int], file_end: Optional[int] = None): + def __init__( + self, + start_line: Optional[int], + end_line: Optional[int], + file_end: Optional[int] = None, + ): self.start_line = start_line self.end_line = end_line self.file_end = file_end @@ -254,9 +259,8 @@ def visit_Statement(self, node): # noqa continue self.disablers.add_disabler(transformer, start_line, node.lineno) self.disablers_in_scope[index][transformer] = 0 - else: - if not self.disablers_in_scope[index].get(transformer): - self.disablers_in_scope[index][transformer] = node.lineno + elif not self.disablers_in_scope[index].get(transformer): + self.disablers_in_scope[index][transformer] = node.lineno else: # inline disabler for comment in node.get_tokens(Token.COMMENT): diff --git a/robotidy/exceptions.py b/robotidy/exceptions.py index 29d2418c..3b748b67 100644 --- a/robotidy/exceptions.py +++ b/robotidy/exceptions.py @@ -46,8 +46,8 @@ def __init__(self): class MissingOptionalTomliWDependencyError(RobotidyConfigError): def __init__(self): super().__init__( - f"Missing optional dependency: tomli_w. Install robotidy with extra `generate_config` " - f"profile:\n\npip install robotframework-tidy[generate_config]" + "Missing optional dependency: tomli_w. 
Install robotidy with extra `generate_config` " + "profile:\n\npip install robotframework-tidy[generate_config]" ) diff --git a/robotidy/files.py b/robotidy/files.py index c722f69e..10cc1b94 100644 --- a/robotidy/files.py +++ b/robotidy/files.py @@ -1,8 +1,10 @@ from __future__ import annotations +from collections.abc import Iterable, Iterator from functools import lru_cache from pathlib import Path -from typing import Any, Iterable, Iterator, Pattern +from re import Pattern +from typing import Any import pathspec @@ -19,9 +21,10 @@ CONFIG_NAMES = ("robotidy.toml", "pyproject.toml", DOTFILE_CONFIG) -@lru_cache() +@lru_cache def find_source_config_file(src: Path, ignore_git_dir: bool = False) -> Path | None: - """Find and return configuration file for the source path. + """ + Find and return configuration file for the source path. This method looks iteratively in source parents for directory that contains configuration file and returns its path. The lru_cache speeds up searching if there are multiple files in the same directory (they will @@ -41,9 +44,10 @@ def find_source_config_file(src: Path, ignore_git_dir: bool = False) -> Path | N return find_source_config_file(src.parent, ignore_git_dir) -@lru_cache() +@lru_cache def find_project_root(srcs: tuple[str, ...], ignore_git_dir: bool = False) -> Path: - """Return a directory containing .git, or robotidy.toml. + """ + Return a directory containing .git, or robotidy.toml. That directory will be a common parent of all files and directories passed in `srcs`. If no directory in the tree contains a marker that would specify it's the @@ -87,7 +91,7 @@ def read_pyproject_config(config_path: Path) -> dict[str, Any]: return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} -@lru_cache() +@lru_cache def get_gitignore(root: Path) -> pathspec.PathSpec: """Return a PathSpec matching gitignore content if present.""" gitignore = root / ".gitignore" @@ -128,7 +132,12 @@ def get_path_relative_to_project_root(path: Path, root_parent: Path) -> Path: return path -def get_paths(src: tuple[str, ...], exclude: Pattern | None, extend_exclude: Pattern | None, skip_gitignore: bool): +def get_paths( + src: tuple[str, ...], + exclude: Pattern | None, + extend_exclude: Pattern | None, + skip_gitignore: bool, +): root = find_project_root(src) if skip_gitignore: gitignore = None diff --git a/robotidy/skip.py b/robotidy/skip.py index 9c859c19..1e309b7f 100644 --- a/robotidy/skip.py +++ b/robotidy/skip.py @@ -1,7 +1,7 @@ from __future__ import annotations import re -from typing import Pattern +from re import Pattern import click from robot.api import Token @@ -114,7 +114,16 @@ def __init__(self, skip_config: SkipConfig): @staticmethod def parse_skip_settings(skip_config): - settings = {"settings", "arguments", "setup", "teardown", "timeout", "template", "return_statement", "tags"} + settings = { + "settings", + "arguments", + "setup", + "teardown", + "timeout", + "template", + "return_statement", + "tags", + } skip_settings = set() for setting in settings: if getattr(skip_config, setting): @@ -156,7 +165,10 @@ def section(self, name): documentation_option = click.option("--skip-documentation", is_flag=True, help="Skip formatting of documentation") return_values_option = click.option("--skip-return-values", is_flag=True, help="Skip formatting of return values") keyword_call_option = click.option( - "--skip-keyword-call", type=str, multiple=True, help="Keyword call name that should not be formatted" + "--skip-keyword-call", + type=str, + multiple=True, + 
help="Keyword call name that should not be formatted", ) keyword_call_pattern_option = click.option( "--skip-keyword-call-pattern", diff --git a/robotidy/transformers/AddMissingEnd.py b/robotidy/transformers/AddMissingEnd.py index 80029ef7..010f2f02 100644 --- a/robotidy/transformers/AddMissingEnd.py +++ b/robotidy/transformers/AddMissingEnd.py @@ -110,7 +110,8 @@ def fix_header_name(node, header_name): node.header.data_tokens[0].value = header_name def collect_inside_statements(self, node): - """Split statements from node for those that belong to it and outside nodes. + """ + Split statements from node for those that belong to it and outside nodes. In this example with missing END: FOR ${i} IN RANGE 10 diff --git a/robotidy/transformers/AlignSettingsSection.py b/robotidy/transformers/AlignSettingsSection.py index 0a884f36..b31de0c6 100644 --- a/robotidy/transformers/AlignSettingsSection.py +++ b/robotidy/transformers/AlignSettingsSection.py @@ -157,10 +157,8 @@ def calc_separator(self, index, up_to, indent_arg, token, look_up): ) * " " ) - else: - return (look_up[index] - len(token.value) + arg_indent + 4) * " " - else: - return self.formatting_config.space_count * " " + return (look_up[index] - len(token.value) + arg_indent + 4) * " " + return self.formatting_config.space_count * " " def create_look_up(self, statements): look_up = defaultdict(int) diff --git a/robotidy/transformers/AlignVariablesSection.py b/robotidy/transformers/AlignVariablesSection.py index 22b69634..2cb7eb89 100644 --- a/robotidy/transformers/AlignVariablesSection.py +++ b/robotidy/transformers/AlignVariablesSection.py @@ -123,8 +123,7 @@ def get_separator(self, index: int, up_to: int, token, look_up: dict[int, int]) if self.fixed_width: return max(self.fixed_width - len(token.value), self.formatting_config.space_count) * " " return (look_up[index] - len(token.value)) * " " - else: - return self.formatting_config.separator + return self.formatting_config.separator def create_look_up(self, statements) -> dict[int, int]: look_up = defaultdict(int) diff --git a/robotidy/transformers/IndentNestedKeywords.py b/robotidy/transformers/IndentNestedKeywords.py index 19257672..5f9f3767 100644 --- a/robotidy/transformers/IndentNestedKeywords.py +++ b/robotidy/transformers/IndentNestedKeywords.py @@ -63,7 +63,7 @@ def get_run_keyword(self, kw_name): kw_norm = misc.normalize_name(kw_name) return self.run_keywords.get(kw_norm, None) - def get_setting_lines(self, node, indent): # noqa + def get_setting_lines(self, node, indent): if self.skip.setting("any") or node.errors or not len(node.data_tokens) > 1: return None run_keyword = self.get_run_keyword(node.data_tokens[1].value) @@ -126,7 +126,11 @@ def visit_SuiteSetup(self, node): # noqa comments = misc.collect_comments_from_tokens(node.tokens, indent=None) separator = self.get_separator() new_line = misc.get_new_line() - tokens = [node.data_tokens[0], separator, *misc.join_tokens_with_token(lines[0][1], separator)] + tokens = [ + node.data_tokens[0], + separator, + *misc.join_tokens_with_token(lines[0][1], separator), + ] formatted_tokens = self.parse_keyword_lines(lines, tokens, new_line, eol=node.tokens[-1]) if self.node_was_transformed(node.tokens, formatted_tokens): node.tokens = formatted_tokens @@ -144,7 +148,12 @@ def visit_Setup(self, node): # noqa indent = node.tokens[0] separator = self.get_separator() new_line = misc.get_new_line(indent) - tokens = [indent, node.data_tokens[0], separator, *misc.join_tokens_with_token(lines[0][1], separator)] + tokens = [ + indent, + 
node.data_tokens[0], + separator, + *misc.join_tokens_with_token(lines[0][1], separator), + ] comment = misc.merge_comments_into_one(node.tokens) if comment: # need to add comments on first line for [Setup] / [Teardown] settings @@ -211,7 +220,8 @@ def split_too_long_lines(self, lines, indent): return new_lines def calculate_line_indent(self, column, starting_indent): - """Calculate with of the continuation indent. + """ + Calculate with of the continuation indent. For example following line will have 4 + 3 + 2x column x 4 indent with: diff --git a/robotidy/transformers/InlineIf.py b/robotidy/transformers/InlineIf.py index c722fa99..4be97e09 100644 --- a/robotidy/transformers/InlineIf.py +++ b/robotidy/transformers/InlineIf.py @@ -1,6 +1,15 @@ from itertools import chain -from robot.api.parsing import Comment, ElseHeader, ElseIfHeader, End, If, IfHeader, KeywordCall, Token +from robot.api.parsing import ( + Comment, + ElseHeader, + ElseIfHeader, + End, + If, + IfHeader, + KeywordCall, + Token, +) try: from robot.api.parsing import Break, Continue, InlineIfHeader, ReturnStatement @@ -126,7 +135,7 @@ def is_shorter_than_limit(self, inline_if): def no_end(node): if not node.end: return True - if not len(node.end.tokens) == 1: + if len(node.end.tokens) != 1: return False return not node.end.tokens[0].value @@ -179,7 +188,11 @@ def inline_if_from_branch(self, node, indent): # check for ElseIfHeader first since it's child of IfHeader class if isinstance(node.header, ElseIfHeader): header = ElseIfHeader( - [Token(Token.ELSE_IF), Token(Token.SEPARATOR, separator), Token(Token.ARGUMENT, node.header.condition)] + [ + Token(Token.ELSE_IF), + Token(Token.SEPARATOR, separator), + Token(Token.ARGUMENT, node.header.condition), + ] ) elif isinstance(node.header, IfHeader): tokens = [Token(Token.SEPARATOR, indent)] @@ -202,7 +215,10 @@ def inline_if_from_branch(self, node, indent): @staticmethod def to_inline_keyword(keyword, separator, last_token): - tokens = [Token(Token.SEPARATOR, separator), Token(Token.KEYWORD, keyword.keyword)] + tokens = [ + Token(Token.SEPARATOR, separator), + Token(Token.KEYWORD, keyword.keyword), + ] for arg in keyword.get_tokens(Token.ARGUMENT): tokens.extend([Token(Token.SEPARATOR, separator), arg]) tokens.append(last_token) @@ -321,11 +337,15 @@ def handle_inline_if_create(self, node, indent, assign): else_found = False if isinstance(node.header, InlineIfHeader): header = IfHeader.from_params( - condition=node.condition, indent=indent, separator=self.formatting_config.separator + condition=node.condition, + indent=indent, + separator=self.formatting_config.separator, ) elif isinstance(node.header, ElseIfHeader): header = ElseIfHeader.from_params( - condition=node.condition, indent=indent, separator=self.formatting_config.separator + condition=node.condition, + indent=indent, + separator=self.formatting_config.separator, ) else: header = ElseHeader.from_params(indent=indent) diff --git a/robotidy/transformers/MergeAndOrderSections.py b/robotidy/transformers/MergeAndOrderSections.py index 2db12cd0..10aadfda 100644 --- a/robotidy/transformers/MergeAndOrderSections.py +++ b/robotidy/transformers/MergeAndOrderSections.py @@ -94,7 +94,7 @@ def parse_order(self, order): } parsed_order = [self.LANGUAGE_MARKER_SECTION] for part in parts: - parsed_order.append(map_names.get(part, None)) + parsed_order.append(map_names.get(part)) # all sections need to be here, and either tasks or test cases or both of them any_of_sections = [Token.TESTCASE_HEADER, "TASK HEADER"] required_sections = 
[section for section in default_order if section not in any_of_sections] @@ -148,7 +148,8 @@ def normalize_eol(tokens): return new_tokens def from_last_section(self, node): - """Last node use different logic for new line marker. It is not possible to preserve all empty lines, but + """ + Last node use different logic for new line marker. It is not possible to preserve all empty lines, but we need at least ensure that following code:: *** Test Case *** diff --git a/robotidy/transformers/NormalizeSeparators.py b/robotidy/transformers/NormalizeSeparators.py index 3ebb3ee4..efc6d1d5 100644 --- a/robotidy/transformers/NormalizeSeparators.py +++ b/robotidy/transformers/NormalizeSeparators.py @@ -128,8 +128,7 @@ def visit_Statement(self, statement): # noqa has_pipes = statement.tokens[0].value.startswith("|") if has_pipes or not self.flatten_lines: return self.handle_spaces(statement, has_pipes) - else: - return self.handle_spaces_and_flatten_lines(statement) + return self.handle_spaces_and_flatten_lines(statement) @staticmethod def has_trailing_sep(tokens): diff --git a/robotidy/transformers/OrderSettingsSection.py b/robotidy/transformers/OrderSettingsSection.py index 3defdbbb..5cb1908c 100644 --- a/robotidy/transformers/OrderSettingsSection.py +++ b/robotidy/transformers/OrderSettingsSection.py @@ -92,7 +92,11 @@ def __init__( "tags", tags_order, (Token.FORCE_TAGS, Token.DEFAULT_TAGS), - {"force_tags": Token.FORCE_TAGS, "test_tags": Token.FORCE_TAGS, "default_tags": Token.DEFAULT_TAGS}, + { + "force_tags": Token.FORCE_TAGS, + "test_tags": Token.FORCE_TAGS, + "default_tags": Token.DEFAULT_TAGS, + }, ) def parse_group_order(self, order): @@ -137,7 +141,7 @@ def visit_File(self, node): # noqa @skip_section_if_disabled def visit_SettingSection(self, node): # noqa if not node.body: - return + return None if node is self.last_section and not isinstance(node.body[-1], EmptyLine): node.body[-1] = self.fix_eol(node.body[-1]) comments, errors = [], [] diff --git a/robotidy/transformers/RenameKeywords.py b/robotidy/transformers/RenameKeywords.py index 7c3caac0..fc1c6d56 100644 --- a/robotidy/transformers/RenameKeywords.py +++ b/robotidy/transformers/RenameKeywords.py @@ -149,7 +149,10 @@ def remove_underscores_and_capitalize(self, value: str) -> str: # capitalize first letter of every word, leave rest untouched for index, word in enumerate(split_words): if not word: - if index in (0, len(split_words) - 1): # leading and trailing whitespace + if index in ( + 0, + len(split_words) - 1, + ): # leading and trailing whitespace words.append("") else: words.append(word[0].upper() + word[1:]) @@ -195,11 +198,11 @@ def visit_KeywordCall(self, node): # noqa def parse_run_keyword(self, tokens): if not tokens: - return + return None self.rename_node(tokens[0], is_keyword_call=True) run_keyword = self.get_run_keyword(tokens[0].value) if not run_keyword: - return + return None tokens = tokens[run_keyword.resolve :] if run_keyword.branches: if "ELSE IF" in run_keyword.branches: @@ -210,7 +213,7 @@ def parse_run_keyword(self, tokens): prefix, branch, tokens = misc.split_on_token_value(tokens, "ELSE", 1) self.parse_run_keyword(prefix) self.parse_run_keyword(tokens) - return + return None elif run_keyword.split_on_and: return self.split_on_and(tokens) self.parse_run_keyword(tokens) diff --git a/robotidy/transformers/RenameTestCases.py b/robotidy/transformers/RenameTestCases.py index cf2d356f..b1ed168a 100644 --- a/robotidy/transformers/RenameTestCases.py +++ b/robotidy/transformers/RenameTestCases.py @@ -94,6 +94,7 @@ 
class RenameTestCases(Transformer): No Operation ``` """ + ENABLED = False def __init__( diff --git a/robotidy/transformers/RenameVariables.py b/robotidy/transformers/RenameVariables.py index 600d42f2..26749db1 100644 --- a/robotidy/transformers/RenameVariables.py +++ b/robotidy/transformers/RenameVariables.py @@ -2,7 +2,7 @@ import re from enum import Enum -from typing import Pattern +from re import Pattern from robot.api.parsing import Arguments, Token from robot.errors import VariableError @@ -14,7 +14,12 @@ from robotidy.transformers import Transformer from robotidy.utils import misc, variable_matcher -SET_GLOBAL_VARIABLES = {"settestvariable", "settaskvariable", "setsuitevariable", "setglobalvariable"} +SET_GLOBAL_VARIABLES = { + "settestvariable", + "settaskvariable", + "setsuitevariable", + "setglobalvariable", +} SET_LOCAL_VARIABLE = "setlocalvariable" @@ -56,7 +61,8 @@ def is_set_local_variable(keyword: str) -> bool: def is_nested_variable(variable: str) -> bool: - """Checks if variable name is nested. + """ + Checks if variable name is nested. name -> not nested ${name} -> not nested @@ -100,7 +106,7 @@ def __init__(self): self._global = set() @staticmethod - def _get_var_name(variable: str) -> "str|None": + def _get_var_name(variable: str) -> str | None: if len(variable) > 1 and variable[0] in "$@&" and variable[1] != "{": variable = f"{variable[0]}{{{variable[1:]}}}" match = search_variable(variable, ignore_errors=True) @@ -113,7 +119,8 @@ def add_global(self, variable: str): self._global.add(misc.normalize_name(var_name)) def add_local(self, variable: str, split_pattern: bool = False): - """Add variable name to local cache. + """ + Add variable name to local cache. If the variable is embedded argument, it can contain pattern we need to ignore (${var:[^pattern]}) """ @@ -277,23 +284,11 @@ def visit_LibraryImport(self, node): # noqa ) return self.generic_visit(node) - visit_Tags = ( - visit_DefaultTags - ) = ( - visit_TestTags - ) = ( - visit_ForceTags - ) = ( - visit_Metadata - ) = ( - visit_SuiteSetup - ) = ( + visit_Tags = visit_DefaultTags = visit_TestTags = visit_ForceTags = visit_Metadata = visit_SuiteSetup = ( visit_SuiteTeardown - ) = ( - visit_TestSetup - ) = ( - visit_TestTeardown - ) = visit_TestTemplate = visit_TestTimeout = visit_VariablesImport = visit_ResourceImport = visit_LibraryImport + ) = visit_TestSetup = visit_TestTeardown = visit_TestTemplate = visit_TestTimeout = visit_VariablesImport = ( + visit_ResourceImport + ) = visit_LibraryImport @skip_if_disabled def visit_Setup(self, node): # noqa @@ -301,9 +296,9 @@ def visit_Setup(self, node): # noqa data_token.value = self.rename_value(data_token.value, variable_case=VariableCase.AUTO, is_var=False) return self.generic_visit(node) - visit_Teardown = ( - visit_Timeout - ) = visit_Template = visit_Return = visit_ReturnStatement = visit_ReturnSetting = visit_Setup + visit_Teardown = visit_Timeout = visit_Template = visit_Return = visit_ReturnStatement = visit_ReturnSetting = ( + visit_Setup + ) @skip_if_disabled def visit_Variable(self, node): # noqa @@ -312,11 +307,15 @@ def visit_Variable(self, node): # noqa for data_token in node.data_tokens: if data_token.type == Token.VARIABLE: data_token.value = self.rename_value( - data_token.value, variable_case=self.variables_section_case, is_var=True + data_token.value, + variable_case=self.variables_section_case, + is_var=True, ) elif data_token.type == Token.ARGUMENT: data_token.value = self.rename_value( - data_token.value, variable_case=self.variables_section_case, 
is_var=False + data_token.value, + variable_case=self.variables_section_case, + is_var=False, ) return node @@ -329,7 +328,7 @@ def visit_TestCase(self, node): # noqa def visit_TemplateArguments(self, node): # noqa for arg_template in node.get_tokens(Token.ARGUMENT): arg_template.value = self.rename_value(arg_template.value, variable_case=VariableCase.AUTO, is_var=False) - return self.generic_visit(node) # noqa + return self.generic_visit(node) @skip_if_disabled def visit_TestCaseName(self, node): # noqa diff --git a/robotidy/transformers/ReplaceBreakContinue.py b/robotidy/transformers/ReplaceBreakContinue.py index 1f07cc61..8d70f4dc 100644 --- a/robotidy/transformers/ReplaceBreakContinue.py +++ b/robotidy/transformers/ReplaceBreakContinue.py @@ -1,4 +1,4 @@ -from typing import Iterable +from collections.abc import Iterable from robot.api.parsing import Token @@ -74,11 +74,11 @@ def visit_KeywordCall(self, node): # noqa return node if normalized_name == "continueforloop": return self.create_statement_from_tokens(statement=Continue, tokens=node.tokens[2:], indent=node.tokens[0]) - elif normalized_name == "exitforloop": + if normalized_name == "exitforloop": return self.create_statement_from_tokens(statement=Break, tokens=node.tokens[2:], indent=node.tokens[0]) - elif normalized_name == "continueforloopif": + if normalized_name == "continueforloopif": return misc.wrap_in_if_and_replace_statement(node, Continue, self.formatting_config.separator) - elif normalized_name == "exitforloopif": + if normalized_name == "exitforloopif": return misc.wrap_in_if_and_replace_statement(node, Break, self.formatting_config.separator) return node diff --git a/robotidy/transformers/ReplaceReturns.py b/robotidy/transformers/ReplaceReturns.py index 01cca30a..d562f356 100644 --- a/robotidy/transformers/ReplaceReturns.py +++ b/robotidy/transformers/ReplaceReturns.py @@ -77,7 +77,7 @@ def visit_KeywordCall(self, node): # noqa return misc.create_statement_from_tokens( statement=ReturnStatement, tokens=node.tokens[2:], indent=node.tokens[0] ) - elif normalized_name == "returnfromkeywordif": + if normalized_name == "returnfromkeywordif": return misc.wrap_in_if_and_replace_statement(node, ReturnStatement, self.formatting_config.separator) return node diff --git a/robotidy/transformers/ReplaceRunKeywordIf.py b/robotidy/transformers/ReplaceRunKeywordIf.py index 5b200d59..b1927d6c 100644 --- a/robotidy/transformers/ReplaceRunKeywordIf.py +++ b/robotidy/transformers/ReplaceRunKeywordIf.py @@ -150,7 +150,7 @@ def create_keywords(self, arg_tokens, assign, indent): self.args_to_keyword(keyword[1:], assign, indent) for keyword in self.split_args_on_delimiters(arg_tokens, ("AND",)) ] - elif misc.is_var(keyword_name): + if misc.is_var(keyword_name): keyword_token = Token(Token.KEYWORD_NAME, "Run Keyword") arg_tokens = [keyword_token] + arg_tokens return [self.args_to_keyword(arg_tokens, assign, indent)] diff --git a/robotidy/transformers/ReplaceWithVAR.py b/robotidy/transformers/ReplaceWithVAR.py index 758e548e..b3b5572a 100644 --- a/robotidy/transformers/ReplaceWithVAR.py +++ b/robotidy/transformers/ReplaceWithVAR.py @@ -1,7 +1,17 @@ from __future__ import annotations + from typing import TYPE_CHECKING -from robot.api.parsing import Comment, ElseHeader, ElseIfHeader, End, If, IfHeader, KeywordCall, Token +from robot.api.parsing import ( + Comment, + ElseHeader, + ElseIfHeader, + End, + If, + IfHeader, + KeywordCall, + Token, +) from robot.utils.escaping import split_from_equals from robot.variables.search import 
is_dict_variable, is_list_variable @@ -220,11 +230,19 @@ def replace_set_variable(self, node, kw_name: str, indent: str, assign: list[str if len(values) > 1: var_name = "@" + var_name[1:] return Var.from_params( - name=var_name, value=values, separator=self.formatting_config.separator, indent=indent, scope=scope + name=var_name, + value=values, + separator=self.formatting_config.separator, + indent=indent, + scope=scope, ) return [ Var.from_params( - name=var_assign, value=value, separator=self.formatting_config.separator, indent=indent, scope=scope + name=var_assign, + value=value, + separator=self.formatting_config.separator, + indent=indent, + scope=scope, ) for var_assign, value in zip(assign, values) ] @@ -252,7 +270,11 @@ def replace_set_variable_scope(self, node, kw_name: str, indent: str, assign: li values = [var_name] scope = scope.upper() if self.explicit_local or scope != "local" else None return Var.from_params( - name=var_name, value=values, separator=self.formatting_config.separator, indent=indent, scope=scope + name=var_name, + value=values, + separator=self.formatting_config.separator, + indent=indent, + scope=scope, ) def replace_set_variable_if_kw(self, node, kw_name: str, indent: str, assign: list[str] | None = None): @@ -287,7 +309,11 @@ def replace_set_variable_if_kw(self, node, kw_name: str, indent: str, assign: li else: condition, value = args[:2] variable = Var.from_params( - name=var_name, value=value, separator=separator, indent=in_block_indent, scope=scope + name=var_name, + value=value, + separator=separator, + indent=in_block_indent, + scope=scope, ) if tail: if condition: @@ -325,7 +351,13 @@ def replace_catenate_kw(self, node, kw_name: str, indent: str, assign: list[str] else: separator = "${SPACE}" scope = "LOCAL" if self.explicit_local else None - return Var.from_params(name=var_name, value=values, indent=indent, value_separator=separator, scope=scope) + return Var.from_params( + name=var_name, + value=values, + indent=indent, + value_separator=separator, + scope=scope, + ) def replace_create_list_kw(self, node, kw_name: str, indent: str, assign: list[str] | None = None): assign = assign or self.get_assign_names(node.assign) @@ -340,7 +372,11 @@ def replace_create_list_kw(self, node, kw_name: str, indent: str, assign: list[s values = ["@{EMPTY}"] scope = "LOCAL" if self.explicit_local else None return Var.from_params( - name=var_name, value=values, separator=self.formatting_config.separator, indent=indent, scope=scope + name=var_name, + value=values, + separator=self.formatting_config.separator, + indent=indent, + scope=scope, ) def _split_dict_items(self, items: list[str]): @@ -378,5 +414,9 @@ def replace_create_dictionary_kw(self, node, kw_name: str, indent: str, assign: values = ["&{EMPTY}"] scope = "LOCAL" if self.explicit_local else None return Var.from_params( - name=var_name, value=values, separator=self.formatting_config.separator, indent=indent, scope=scope + name=var_name, + value=values, + separator=self.formatting_config.separator, + indent=indent, + scope=scope, ) diff --git a/robotidy/transformers/SplitTooLongLine.py b/robotidy/transformers/SplitTooLongLine.py index 3c1eae30..77cca7de 100644 --- a/robotidy/transformers/SplitTooLongLine.py +++ b/robotidy/transformers/SplitTooLongLine.py @@ -76,7 +76,14 @@ class SplitTooLongLine(Transformer): """ IGNORED_WHITESPACE = {Token.EOL, Token.CONTINUATION} - HANDLES_SKIP = frozenset({"skip_comments", "skip_keyword_call", "skip_keyword_call_pattern", "skip_sections"}) + HANDLES_SKIP = frozenset( + { + 
"skip_comments", + "skip_keyword_call", + "skip_keyword_call_pattern", + "skip_sections", + } + ) def __init__( self, @@ -176,7 +183,11 @@ def visit_Var(self, node): # noqa separator = Token(Token.SEPARATOR, self.formatting_config.separator) line = [indent, node.data_tokens[0], separator, var_name] tokens, comments = self.split_tokens( - node.tokens, line, self.split_on_every_value, indent=indent, split_types=(Token.ARGUMENT, Token.OPTION) + node.tokens, + line, + self.split_on_every_value, + indent=indent, + split_types=(Token.ARGUMENT, Token.OPTION), ) comments = [Comment([comment, EOL]) for comment in comments] node.tokens = tokens @@ -247,7 +258,14 @@ def split_to_multiple_lines(tokens, indent, separator): yield EOL first = False - def split_tokens(self, tokens, line, split_on, indent=None, split_types: tuple = (Token.ARGUMENT,)): + def split_tokens( + self, + tokens, + line, + split_on, + indent=None, + split_types: tuple = (Token.ARGUMENT,), + ): separator = Token(Token.SEPARATOR, self.formatting_config.separator) align_new_line = self.align_new_line and not split_on if align_new_line: @@ -286,7 +304,8 @@ def split_tokens(self, tokens, line, split_on, indent=None, split_types: tuple = @staticmethod def join_split_comments(comments: list, token: Token, last_separator: Token): - """Join split comments when splitting line. + """ + Join split comments when splitting line. AST splits comments with separators, e.g. "# Comment rest" -> ["# Comment", " ", "rest"]. Notice the third value not starting with a hash - we need to join such comment with previous comment. @@ -338,7 +357,10 @@ def split_keyword_call(self, node): else: head = [] tokens, comments = self.split_tokens( - node.tokens[node.tokens.index(keyword) + 1 :], line, self.split_on_every_arg, indent + node.tokens[node.tokens.index(keyword) + 1 :], + line, + self.split_on_every_arg, + indent, ) head.extend(tokens) comment_tokens = [] diff --git a/robotidy/transformers/Translate.py b/robotidy/transformers/Translate.py index 9f46e6d7..2449d7a2 100644 --- a/robotidy/transformers/Translate.py +++ b/robotidy/transformers/Translate.py @@ -79,7 +79,11 @@ def __init__( self.language, self.settings = None, None self._bdd_mapping = None self.bdd = self.get_translated_bdd( - but_alternative, given_alternative, and_alternative, then_alternative, when_alternative + but_alternative, + given_alternative, + and_alternative, + then_alternative, + when_alternative, ) @property @@ -169,7 +173,7 @@ def visit_KeywordCall(self, node): # noqa if not self.translate_bdd or not node.keyword: return node prefix, *name = node.keyword.split(maxsplit=1) - if not name or not prefix.title() in self.languages.bdd_prefixes: + if not name or prefix.title() not in self.languages.bdd_prefixes: return node english_bdd = self.bdd_mapping.get(prefix.title(), None) if not english_bdd: @@ -235,28 +239,8 @@ def visit_Setup(self, node): # noqa node.data_tokens[0].value = translated_value return self.generic_visit(node) - visit_Teardown = ( - visit_Template - ) = ( - visit_Timeout - ) = ( - visit_Arguments - ) = ( - visit_Tags - ) = ( - visit_Documentation - ) = ( + visit_Teardown = visit_Template = visit_Timeout = visit_Arguments = visit_Tags = visit_Documentation = ( visit_Metadata - ) = ( - visit_SuiteSetup - ) = ( - visit_SuiteTeardown - ) = ( - visit_TestSetup - ) = ( - visit_TestTeardown - ) = ( - visit_TestTemplate - ) = ( + ) = visit_SuiteSetup = visit_SuiteTeardown = visit_TestSetup = visit_TestTeardown = visit_TestTemplate = ( visit_TestTimeout ) = visit_KeywordTags = 
visit_LibraryImport = visit_VariablesImport = visit_ResourceImport = visit_Setup diff --git a/robotidy/transformers/__init__.py b/robotidy/transformers/__init__.py index eed6d66a..4a541927 100644 --- a/robotidy/transformers/__init__.py +++ b/robotidy/transformers/__init__.py @@ -8,14 +8,15 @@ If you don't want to run your transformer by default and only when calling robotidy with --transform YourTransformer then add ``ENABLED = False`` class attribute inside. """ + from __future__ import annotations import copy import inspect import pathlib import textwrap +from collections.abc import Iterable from itertools import chain -from typing import Iterable try: import rich_click as click @@ -26,7 +27,11 @@ from robot.errors import DataError from robot.utils.importer import Importer -from robotidy.exceptions import ImportTransformerError, InvalidParameterError, InvalidParameterFormatError +from robotidy.exceptions import ( + ImportTransformerError, + InvalidParameterError, + InvalidParameterFormatError, +) from robotidy.skip import Skip, SkipConfig from robotidy.utils import misc @@ -97,7 +102,7 @@ def convert_args(self, args): converted[param] = value return converted - def join_transformer_configs(self, transformer_config: "TransformConfig"): + def join_transformer_configs(self, transformer_config: TransformConfig): """ Join 2 configurations i.e. from --transform, --load-transformers or --config. """ @@ -113,7 +118,7 @@ def join_transformer_configs(self, transformer_config: "TransformConfig"): self.custom_transformer = self.custom_transformer or transformer_config.custom_transformer self.join_args(transformer_config) - def join_args(self, transformer_config: "TransformConfig"): + def join_args(self, transformer_config: TransformConfig): self.args.update(transformer_config.args) @@ -196,7 +201,7 @@ def validate_config_names(self): transformer_names = [name for name, transf in self.transformers.items() if not transf.is_config_only] similar = similar_finder.find_similar(transf_name, transformer_names) raise ImportTransformerError( - f"Configuring transformer '{transf_name}' failed. " f"Verify if correct name was provided.{similar}" + f"Configuring transformer '{transf_name}' failed. 
Verify if correct name was provided.{similar}" ) from None @@ -205,7 +210,10 @@ def convert_transform_config(value: str, param_name: str) -> TransformConfig: custom_transformer = param_name == "custom_transformers" is_config = param_name == "configure" return TransformConfig( - value, force_include=force_included, custom_transformer=custom_transformer, is_config=is_config + value, + force_include=force_included, + custom_transformer=custom_transformer, + is_config=is_config, ) @@ -325,14 +333,20 @@ def import_transformer(name, config: TransformConfigMap, skip) -> Iterable[Trans imported = IMPORTER.import_class_or_module(name) if inspect.isclass(imported): yield create_transformer_instance( - imported, short_name, config.get_args(name, short_name, import_path), skip + imported, + short_name, + config.get_args(name, short_name, import_path), + skip, ) else: transformers = load_transformers_from_module(imported) transformers = order_transformers(transformers, imported) for name, transformer_class in transformers.items(): yield create_transformer_instance( - transformer_class, name, config.get_args(name, short_name, import_path), skip + transformer_class, + name, + config.get_args(name, short_name, import_path), + skip, ) except DataError: similar_finder = misc.RecommendationFinder() @@ -375,8 +389,10 @@ def resolve_argument_names(argument_names: list[str], handles_skip): def assert_handled_arguments(transformer, args, argument_names): - """Check if provided arguments are handled by given transformer. - Raises InvalidParameterError if arguments does not match.""" + """ + Check if provided arguments are handled by given transformer. + Raises InvalidParameterError if arguments does not match. + """ arg_names = [arg.split("=")[0] for arg in args] for arg in arg_names: # it's fine to only check for first non-matching parameter @@ -486,11 +502,10 @@ def load_transformers( for container in import_transformer(name, transformers_config, skip): if transformers_config.force_included_only: enabled = container.args.get("enabled", True) + elif "enabled" in container.args: + enabled = container.args["enabled"] else: - if "enabled" in container.args: - enabled = container.args["enabled"] - else: - enabled = getattr(container.instance, "ENABLED", True) + enabled = getattr(container.instance, "ENABLED", True) if not (enabled or allow_disabled): continue if can_run_in_robot_version( @@ -501,7 +516,7 @@ def load_transformers( container.enabled_by_default = enabled loaded_transformers.append(container) elif allow_version_mismatch and allow_disabled: - setattr(container.instance, "ENABLED", False) + container.instance.ENABLED = False container.enabled_by_default = False loaded_transformers.append(container) return loaded_transformers diff --git a/robotidy/transformers/aligners_core.py b/robotidy/transformers/aligners_core.py index d8d712e5..30841b87 100644 --- a/robotidy/transformers/aligners_core.py +++ b/robotidy/transformers/aligners_core.py @@ -20,7 +20,6 @@ class AlignKeywordsTestsSection(Transformer): - ENABLED = False DEFAULT_WIDTH = 24 HANDLES_SKIP = frozenset( @@ -199,10 +198,13 @@ def visit_Documentation(self, node): # noqa len(prev_token.value) + self.formatting_config.space_count ) - len(prev_token.value) else: - separator_len = max(width - len(prev_token.value), self.formatting_config.space_count) + separator_len = max( + width - len(prev_token.value), + self.formatting_config.space_count, + ) token.value = " " * separator_len break - elif token.type != Token.ARGUMENT: # ... 
# comment edge case + if token.type != Token.ARGUMENT: # ... # comment edge case prev_token = token return node @@ -250,7 +252,13 @@ def visit_KeywordCall(self, node): # noqa def should_skip_return_values(self, line: list[Token], possible_assign: bool) -> bool: return possible_assign and self.skip.return_values and any(token.type == Token.ASSIGN for token in line) - def align_node(self, node, check_length: bool, possible_assign: bool = False, is_setting: bool = False): + def align_node( + self, + node, + check_length: bool, + possible_assign: bool = False, + is_setting: bool = False, + ): indent = Token(Token.SEPARATOR, self.indent * self.formatting_config.indent) aligned_lines = [] for line in node.lines: @@ -271,11 +279,13 @@ def split_assign(self, line: list, possible_assign: bool) -> tuple[list, list, i This method returns return values together with their separators in case we don't want to align them. - Returns: + Returns + ------- A tuple, containing: - return values, - remaining tokens, - widths of the return values (used to determine next alignment column) + """ if not self.should_skip_return_values(line, possible_assign): return [], get_data_tokens(line), 0 @@ -446,13 +456,21 @@ def align_tokens(self, tokens: list, skip_width: int, is_setting: bool): misaligned_cols += 1 while prev_overflow_len > width: column += 1 - width = self.get_width(column, override_default_zero=True, is_setting=is_setting) + width = self.get_width( + column, + override_default_zero=True, + is_setting=is_setting, + ) prev_overflow_len -= width misaligned_cols += 1 if self.too_many_misaligned_cols(misaligned_cols, prev_overflow_len, tokens, index): # check if next col fits next token with prev_overflow, if not, jump to the next column next_token = tokens[index + 1] - next_width = self.get_width(column + 1, override_default_zero=True, is_setting=is_setting) + next_width = self.get_width( + column + 1, + override_default_zero=True, + is_setting=is_setting, + ) required_width = next_width - prev_overflow_len - len(next_token.value) if required_width < min_separator: column += 1 @@ -461,7 +479,11 @@ def align_tokens(self, tokens: list, skip_width: int, is_setting: bool): else: # "overflow" while misc.round_to_four(len(token.value) + min_separator) > width: column += 1 - width += self.get_width(column, override_default_zero=True, is_setting=is_setting) + width += self.get_width( + column, + override_default_zero=True, + is_setting=is_setting, + ) separator_len = width - len(token.value) separator_len = max( min_separator, separator_len @@ -586,11 +608,17 @@ def calculate_column_widths(self): self.settings_widths[column] = max(filter_widths, default=max_width) def get_and_store_columns_widths( - self, node, widths: defaultdict, up_to: int = 0, filter_tokens: frozenset | None = None + self, + node, + widths: defaultdict, + up_to: int = 0, + filter_tokens: frozenset | None = None, ): """ Save columns widths to use them later to find the longest token in column. 
+        Args:
+        ----
             node: Analyzed node
             widths: Where column widths should be stored (either generic widths or settings widths)
             up_to: If set, only parse first up_to columns
diff --git a/robotidy/utils/misc.py b/robotidy/utils/misc.py
index e4501c19..648970bd 100644
--- a/robotidy/utils/misc.py
+++ b/robotidy/utils/misc.py
@@ -4,9 +4,10 @@
 import difflib
 import os
 import re
+from collections.abc import Iterable
 from enum import Enum
 from functools import total_ordering
-from typing import Iterable, Pattern
+from re import Pattern
 
 import click
 
@@ -121,7 +122,8 @@ def after_last_dot(name):
 
 
 def split_args_from_name_or_path(name):
-    """Split arguments embedded to name or path like ``Example:arg1:arg2``.
+    """
+    Split arguments embedded to name or path like ``Example:arg1:arg2``.
 
     The separator can be either colon ``:`` or semicolon ``;``. If both are
     used, the first one is considered to be the separator.
@@ -198,7 +200,7 @@ def tokens_by_lines(node):
 
 
 def left_align(node):
-    """remove leading separator token"""
+    """Remove leading separator token"""
     tokens = list(node.tokens)
     if tokens:
         tokens[0].value = tokens[0].value.lstrip(" \t")
@@ -372,8 +374,7 @@ def collect_comments_from_tokens(tokens, indent):
     eol = Token(Token.EOL)
     if indent:
         return [Comment([indent, comment, eol]) for comment in comments]
-    else:
-        return [Comment([comment, eol]) for comment in comments]
+    return [Comment([comment, eol]) for comment in comments]
 
 
 def flatten_multiline(tokens, separator, remove_comments: bool = False):
@@ -416,8 +417,7 @@ def split_on_token_value(tokens, value, resolve: int):
             branch = tokens[index : index + resolve]
             remainder = tokens[index + resolve :]
             return prefix, branch, remainder
-    else:
-        return [], [], tokens
+    return [], [], tokens
 
 
 def join_tokens_with_token(tokens, token):
diff --git a/robotidy/utils/variable_matcher.py b/robotidy/utils/variable_matcher.py
index 1e821e08..631799e5 100644
--- a/robotidy/utils/variable_matcher.py
+++ b/robotidy/utils/variable_matcher.py
@@ -3,10 +3,15 @@
 try:
     from robot.variables import VariableMatches
 except ImportError:
-    from typing import Iterator, Sequence
+    from collections.abc import Iterator, Sequence
 
     class VariableMatches:
-        def __init__(self, string: str, identifiers: Sequence[str] = "$@&%", ignore_errors: bool = False):
+        def __init__(
+            self,
+            string: str,
+            identifiers: Sequence[str] = "$@&%",
+            ignore_errors: bool = False,
+        ):
             self.string = string
             self.identifiers = identifiers
             self.ignore_errors = ignore_errors
diff --git a/setup.py b/setup.py
index 45ead73b..a1e3782c 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,6 @@
 License :: OSI Approved :: Apache Software License
 Operating System :: OS Independent
 Programming Language :: Python
-Programming Language :: Python :: 3.8
 Programming Language :: Python :: 3.9
 Programming Language :: Python :: 3.10
 Programming Language :: Python :: 3.11
@@ -39,7 +38,7 @@
     keywords="robotframework",
     packages=["robotidy"],
     include_package_data=True,
-    python_requires=">=3.8",
+    python_requires=">=3.9",
    install_requires=[
         "robotframework>=4.0,<8.0",
         "click==8.1.*",
diff --git a/tests/atest/configuration_files/multiple_configs/test_multiple_configs.py b/tests/atest/configuration_files/multiple_configs/test_multiple_configs.py
index b7f86faa..ed19dc70 100644
--- a/tests/atest/configuration_files/multiple_configs/test_multiple_configs.py
+++ b/tests/atest/configuration_files/multiple_configs/test_multiple_configs.py
@@ -11,7 +11,7 @@ def test(self, tmpdir):
         self.compare_files(tmpdir, "expected")
 
     def test_with_config_option(self, tmpdir):
-        """config option should stop from loading other configuration files."""
+        """Config option should stop from loading other configuration files."""
         config_path = tmpdir / self.TEST_DIR / "root" / "pyproject.toml"
         args = ["--config", str(config_path)]
         self.run_tidy(tmpdir, args)
diff --git a/tests/atest/transformers/NormalizeNewLines/test_normalize_new_lines.py b/tests/atest/transformers/NormalizeNewLines/test_normalize_new_lines.py
index 4dd48568..f6ad7552 100644
--- a/tests/atest/transformers/NormalizeNewLines/test_normalize_new_lines.py
+++ b/tests/atest/transformers/NormalizeNewLines/test_normalize_new_lines.py
@@ -34,10 +34,10 @@ def test_templated_tests_separated(self):
         )
 
     def test_test_case_last_0(self):
-        self.compare(source=f"test_case_last_0_lines.robot", expected="test_case_last.robot")
+        self.compare(source="test_case_last_0_lines.robot", expected="test_case_last.robot")
 
     def test_test_case_last_1(self):
-        self.compare(source=f"test_case_last_1_lines.robot", not_modified=True)
+        self.compare(source="test_case_last_1_lines.robot", not_modified=True)
 
     @pytest.mark.parametrize("empty_lines", [0, 1, 2])
     def test_consecutive_empty_lines(self, empty_lines):
@@ -49,7 +49,11 @@ def test_consecutive_empty_lines(self, empty_lines):
 
     @pytest.mark.parametrize("trailing_lines", [0, 1, 2])
     def test_inline_if(self, trailing_lines):
-        self.compare(source=f"inline_if_{trailing_lines}_lines.robot", expected="inline_if.robot", target_version=">=5")
+        self.compare(
+            source=f"inline_if_{trailing_lines}_lines.robot",
+            expected="inline_if.robot",
+            target_version=">=5",
+        )
 
     def test_disablers(self):
         self.compare(source="disablers.robot", not_modified=True)
@@ -67,6 +71,8 @@ def test_language_header(self):
         self.compare(source="language_header_0empty.robot", target_version=">=6")
         self.compare(source="language_header_2empty.robot", target_version=">=6")
         self.compare(
-            source="language_header_5empty.robot", expected="language_header_2empty.robot", target_version=">=6"
+            source="language_header_5empty.robot",
+            expected="language_header_2empty.robot",
+            target_version=">=6",
         )
         self.compare(source="language_header_and_comments.robot", target_version=">=6")
diff --git a/tests/atest/transformers/NormalizeSeparators/test_transformer.py b/tests/atest/transformers/NormalizeSeparators/test_transformer.py
index 94f0e333..f72904bd 100644
--- a/tests/atest/transformers/NormalizeSeparators/test_transformer.py
+++ b/tests/atest/transformers/NormalizeSeparators/test_transformer.py
@@ -25,7 +25,7 @@ def test_disable_section(self, skip_sections):
             self.compare(
                 source="test.robot",
                 not_modified=True,
-                config=f":skip_sections=settings,variables,testcases,keywords,comments",
+                config=":skip_sections=settings,variables,testcases,keywords,comments",
             )
         elif not skip_sections:
             self.compare(source="test.robot", expected="skip_none.robot")
@@ -40,7 +40,8 @@ def test_rf5_syntax(self):
         self.compare(source="rf5_syntax.robot", target_version=">=5")
 
     @pytest.mark.parametrize(
-        "disablers", ["disablers.robot", "disablers2.robot", "disablers3.robot", "disablers4.robot"]
+        "disablers",
+        ["disablers.robot", "disablers2.robot", "disablers3.robot", "disablers4.robot"],
     )
     def test_disablers(self, disablers):
         self.compare(source=disablers, not_modified=True)
@@ -49,10 +50,17 @@ def test_skip_documentation_default(self):
         self.compare(source="test.robot", config=":skip_documentation=False")
 
     def test_skip_documentation(self):
-        self.compare(source="test.robot", expected="skip_documentation.robot", config=":skip_documentation=True")
+        self.compare(
+            source="test.robot",
+            expected="skip_documentation.robot",
+            config=":skip_documentation=True",
+        )
 
     def test_continuation_indent(self):
-        self.compare(source="continuation_indent.robot", config=" --continuation-indent 4 --indent 4 --spacecount 2")
+        self.compare(
+            source="continuation_indent.robot",
+            config=" --continuation-indent 4 --indent 4 --spacecount 2",
+        )
 
     @pytest.mark.parametrize("indent", [2, 4])
     @pytest.mark.parametrize("spaces", [2, 4])
@@ -67,7 +75,7 @@ def test_inline_if(self, spaces, indent):
     def test_inline_if_flatten(self):
         self.compare(
             source="inline_if.robot",
-            expected=f"inline_if_flatten.robot",
+            expected="inline_if_flatten.robot",
             config=":flatten_lines=True:align_new_line=True --indent 4 --spacecount 4",
             target_version=">=5",
         )
@@ -76,27 +84,41 @@ def test_skip_keyword_call(self):
         self.compare(
             source="test.robot",
             expected="test_skip_keyword.robot",
-            config=":skip_keyword_call_pattern=(?i)should\sbe\sequal",
+            config=r":skip_keyword_call_pattern=(?i)should\sbe\sequal",
         )
 
     def test_file_with_pipes_bug390(self):
         self.compare(source="bug390.robot")
 
     @pytest.mark.parametrize(
-        "config", [":skip_comments=True:skip_block_comments=True", ":skip_comments=True", ":skip_block_comments=True"]
+        "config",
+        [
+            ":skip_comments=True:skip_block_comments=True",
+            ":skip_comments=True",
+            ":skip_block_comments=True",
+        ],
     )
     def test_comments(self, config):
         if "skip_comments" in config:
             expected = "comments_skip_comments.robot"
         else:
             expected = "comments_skip_block_comments.robot"
-        self.compare(source="comments.robot", expected=expected, config=config, target_version=">=5")
+        self.compare(
+            source="comments.robot",
+            expected=expected,
+            config=config,
+            target_version=">=5",
+        )
 
     def test_flatten_lines(self):
         if ROBOT_VERSION.major > 4:
             self.compare(source="flatten.robot", config=":flatten_lines=True")
         else:
-            self.compare(source="flatten.robot", expected="flatten_rf4.robot", config=":flatten_lines=True")
+            self.compare(
+                source="flatten.robot",
+                expected="flatten_rf4.robot",
+                config=":flatten_lines=True",
+            )
 
     def test_align_new_line(self):
         self.compare(
diff --git a/tests/atest/transformers/NormalizeTags/test_transformer.py b/tests/atest/transformers/NormalizeTags/test_transformer.py
index 5251b324..8f26bd34 100644
--- a/tests/atest/transformers/NormalizeTags/test_transformer.py
+++ b/tests/atest/transformers/NormalizeTags/test_transformer.py
@@ -13,14 +13,14 @@ def test_lowercase(self):
         self.compare(
             source="tests.robot",
             expected="lowercase.robot",
-            config=f":case=lowercase:normalize_case=True",
+            config=":case=lowercase:normalize_case=True",
         )
 
     def test_uppercase(self):
-        self.compare(source="tests.robot", expected="uppercase.robot", config=f":case=uppercase")
+        self.compare(source="tests.robot", expected="uppercase.robot", config=":case=uppercase")
 
     def test_titlecase(self):
-        self.compare(source="tests.robot", expected="titlecase.robot", config=f":case=titlecase")
+        self.compare(source="tests.robot", expected="titlecase.robot", config=":case=titlecase")
 
     def test_wrong_case(self):
         result = self.run_tidy(
@@ -35,10 +35,11 @@ def test_wrong_case(self):
         assert expected_output == result.output
 
     def test_only_remove_duplicates(self):
-        self.compare(source="duplicates.robot", config=f":normalize_case=False")
+        self.compare(source="duplicates.robot", config=":normalize_case=False")
 
     @pytest.mark.parametrize(
-        "disablers", ["disablers.robot", "disablers2.robot", "disablers3.robot", "disablers4.robot"]
+        "disablers",
+        ["disablers.robot", "disablers2.robot", "disablers3.robot", "disablers4.robot"],
     )
     def test_disablers(self, disablers):
         self.compare(source=disablers, not_modified=True)
@@ -57,12 +58,16 @@ def test_rf6(self):
 
     def test_preserve_format(self):
         self.compare(
-            source="preserve_format.robot", expected="preserve_format_enabled.robot", config=":preserve_format=True"
+            source="preserve_format.robot",
+            expected="preserve_format_enabled.robot",
+            config=":preserve_format=True",
         )
 
     def test_preserve_format_do_not_normalize_case(self):
         self.compare(
-            source="preserve_format.robot", config=":preserve_format=True:normalize_case=False", not_modified=True
+            source="preserve_format.robot",
+            config=":preserve_format=True:normalize_case=False",
+            not_modified=True,
         )
 
     def test_ignore_format(self):
diff --git a/tests/atest/transformers/OrderTags/test_transformer.py b/tests/atest/transformers/OrderTags/test_transformer.py
index f8982da2..73e94091 100644
--- a/tests/atest/transformers/OrderTags/test_transformer.py
+++ b/tests/atest/transformers/OrderTags/test_transformer.py
@@ -11,42 +11,42 @@ def test_case_insensitive(self):
         self.compare(
             source="tests.robot",
             expected="case_insensitive.robot",
-            config=f":case_sensitive=False:reverse=False",
+            config=":case_sensitive=False:reverse=False",
         )
 
     def test_case_sensitive(self):
         self.compare(
             source="tests.robot",
             expected="case_sensitive.robot",
-            config=f":case_sensitive=True:reverse=False",
+            config=":case_sensitive=True:reverse=False",
         )
 
     def test_insensitive_reverse(self):
         self.compare(
             source="tests.robot",
             expected="case_insensitive_reverse.robot",
-            config=f":case_sensitive=False:reverse=True",
+            config=":case_sensitive=False:reverse=True",
         )
 
     def test_case_sensitive_reverse(self):
         self.compare(
             source="tests.robot",
             expected="case_sensitive_reverse.robot",
-            config=f":case_sensitive=True:reverse=True",
+            config=":case_sensitive=True:reverse=True",
         )
 
     def test_default_tags_false(self):
         self.compare(
             source="tests.robot",
             expected="default_tags_false.robot",
-            config=f":case_sensitive=False:reverse=False:default_tags=False",
+            config=":case_sensitive=False:reverse=False:default_tags=False",
         )
 
     def test_force_tags_false(self):
         self.compare(
             source="tests.robot",
             expected="force_tags_false.robot",
-            config=f":case_sensitive=False:reverse=False:force_tags=False",
+            config=":case_sensitive=False:reverse=False:force_tags=False",
         )
 
     def test_disablers(self):
diff --git a/tests/e2e/test_transform_stability.py b/tests/e2e/test_transform_stability.py
index 08397bc5..b0ea1407 100644
--- a/tests/e2e/test_transform_stability.py
+++ b/tests/e2e/test_transform_stability.py
@@ -38,11 +38,26 @@
         "test": 2,
         "test_disablers": 2,
     },
-    "MergeAndOrderSections": {"disablers": 3, "parsing_error": 2, "translated": 2, "tests": 3},
+    "MergeAndOrderSections": {
+        "disablers": 3,
+        "parsing_error": 2,
+        "translated": 2,
+        "tests": 3,
+    },
     "NormalizeNewLines": {"tests": 2, "multiline": 2},
-    "NormalizeSeparators": {"continuation_indent": 2, "test": 2, "disablers": 2, "pipes": 2},
+    "NormalizeSeparators": {
+        "continuation_indent": 2,
+        "test": 2,
+        "disablers": 2,
+        "pipes": 2,
+    },
     "NormalizeSettingName": {"disablers": 2, "translated": 2, "tests": 2},
-    "NormalizeTags": {"disablers": 2, "duplicates": 2, "tests": 2, "preserve_format": 2},
+    "NormalizeTags": {
+        "disablers": 2,
+        "duplicates": 2,
+        "tests": 2,
+        "preserve_format": 2,
+    },
     "OrderSettings": {"test": 2, "translated": 2},
     "OrderSettingsSection": {"test": 2},
     "OrderTags": {"tests": 2},
@@ -64,10 +79,20 @@
     },
     "ReplaceWithVAR": {"invalid_inline_if": 2},
     "SmartSortKeywords": {"multiple_sections": 2, "sort_input": 2},
-    "SplitTooLongLine": {"continuation_indent": 2, "disablers": 2, "tests": 2, "comments": 2, "settings": 2},
+    "SplitTooLongLine": {
+        "continuation_indent": 2,
+        "disablers": 2,
+        "tests": 2,
+        "comments": 2,
+        "settings": 2,
+    },
     "Translate": {"pl_language_header": 2},
 }
-SKIP_TESTS_4 = {"ReplaceReturns": {"test"}, "GenerateDocumentation": {"test": 2}, "SplitTooLongLine": {"settings"}}
+SKIP_TESTS_4 = {
+    "ReplaceReturns": {"test"},
+    "GenerateDocumentation": {"test": 2},
+    "SplitTooLongLine": {"settings"},
+}
 SKIP_TESTS = {
     "ReplaceRunKeywordIf": {"invalid_data"},
     "SplitTooLongLine": {"variables"},
diff --git a/tests/rf_versions_matrix/requirements_rf7.txt b/tests/rf_versions_matrix/requirements_rf7.txt
index e77b50e1..608c82a9 100644
--- a/tests/rf_versions_matrix/requirements_rf7.txt
+++ b/tests/rf_versions_matrix/requirements_rf7.txt
@@ -1 +1 @@
-robotframework==7.0
\ No newline at end of file
+robotframework==7.1
\ No newline at end of file
diff --git a/tests/utest/test_cli.py b/tests/utest/test_cli.py
index b2d61867..3ad7542f 100644
--- a/tests/utest/test_cli.py
+++ b/tests/utest/test_cli.py
@@ -10,7 +10,13 @@
 from robotidy import skip
 from robotidy.config import RawConfig
-from robotidy.files import DEFAULT_EXCLUDES, find_project_root, get_paths, load_toml_file, read_pyproject_config
+from robotidy.files import (
+    DEFAULT_EXCLUDES,
+    find_project_root,
+    get_paths,
+    load_toml_file,
+    read_pyproject_config,
+)
 from robotidy.transformers.aligners_core import AlignKeywordsTestsSection
 from robotidy.transformers.AlignSettingsSection import AlignSettingsSection
 from robotidy.utils import misc
 
@@ -46,7 +52,10 @@ class TestCli:
        [
             ("NotExisting", ""),
             ("AlignSettings", " Did you mean:\n AlignSettingsSection"),
-            ("align", " Did you mean:\n AlignSettingsSection\n AlignVariablesSection"),
+            (
+                "align",
+                " Did you mean:\n AlignSettingsSection\n AlignVariablesSection",
+            ),
             ("splittoolongline", " Did you mean:\n SplitTooLongLine"),
             ("AssignmentNormalizer", " Did you mean:\n NormalizeAssignments"),
         ],
@@ -175,7 +184,10 @@ def test_read_robotidy_config(self):
             "overwrite": False,
             "diff": False,
             "spacecount": 4,
-            "transform": ["DiscardEmptySections:allow_only_comments=True", "ReplaceRunKeywordIf"],
+            "transform": [
+                "DiscardEmptySections:allow_only_comments=True",
+                "ReplaceRunKeywordIf",
+            ],
         }
         config_path = TEST_DATA_DIR / "config" / "robotidy.toml"
         config = read_pyproject_config(config_path)
@@ -189,7 +201,10 @@ def test_read_pyproject_config(self):
             "startline": 10,
             "endline": 20,
             "exclude": "Regex\\s",
-            "transform": ["DiscardEmptySections:allow_only_comments=True", "SplitTooLongLine"],
+            "transform": [
+                "DiscardEmptySections:allow_only_comments=True",
+                "SplitTooLongLine",
+            ],
             "configure": [
                 "DiscardEmptySections:allow_only_comments=False",
                 "OrderSettings: keyword_before = documentation,tags,timeout,arguments",
@@ -276,7 +291,10 @@ def test_list_transformers_invalid_filter_value(self, flag):
     @pytest.mark.parametrize(
         "name, expected_doc",
         [
-            ("ReplaceRunKeywordIf", "Run Keywords inside Run Keyword If will be split into separate keywords:"),
+            (
+                "ReplaceRunKeywordIf",
+                "Run Keywords inside Run Keyword If will be split into separate keywords:",
+            ),
             ("SmartSortKeywords", "By default sorting is case insensitive, but"),
         ],
     )
@@ -298,7 +316,10 @@ def test_describe_transformer_all(self):
         [
             ("NotExisting", ""),
             ("AlignSettings", " Did you mean:\n AlignSettingsSection"),
-            ("align", " Did you mean:\n AlignSettingsSection\n AlignVariablesSection"),
+            (
+                "align",
+                " Did you mean:\n AlignSettingsSection\n AlignVariablesSection",
+            ),
             ("splittoolongline", " Did you mean:\n SplitTooLongLine"),
             ("AssignmentNormalizer", " Did you mean:\n NormalizeAssignments"),
         ],
@@ -312,13 +333,21 @@ def test_describe_invalid_transformer(self, name, similar):
     @pytest.mark.parametrize("flag", ["--help", "-h"])
     def test_help(self, flag):
         result = run_tidy([flag])
-        assert f"Robotidy is a tool for formatting" in result.output
+        assert "Robotidy is a tool for formatting" in result.output
 
     @pytest.mark.parametrize(
         "source, return_status, expected_output",
         [
-            ("golden.robot", 0, "\n0 files would be reformatted, 1 file would be left unchanged.\n"),
-            ("not_golden.robot", 1, "\n1 file would be reformatted, 0 files would be left unchanged.\n"),
+            (
+                "golden.robot",
+                0,
+                "\n0 files would be reformatted, 1 file would be left unchanged.\n",
+            ),
+            (
+                "not_golden.robot",
+                1,
+                "\n1 file would be reformatted, 0 files would be left unchanged.\n",
+            ),
         ],
     )
     def test_check(self, source, return_status, expected_output):
@@ -346,7 +375,13 @@ def test_check_overwrite(self, source, return_status, expected_output):
         expected_output = f"Reformatted {source}\n{expected_output}"
         with patch("robotidy.utils.misc.ModelWriter") as mock_writer:
             result = run_tidy(
-                ["--check", "--overwrite", "--transform", "NormalizeSectionHeaderName", str(source)],
+                [
+                    "--check",
+                    "--overwrite",
+                    "--transform",
+                    "NormalizeSectionHeaderName",
+                    str(source),
+                ],
                 exit_code=return_status,
             )
         if return_status:
@@ -383,7 +418,15 @@ def test_disable_coloring(self, color_flag, color_env):
 
     def test_diff(self):
         source = TEST_DATA_DIR / "check" / "not_golden.robot"
-        result = run_tidy(["--diff", "--no-overwrite", "--transform", "NormalizeSectionHeaderName", str(source)])
+        result = run_tidy(
+            [
+                "--diff",
+                "--no-overwrite",
+                "--transform",
+                "NormalizeSectionHeaderName",
+                str(source),
+            ]
+        )
         assert "*** settings ***" in result.output
         assert "*** Settings ***" in result.output
 
@@ -396,7 +439,12 @@ def test_line_sep(self, line_sep):
             run_tidy(["--lineseparator", line_sep, str(source)], output="test.robot")
         else:
             run_tidy([str(source)], output="test.robot")
-        line_end = {"unix": "\n", "windows": "\r\n", "native": os.linesep, None: os.linesep}[line_sep]
+        line_end = {
+            "unix": "\n",
+            "windows": "\r\n",
+            "native": os.linesep,
+            None: os.linesep,
+        }[line_sep]
         with open(str(expected)) as f:
             expected_str = f.read()
             expected_str = expected_str.replace("\n", line_end)
@@ -408,7 +456,11 @@
    @pytest.mark.parametrize(
         "exclude, extend_exclude, allowed",
         [
-            (DEFAULT_EXCLUDES, None, ["nested/test.robot", "test.resource", "test.robot"]),
+            (
+                DEFAULT_EXCLUDES,
+                None,
+                ["nested/test.robot", "test.resource", "test.robot"],
+            ),
             ("test.resource", None, ["test.robot", "nested/test.robot"]),
             (DEFAULT_EXCLUDES, "test.resource", ["test.robot", "nested/test.robot"]),
             ("test.resource", "nested/*", ["test.robot"]),
@@ -442,7 +494,11 @@ def test_exclude_gitignore(self, exclude, extend_exclude, skip_gitignore, allowe
                 ["test3.robot"],
                 "0 files reformatted, 1 file left unchanged.",
             ),  # calls: robotidy test3.robot
-            ("test.robot", ["test.robot"], "0 files reformatted, 1 file left unchanged."),
+            (
+                "test.robot",
+                ["test.robot"],
+                "0 files reformatted, 1 file left unchanged.",
+            ),
             (
                 ".",
                 ["test.robot", "test3.robot", "resources/test.robot"],
@@ -458,7 +514,10 @@ def test_src_and_space_in_param_in_configuration(self, source, should_parse, sum
             result = run_tidy([str(source)])
         else:
             result = run_tidy()
-        expected = [f"Loaded configuration from {source_dir / 'pyproject.toml'}", summary]
+        expected = [
+            f"Loaded configuration from {source_dir / 'pyproject.toml'}",
+            summary,
+        ]
         for file in should_parse:
             path = source_dir / file
             expected.append(f"Found {path} file")
@@ -478,9 +537,7 @@ def test_loading_from_stdin(self):
             "*** Variables ***\n\n\n\n"
             "*** Keywords ***\nKeyword\n Keyword1 ${arg}\n"
         )
-        expected_output = (
-            "*** Settings ***\nLibrary SomeLib\n\n\n" "*** Keywords ***\nKeyword\n Keyword1 ${arg}\n\n"
-        )
+        expected_output = "*** Settings ***\nLibrary SomeLib\n\n\n*** Keywords ***\nKeyword\n Keyword1 ${arg}\n\n"
         args = "--transform DiscardEmptySections -".split()
         result = run_tidy(args, std_in=input_file)
         assert result.output == expected_output
@@ -491,7 +548,7 @@ def test_invalid_target_version(self, mocked_version, target_version):
         mocked_version.major = 5
         result = run_tidy(f"--target-version {target_version} .".split(), exit_code=2)
         error = self.normalize_cli_error(result.stderr)
-        assert f"Invalid value for '--target-version' / '-tv':" in error
+        assert "Invalid value for '--target-version' / '-tv':" in error
 
     def normalize_cli_error(self, error):
         error = error.replace("│", "").replace("\n", "")
@@ -513,7 +570,11 @@ def test_too_recent_target_version(self, mocked_version, option_name):
 
     def test_skip_options(self, tmp_path):
         alternate_names = {"--skip-return-statement": "--skip-return"}
-        with_values = {"--skip-keyword-call-pattern", "--skip-keyword-call", "--skip-sections"}
+        with_values = {
+            "--skip-keyword-call-pattern",
+            "--skip-keyword-call",
+            "--skip-sections",
+        }
         option_names = []
         for skip_option in skip.SkipConfig.HANDLES:
             option = f"--{skip_option.replace('_', '-')}"
@@ -536,7 +597,7 @@ def test_load_custom_transformers_from_cli(self, option_name, tmp_path):
         run_tidy([option_name, str(custom_transformer), str(tmp_path)])
 
     def test_exclude_pattern_from_config(self):
-        config_path = TEST_DATA_DIR / "only_pyproject" / f"pyproject.toml"
+        config_path = TEST_DATA_DIR / "only_pyproject" / "pyproject.toml"
         config_file = read_pyproject_config(config_path)
         config = RawConfig().from_config_file(config_file, config_path)
         assert config.exclude == re.compile("Regex\\s")
@@ -544,7 +605,11 @@
 
 class TestGenerateConfig:
     def validate_generated_default_configuration(
-        self, config_path: Path, diff: bool, add_missing_enabled: bool, rename_variables_enabled: bool
+        self,
+        config_path: Path,
+        diff: bool,
+        add_missing_enabled: bool,
+        rename_variables_enabled: bool,
     ):
         assert config_path.is_file()
         config = load_toml_file(config_path)
@@ -572,7 +637,10 @@ def test_generate_default_config(self, temporary_cwd):
         config_path = temporary_cwd / "pyproject.toml"
         run_tidy(["--generate-config"])
         self.validate_generated_default_configuration(
-            config_path, diff=False, add_missing_enabled=True, rename_variables_enabled=False
+            config_path,
+            diff=False,
+            add_missing_enabled=True,
+            rename_variables_enabled=False,
         )
 
     def test_generate_config_ignore_existing_config(self, temporary_cwd):
@@ -581,21 +649,30 @@ def test_generate_config_ignore_existing_config(self, temporary_cwd):
         shutil.copy(orig_config_path, config_path)
         run_tidy(["--generate-config"])
         self.validate_generated_default_configuration(
-            config_path, diff=False, add_missing_enabled=True, rename_variables_enabled=False
+            config_path,
+            diff=False,
+            add_missing_enabled=True,
+            rename_variables_enabled=False,
        )
 
     def test_generate_config_with_filename(self, temporary_cwd):
         config_path = temporary_cwd / "different.txt"
         run_tidy(["--generate-config", "different.txt"])
         self.validate_generated_default_configuration(
-            config_path, diff=False, add_missing_enabled=True, rename_variables_enabled=False
+            config_path,
+            diff=False,
+            add_missing_enabled=True,
+            rename_variables_enabled=False,
         )
 
     def test_generate_config_with_cli_config(self, temporary_cwd):
         config_path = temporary_cwd / "pyproject.toml"
         run_tidy(["--generate-config", "--diff", "--transform", "RenameVariables"])
         self.validate_generated_default_configuration(
-            config_path, diff=True, add_missing_enabled=False, rename_variables_enabled=True
+            config_path,
+            diff=True,
+            add_missing_enabled=False,
+            rename_variables_enabled=True,
        )
 
     def test_missing_dependency(self, monkeypatch, temporary_cwd):
diff --git a/tests/utest/test_utils.py b/tests/utest/test_utils.py
index 7451e9eb..bb619df7 100644
--- a/tests/utest/test_utils.py
+++ b/tests/utest/test_utils.py
@@ -5,7 +5,11 @@
 
 from robotidy.app import Robotidy
 from robotidy.config import FormattingConfig, MainConfig, RawConfig
-from robotidy.utils.misc import ROBOT_VERSION, decorate_diff_with_color, split_args_from_name_or_path
+from robotidy.utils.misc import (
+    ROBOT_VERSION,
+    decorate_diff_with_color,
+    split_args_from_name_or_path,
+)
 
 
 @pytest.fixture
@@ -54,14 +58,26 @@ def test_diff_lines_colorized(self):
         "name_or_path, expected_name, expected_args",
         [
             ("DiscardEmptySections", "DiscardEmptySections", []),
-            ("DiscardEmptySections:allow_only_comments=True", "DiscardEmptySections", ["allow_only_comments=True"]),
-            ("DiscardEmptySections;allow_only_comments=True", "DiscardEmptySections", ["allow_only_comments=True"]),
+            (
+                "DiscardEmptySections:allow_only_comments=True",
+                "DiscardEmptySections",
+                ["allow_only_comments=True"],
+            ),
+            (
+                "DiscardEmptySections;allow_only_comments=True",
+                "DiscardEmptySections",
+                ["allow_only_comments=True"],
+            ),
             (
                 "DiscardEmptySections;allow_only_comments=True:my_var=1",
                 "DiscardEmptySections",
                 ["allow_only_comments=True:my_var=1"],
             ),
-            (r"C:\path\to\module\transformer:my_variable=1", r"C:\path\to\module\transformer", ["my_variable=1"]),
+            (
+                r"C:\path\to\module\transformer:my_variable=1",
+                r"C:\path\to\module\transformer",
+                ["my_variable=1"],
+            ),
             (__file__, __file__, []),
         ],
     )
diff --git a/tests/utest/utils.py b/tests/utest/utils.py
index dd7ee5c8..fce4a696 100644
--- a/tests/utest/utils.py
+++ b/tests/utest/utils.py
@@ -21,7 +21,7 @@ def run_tidy(
         output_path = str(Path(Path(__file__).parent, "actual", output))
     else:
         output_path = str(Path(Path(__file__).parent, "actual", "tmp"))
-    arguments = ["--output", output_path] + arguments
+    arguments = ["--output", output_path, *arguments]
     result = runner.invoke(cli, arguments, input=std_in)
     if result.exit_code != exit_code:
         print(result.output)