author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:06:49 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:06:49 +0000
commit:    2fe34b6444502079dc0b84365ce82dbc92de308e (patch)
tree:      8fedcab52bbbc3db6c5aa909a88a7a7b81685018 /src
parent:    Initial commit. (diff)
download:  ansible-lint-1f847810e1dcffeab102ff853e50a09833fad025.tar.xz
           ansible-lint-1f847810e1dcffeab102ff853e50a09833fad025.zip
Adding upstream version 6.17.2. (tag: upstream/6.17.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src')
147 files changed, 26248 insertions, 0 deletions
diff --git a/src/ansiblelint/__init__.py b/src/ansiblelint/__init__.py
new file mode 100644
index 0000000..ddecc13
--- /dev/null
+++ b/src/ansiblelint/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+"""Main ansible-lint package."""
+from __future__ import annotations
+
+from ansiblelint.version import __version__
+
+__all__ = ("__version__",)
diff --git a/src/ansiblelint/__main__.py b/src/ansiblelint/__main__.py
new file mode 100755
index 0000000..af434d0
--- /dev/null
+++ b/src/ansiblelint/__main__.py
@@ -0,0 +1,427 @@
+#!/usr/bin/env python
+# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+"""Command line implementation.""" + +from __future__ import annotations + +import errno +import logging +import os +import pathlib +import shutil +import site +import sys +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, TextIO + +from ansible_compat.prerun import get_cache_dir +from filelock import FileLock, Timeout +from rich.markup import escape + +from ansiblelint import cli +from ansiblelint._mockings import _perform_mockings_cleanup +from ansiblelint.app import get_app +from ansiblelint.color import ( + console, + console_options, + console_stderr, + reconfigure, + render_yaml, +) +from ansiblelint.config import ( + Options, + get_deps_versions, + get_version_warning, + log_entries, + options, +) +from ansiblelint.constants import RC +from ansiblelint.loaders import load_ignore_txt +from ansiblelint.skip_utils import normalize_tag +from ansiblelint.version import __version__ + +if TYPE_CHECKING: + # RulesCollection must be imported lazily or ansible gets imported too early. + + from ansiblelint.rules import RulesCollection + from ansiblelint.runner import LintResult + + +_logger = logging.getLogger(__name__) +cache_dir_lock: None | FileLock = None + + +class LintLogHandler(logging.Handler): + """Custom handler that uses our rich stderr console.""" + + def emit(self, record: logging.LogRecord) -> None: + try: + msg = self.format(record) + console_stderr.print(f"[dim]{msg}[/dim]", highlight=False) + except RecursionError: # See issue 36272 + raise + except Exception: # pylint: disable=broad-exception-caught # noqa: BLE001 + self.handleError(record) + + +def initialize_logger(level: int = 0) -> None: + """Set up the global logging level based on the verbosity number.""" + # We are about to act on the root logger, which defaults to logging.WARNING. + # That is where our 0 (default) value comes from. 
+ verbosity_map = { + -2: logging.CRITICAL, + -1: logging.ERROR, + 0: logging.WARNING, + 1: logging.INFO, + 2: logging.DEBUG, + } + + handler = LintLogHandler() + formatter = logging.Formatter("%(levelname)-8s %(message)s") + handler.setFormatter(formatter) + logger = logging.getLogger() + logger.addHandler(handler) + # Unknown logging level is treated as DEBUG + logging_level = verbosity_map.get(level, logging.DEBUG) + logger.setLevel(logging_level) + logging.captureWarnings(True) # pass all warnings.warn() messages through logging + # Use module-level _logger instance to validate it + _logger.debug("Logging initialized to level %s", logging_level) + + +def initialize_options(arguments: list[str] | None = None) -> None: + """Load config options and store them inside options module.""" + new_options = cli.get_config(arguments or []) + new_options.cwd = pathlib.Path.cwd() + + if new_options.colored is None: + new_options.colored = should_do_markup() + + # persist loaded configuration inside options module + for k, v in new_options.__dict__.items(): + setattr(options, k, v) + + # rename deprecated ids/tags to newer names + options.tags = [normalize_tag(tag) for tag in options.tags] + options.skip_list = [normalize_tag(tag) for tag in options.skip_list] + options.warn_list = [normalize_tag(tag) for tag in options.warn_list] + + options.configured = True + options.cache_dir = get_cache_dir(pathlib.Path(options.project_dir)) + + # add a lock file so we do not have two instances running inside at the same time + if options.cache_dir: + options.cache_dir.mkdir(parents=True, exist_ok=True) + + if not options.offline: # pragma: no cover + cache_dir_lock = FileLock( # pylint: disable=redefined-outer-name + f"{options.cache_dir}/.lock", + ) + try: + cache_dir_lock.acquire(timeout=180) + except Timeout: # pragma: no cover + _logger.error( + "Timeout waiting for another instance of ansible-lint to release the lock.", + ) + sys.exit(RC.LOCK_TIMEOUT) + + # Avoid extra output noise from Ansible about using devel versions + if "ANSIBLE_DEVEL_WARNING" not in os.environ: # pragma: no branch + os.environ["ANSIBLE_DEVEL_WARNING"] = "false" + + +def _do_list(rules: RulesCollection) -> int: + # On purpose lazy-imports to avoid pre-loading Ansible + # pylint: disable=import-outside-toplevel + from ansiblelint.generate_docs import rules_as_md, rules_as_rich, rules_as_str + + if options.list_rules: + _rule_format_map: dict[str, Callable[..., Any]] = { + "brief": rules_as_str, + "full": rules_as_rich, + "md": rules_as_md, + } + + console.print( + _rule_format_map.get(options.format, rules_as_str)(rules), + highlight=False, + ) + return 0 + + if options.list_tags: + console.print(render_yaml(rules.list_tags())) + return 0 + + # we should not get here! + return 1 + + +# noinspection PyShadowingNames +def _do_transform(result: LintResult, opts: Options) -> None: + """Create and run Transformer.""" + if "yaml" in opts.skip_list: + # The transformer rewrites yaml files, but the user requested to skip + # the yaml rule or anything tagged with "yaml", so there is nothing to do. 
+ return + + # On purpose lazy-imports to avoid loading transforms unless requested + # pylint: disable=import-outside-toplevel + from ansiblelint.transformer import Transformer + + transformer = Transformer(result, options) + + # this will mark any matches as fixed if the transforms repaired the issue + transformer.run() + + +def support_banner() -> None: + """Display support banner when running on unsupported platform.""" + if sys.version_info < (3, 9, 0): # pragma: no cover + prefix = "::warning::" if "GITHUB_ACTION" in os.environ else "WARNING: " + console_stderr.print( + f"{prefix}ansible-lint is no longer tested under Python {sys.version_info.major}.{sys.version_info.minor} and will soon require 3.9. Do not report bugs for this version.", + style="bold red", + ) + + +# pylint: disable=too-many-statements,too-many-locals +def main(argv: list[str] | None = None) -> int: + """Linter CLI entry point.""" + # alter PATH if needed (venv support) + path_inject() + + if argv is None: # pragma: no cover + argv = sys.argv + initialize_options(argv[1:]) + + console_options["force_terminal"] = options.colored + reconfigure(console_options) + + if options.version: + deps = get_deps_versions() + msg = f"ansible-lint [repr.number]{__version__}[/] using[dim]" + for k, v in deps.items(): + msg += f" {escape(k)}:[repr.number]{v}[/]" + msg += "[/]" + console.print(msg, markup=True, highlight=False) + msg = get_version_warning() + if msg: + console.print(msg) + support_banner() + sys.exit(0) + else: + support_banner() + + initialize_logger(options.verbosity) + for level, message in log_entries: + _logger.log(level, message) + _logger.debug("Options: %s", options) + _logger.debug("CWD: %s", Path.cwd()) + + if not options.offline: + # pylint: disable=import-outside-toplevel + from ansiblelint.schemas.__main__ import refresh_schemas + + refresh_schemas() + + # pylint: disable=import-outside-toplevel + from ansiblelint.rules import RulesCollection + from ansiblelint.runner import _get_matches + + if options.list_profiles: + from ansiblelint.generate_docs import profiles_as_rich + + console.print(profiles_as_rich()) + return 0 + + app = get_app(offline=None) # to be sure we use the offline value from settings + rules = RulesCollection( + options.rulesdirs, + profile_name=options.profile, + app=app, + options=options, + ) + + if options.list_rules or options.list_tags: + return _do_list(rules) + + if isinstance(options.tags, str): + options.tags = options.tags.split(",") # pragma: no cover + result = _get_matches(rules, options) + + if options.write_list: + ruamel_safe_version = "0.17.26" + from packaging.version import Version + from ruamel.yaml import __version__ as ruamel_yaml_version_str + + if Version(ruamel_safe_version) > Version(ruamel_yaml_version_str): + _logger.warning( + "We detected use of `--write` feature with a buggy ruamel-yaml %s library instead of >=%s, upgrade it before reporting any bugs like dropped comments.", + ruamel_yaml_version_str, + ruamel_safe_version, + ) + _do_transform(result, options) + + mark_as_success = True + + if options.strict and result.matches: + mark_as_success = False + + # Remove skip_list items from the result + result.matches = [m for m in result.matches if m.tag not in app.options.skip_list] + # Mark matches as ignored inside ignore file + ignore_map = load_ignore_txt(options.ignore_file) + for match in result.matches: + if match.tag in ignore_map[match.filename]: + match.ignored = True + + app.render_matches(result.matches) + + 
_perform_mockings_cleanup(app.options) + if cache_dir_lock: + cache_dir_lock.release() + pathlib.Path(cache_dir_lock.lock_file).unlink(missing_ok=True) + if options.mock_filters: + _logger.warning( + "The following filters were mocked during the run: %s", + ",".join(options.mock_filters), + ) + + return app.report_outcome(result, mark_as_success=mark_as_success) + + +def _run_cli_entrypoint() -> None: + """Invoke the main entrypoint with current CLI args. + + This function also processes the runtime exceptions. + """ + try: + sys.exit(main(sys.argv)) + except OSError as exc: + # NOTE: Only "broken pipe" is acceptable to ignore + if exc.errno != errno.EPIPE: # pragma: no cover + raise + except KeyboardInterrupt: # pragma: no cover + sys.exit(RC.EXIT_CONTROL_C) + except RuntimeError as exc: # pragma: no cover + raise SystemExit(exc) from exc + + +def path_inject() -> None: + """Add python interpreter path to top of PATH to fix outside venv calling.""" + # This make it possible to call ansible-lint that was installed inside a + # virtualenv without having to pre-activate it. Otherwise subprocess will + # either fail to find ansible executables or call the wrong ones. + # + # This must be run before we do run any subprocesses, and loading config + # does this as part of the ansible detection. + paths = [x for x in os.environ.get("PATH", "").split(os.pathsep) if x] + + # Expand ~ in PATH as it known to break many tools + expanded = False + for idx, path in enumerate(paths): + if path.startswith("~"): # pragma: no cover + paths[idx] = str(Path(path).expanduser()) + expanded = True + if expanded: # pragma: no cover + print( # noqa: T201 + "WARNING: PATH altered to expand ~ in it. Read https://stackoverflow.com/a/44704799/99834 and correct your system configuration.", + file=sys.stderr, + ) + + inject_paths = [] + + userbase_bin_path = Path(site.getuserbase()) / "bin" + if ( + str(userbase_bin_path) not in paths + and (userbase_bin_path / "bin" / "ansible").exists() + ): + inject_paths.append(str(userbase_bin_path)) + + py_path = Path(sys.executable).parent + if str(py_path) not in paths and (py_path / "ansible").exists(): + inject_paths.append(str(py_path)) + + if not os.environ.get("PYENV_VIRTUAL_ENV", None): + if inject_paths: + print( # noqa: T201 + f"WARNING: PATH altered to include {', '.join(inject_paths)} :: This is usually a sign of broken local setup, which can cause unexpected behaviors.", + file=sys.stderr, + ) + if inject_paths or expanded: + os.environ["PATH"] = os.pathsep.join([*inject_paths, *paths]) + + # We do know that finding ansible in PATH does not guarantee that it is + # functioning or that is in fact the same version that was installed as + # our dependency, but addressing this would be done by ansible-compat. 
+    for cmd in ("ansible",):
+        if not shutil.which(cmd):
+            msg = f"Failed to find runtime dependency '{cmd}' in PATH"
+            raise RuntimeError(msg)
+
+
+# Based on Ansible implementation
+def to_bool(value: Any) -> bool:  # pragma: no cover
+    """Return a bool for the arg."""
+    if value is None or isinstance(value, bool):
+        return bool(value)
+    if isinstance(value, str):
+        value = value.lower()
+    if value in ("yes", "on", "1", "true", 1):
+        return True
+    return False
+
+
+def should_do_markup(stream: TextIO = sys.stdout) -> bool:  # pragma: no cover
+    """Decide about use of ANSI colors."""
+    py_colors = None
+
+    # https://xkcd.com/927/
+    for env_var in ["PY_COLORS", "CLICOLOR", "FORCE_COLOR", "ANSIBLE_FORCE_COLOR"]:
+        value = os.environ.get(env_var, None)
+        if value is not None:
+            py_colors = to_bool(value)
+            break
+
+    # If deliberately disabled colors
+    if os.environ.get("NO_COLOR", None):
+        return False
+
+    # User configuration requested colors
+    if py_colors is not None:
+        return to_bool(py_colors)
+
+    term = os.environ.get("TERM", "")
+    if "xterm" in term:
+        return True
+
+    if term == "dumb":
+        return False
+
+    # Use tty detection logic as last resort because there are numerous
+    # factors that can make isatty return a misleading value, including:
+    # - stdin.isatty() is the only one returning true, even on a real terminal
+    # - stderr returning false if the user uses an error stream coloring solution
+    return stream.isatty()
+
+
+if __name__ == "__main__":
+    _run_cli_entrypoint()
diff --git a/src/ansiblelint/_internal/__init__.py b/src/ansiblelint/_internal/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ansiblelint/_internal/__init__.py
diff --git a/src/ansiblelint/_internal/internal_error.md b/src/ansiblelint/_internal/internal_error.md
new file mode 100644
index 0000000..8db5e5e
--- /dev/null
+++ b/src/ansiblelint/_internal/internal_error.md
@@ -0,0 +1,43 @@
+# internal-error
+
+This error can be caused by internal bugs, but also by custom rules.
+Instead of just stopping tool execution, we generate the errors and continue
+processing other files. This allows users to add this rule to their `warn_list`
+until the root cause is fixed.
+
+Keep in mind that once an `internal-error` is found on a specific file, no
+other rules will be executed on that same file.
+
+In almost all cases you will see more detailed information regarding the
+original error or runtime exception that triggered this rule.
+
+If these files are broken on purpose, like some test fixtures, you need to add
+them to `exclude_paths`.
+
+## Problematic code
+
+```yaml
+---
+- name: Some title {{ # <-- Ansible will not load this invalid jinja template
+  hosts: localhost
+  tasks: []
+```
+
+## Correct code
+
+```yaml
+---
+- name: Some title
+  hosts: localhost
+  tasks: []
+```
+
+## ERROR! No hosts matched the subscripted pattern
+
+If you see this error, it means that you tried to index a host group variable
+using an index larger than its size.
+
+Instead of doing something like `hosts: all[1]`, which assumes that you have
+at least two hosts in your current inventory, you should write something like
+`hosts: "{{ all[1] | default([]) }}"`, which is safe and does not produce
+runtime errors. Use safe fallbacks to make your code more resilient.
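Editor's aside: the `to_bool` and `should_do_markup` helpers at the end of `__main__.py` above implement the color-detection precedence described in their comments. A minimal sketch of that behaviour, assuming ansible-lint is installed so `ansiblelint.__main__` is importable:

```python
import os

from ansiblelint.__main__ import should_do_markup, to_bool

# PY_COLORS, CLICOLOR, FORCE_COLOR and ANSIBLE_FORCE_COLOR are consulted in
# that order, but NO_COLOR always wins and disables markup entirely.
os.environ["FORCE_COLOR"] = "1"
os.environ["NO_COLOR"] = "1"

print(to_bool("yes"))      # True  ("yes", "on", "1", "true" are truthy)
print(should_do_markup())  # False, because NO_COLOR overrides FORCE_COLOR
```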
diff --git a/src/ansiblelint/_internal/load-failure.md b/src/ansiblelint/_internal/load-failure.md
new file mode 100644
index 0000000..f88bfa9
--- /dev/null
+++ b/src/ansiblelint/_internal/load-failure.md
@@ -0,0 +1,17 @@
+# load-failure
+
+Linter failed to process a file, possible invalid file. Possible reasons:
+
+- contains unsupported encoding (only UTF-8 is supported)
+- not an Ansible file
+- it contains some unsupported custom YAML objects (`!!` prefix)
+- it was not able to decrypt an inline `!vault` block.
+
+This violation **is not** skippable, so it cannot be added to the `warn_list` or
+the `skip_list`. If a vault decryption issue cannot be avoided, the offending
+file can be added to the `exclude_paths` configuration.
+
+Possible error codes:
+
+- `load-failure[not-found]` - Indicates that a file or folder given as an
+  argument was not found on disk.
diff --git a/src/ansiblelint/_internal/parser-error.md b/src/ansiblelint/_internal/parser-error.md
new file mode 100644
index 0000000..f6c7649
--- /dev/null
+++ b/src/ansiblelint/_internal/parser-error.md
@@ -0,0 +1,5 @@
+# parser-error
+
+**AnsibleParserError.**
+
+Ansible parser fails; this usually indicates an invalid file.
diff --git a/src/ansiblelint/_internal/rules.py b/src/ansiblelint/_internal/rules.py
new file mode 100644
index 0000000..acaf0f3
--- /dev/null
+++ b/src/ansiblelint/_internal/rules.py
@@ -0,0 +1,209 @@
+"""Internally used rule classes."""
+from __future__ import annotations
+
+import inspect
+import logging
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from ansiblelint.constants import RULE_DOC_URL
+
+if TYPE_CHECKING:
+    from ansiblelint.errors import MatchError
+    from ansiblelint.file_utils import Lintable
+    from ansiblelint.rules import RulesCollection
+    from ansiblelint.utils import Task
+
+_logger = logging.getLogger(__name__)
+LOAD_FAILURE_MD = """\
+# load-failure
+
+Linter failed to process a file, possible invalid file. Possible reasons:
+
+* contains unsupported encoding (only UTF-8 is supported)
+* not an Ansible file
+* it contains some unsupported custom YAML objects (`!!` prefix)
+* it was not able to decrypt an inline `!vault` block.
+
+This violation **is not** skippable, so it cannot be added to the `warn_list`
+or the `skip_list`. If a vault decryption issue cannot be avoided, the
+offending file can be added to the `exclude_paths` configuration.
+""" + + +# Derived rules are likely to want to access class members, so: +# pylint: disable=unused-argument +class BaseRule: + """Root class used by Rules.""" + + id: str = "" + tags: list[str] = [] + description: str = "" + version_added: str = "" + severity: str = "" + link: str = "" + has_dynamic_tags: bool = False + needs_raw_task: bool = False + # We use _order to sort rules and to ensure that some run before others, + # _order 0 for internal rules + # _order 1 for rules that check that data can be loaded + # _order 5 implicit for normal rules + _order: int = 5 + _help: str | None = None + # Added when a rule is registered into a collection, gives access to options + _collection: RulesCollection | None = None + + @property + def help(self) -> str: # noqa: A003 + """Return a help markdown string for the rule.""" + if self._help is None: + self._help = "" + md_file = ( + Path(inspect.getfile(self.__class__)).parent + / f"{self.id.replace('-', '_')}.md" + ) + if md_file.exists(): + self._help = md_file.read_text(encoding="utf-8") + return self._help + + @property + def url(self) -> str: + """Return rule documentation url.""" + url = self.link + if not url: # pragma: no cover + url = RULE_DOC_URL + if self.id: + url += self.id + "/" + return url + + @property + def shortdesc(self) -> str: + """Return the short description of the rule, basically the docstring.""" + return self.__doc__ or "" + + def getmatches(self, file: Lintable) -> list[MatchError]: + """Return all matches while ignoring exceptions.""" + matches = [] + if not file.path.is_dir(): + for method in [self.matchlines, self.matchtasks, self.matchyaml]: + try: + matches.extend(method(file)) + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 + _logger.warning( + "Ignored exception from %s.%s while processing %s: %s", + self.__class__.__name__, + method, + str(file), + exc, + ) + else: + matches.extend(self.matchdir(file)) + return matches + + def matchlines(self, file: Lintable) -> list[MatchError]: + """Return matches found for a specific line.""" + return [] + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str | MatchError | list[MatchError]: + """Confirm if current rule is matching a specific task. + + If ``needs_raw_task`` (a class level attribute) is ``True``, then + the original task (before normalization) will be made available under + ``task["__raw_task__"]``. 
+ """ + return False + + def matchtasks(self, file: Lintable) -> list[MatchError]: + """Return matches for a tasks file.""" + return [] + + def matchyaml(self, file: Lintable) -> list[MatchError]: + """Return matches found for a specific YAML text.""" + return [] + + def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]: + """Return matches found for a specific playbook.""" + return [] + + def matchdir(self, lintable: Lintable) -> list[MatchError]: + """Return matches for lintable folders.""" + return [] + + def verbose(self) -> str: + """Return a verbose representation of the rule.""" + return self.id + ": " + self.shortdesc + "\n " + self.description + + def match(self, line: str) -> bool | str: + """Confirm if current rule matches the given string.""" + return False + + def __lt__(self, other: BaseRule) -> bool: + """Enable us to sort rules by their id.""" + return (self._order, self.id) < (other._order, other.id) + + def __repr__(self) -> str: + """Return a AnsibleLintRule instance representation.""" + return self.id + ": " + self.shortdesc + + @classmethod + def ids(cls) -> dict[str, str]: + """Return a dictionary ids and their messages. + + This is used by the ``--list-tags`` option to ansible-lint. + """ + return getattr(cls, "_ids", {cls.id: cls.shortdesc}) + + +# pylint: enable=unused-argument + + +class RuntimeErrorRule(BaseRule): + """Unexpected internal error.""" + + id = "internal-error" + shortdesc = "Unexpected internal error" + severity = "VERY_HIGH" + tags = ["core"] + version_added = "v5.0.0" + _order = 0 + + +class AnsibleParserErrorRule(BaseRule): + """AnsibleParserError.""" + + id = "parser-error" + description = "Ansible parser fails; this usually indicates an invalid file." + severity = "VERY_HIGH" + tags = ["core"] + version_added = "v5.0.0" + _order = 0 + + +class LoadingFailureRule(BaseRule): + """Failed to load or parse file.""" + + id = "load-failure" + description = "Linter failed to process a file, possible invalid file." + severity = "VERY_HIGH" + tags = ["core", "unskippable"] + version_added = "v4.3.0" + _help = LOAD_FAILURE_MD + _order = 0 + _ids = { + "load-failure[not-found]": "File not found", + } + + +class WarningRule(BaseRule): + """Other warnings detected during run.""" + + id = "warning" + severity = "LOW" + # should remain experimental as that would keep it warning only + tags = ["core", "experimental"] + version_added = "v6.8.0" + _order = 0 diff --git a/src/ansiblelint/_internal/warning.md b/src/ansiblelint/_internal/warning.md new file mode 100644 index 0000000..97d2577 --- /dev/null +++ b/src/ansiblelint/_internal/warning.md @@ -0,0 +1,9 @@ +# warning + +`warning` is a special type of internal rule that is used to report generic +runtime warnings found during execution. As stated by its name, they are not +counted as errors, so they do not influence the final outcome. + +- `warning[raw-non-string]` indicates that you are using + `[raw](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/raw_module.html#ansible-collections-ansible-builtin-raw-module)` + module with non-string arguments, which is not supported by Ansible. 
diff --git a/src/ansiblelint/_mockings.py b/src/ansiblelint/_mockings.py new file mode 100644 index 0000000..e0482b7 --- /dev/null +++ b/src/ansiblelint/_mockings.py @@ -0,0 +1,125 @@ +"""Utilities for mocking ansible modules and roles.""" +from __future__ import annotations + +import contextlib +import logging +import re +import sys +from typing import TYPE_CHECKING + +from ansiblelint.constants import ANSIBLE_MOCKED_MODULE, RC + +if TYPE_CHECKING: + from pathlib import Path + + from ansiblelint.config import Options + +_logger = logging.getLogger(__name__) + + +def _make_module_stub(module_name: str, options: Options) -> None: + if not options.cache_dir: + msg = "Cache directory not set" + raise RuntimeError(msg) + # a.b.c is treated a collection + if re.match(r"^(\w+|\w+\.\w+\.[\.\w]+)$", module_name): + parts = module_name.split(".") + if len(parts) < 3: + path = options.cache_dir / "modules" + module_file = f"{options.cache_dir}/modules/{module_name}.py" + namespace = None + collection = None + else: + namespace = parts[0] + collection = parts[1] + path = ( + options.cache_dir + / "collections" + / "ansible_collections" + / namespace + / collection + / "plugins" + / "modules" + / ("/".join(parts[2:-1])) + ) + module_file = f"{path}/{parts[-1]}.py" + path.mkdir(exist_ok=True, parents=True) + _write_module_stub( + filename=module_file, + name=module_file, + namespace=namespace, + collection=collection, + ) + else: + _logger.error("Config error: %s is not a valid module name.", module_name) + sys.exit(RC.INVALID_CONFIG) + + +def _write_module_stub( + filename: str, + name: str, + namespace: str | None = None, + collection: str | None = None, +) -> None: + """Write module stub to disk.""" + body = ANSIBLE_MOCKED_MODULE.format( + name=name, + collection=collection, + namespace=namespace, + ) + with open(filename, "w", encoding="utf-8") as f: + f.write(body) + + +def _perform_mockings(options: Options) -> None: + """Mock modules and roles.""" + path: Path + if not options.cache_dir: + msg = "Cache directory not set" + raise RuntimeError(msg) + for role_name in options.mock_roles: + if re.match(r"\w+\.\w+\.\w+$", role_name): + namespace, collection, role_dir = role_name.split(".") + path = ( + options.cache_dir + / "collections" + / "ansible_collections" + / namespace + / collection + / "roles" + / role_dir + ) + else: + path = options.cache_dir / "roles" / role_name + # Avoid error from makedirs if destination is a broken symlink + if path.is_symlink() and not path.exists(): # pragma: no cover + _logger.warning("Removed broken symlink from %s", path) + path.unlink(missing_ok=True) + path.mkdir(exist_ok=True, parents=True) + + if options.mock_modules: + for module_name in options.mock_modules: + _make_module_stub(module_name=module_name, options=options) + + +def _perform_mockings_cleanup(options: Options) -> None: + """Clean up mocked modules and roles.""" + if not options.cache_dir: + msg = "Cache directory not set" + raise RuntimeError(msg) + for role_name in options.mock_roles: + if re.match(r"\w+\.\w+\.\w+$", role_name): + namespace, collection, role_dir = role_name.split(".") + path = ( + options.cache_dir + / "collections" + / "ansible_collections" + / namespace + / collection + / "roles" + / role_dir + ) + else: + path = options.cache_dir / "roles" / role_name + with contextlib.suppress(OSError): + path.unlink() diff --git a/src/ansiblelint/app.py b/src/ansiblelint/app.py new file mode 100644 index 0000000..52581b3 --- /dev/null +++ b/src/ansiblelint/app.py @@ -0,0 +1,411 @@ 
+"""Application.""" +from __future__ import annotations + +import copy +import itertools +import logging +import os +from functools import lru_cache +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from ansible_compat.runtime import Runtime +from rich.markup import escape +from rich.table import Table + +from ansiblelint import formatters +from ansiblelint._mockings import _perform_mockings +from ansiblelint.color import console, console_stderr, render_yaml +from ansiblelint.config import PROFILES, Options, get_version_warning +from ansiblelint.config import options as default_options +from ansiblelint.constants import RC, RULE_DOC_URL +from ansiblelint.loaders import IGNORE_FILE +from ansiblelint.stats import SummarizedResults, TagStats + +if TYPE_CHECKING: + from ansiblelint._internal.rules import BaseRule + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.runner import LintResult + + +_logger = logging.getLogger(__package__) + + +class App: + """App class represents an execution of the linter.""" + + def __init__(self, options: Options): + """Construct app run based on already loaded configuration.""" + options.skip_list = _sanitize_list_options(options.skip_list) + options.warn_list = _sanitize_list_options(options.warn_list) + + self.options = options + + formatter_factory = choose_formatter_factory(options) + self.formatter = formatter_factory(options.cwd, options.display_relative_path) + + # Without require_module, our _set_collections_basedir may fail + self.runtime = Runtime(isolated=True, require_module=True) + + def render_matches(self, matches: list[MatchError]) -> None: + """Display given matches (if they are not fixed).""" + matches = [match for match in matches if not match.fixed] + + if isinstance( + self.formatter, + (formatters.CodeclimateJSONFormatter, formatters.SarifFormatter), + ): + # If formatter CodeclimateJSONFormatter or SarifFormatter is chosen, + # then print only the matches in JSON + console.print( + self.formatter.format_result(matches), + markup=False, + highlight=False, + ) + return + + ignored_matches = [match for match in matches if match.ignored] + fatal_matches = [match for match in matches if not match.ignored] + # Displayed ignored matches first + if ignored_matches: + _logger.warning( + "Listing %s violation(s) marked as ignored, likely already known", + len(ignored_matches), + ) + for match in ignored_matches: + if match.ignored: + # highlight must be off or apostrophes may produce unexpected results + console.print(self.formatter.apply(match), highlight=False) + if fatal_matches: + _logger.warning( + "Listing %s violation(s) that are fatal", + len(fatal_matches), + ) + for match in fatal_matches: + if not match.ignored: + console.print(self.formatter.apply(match), highlight=False) + + # If run under GitHub Actions we also want to emit output recognized by it. + if os.getenv("GITHUB_ACTIONS") == "true" and os.getenv("GITHUB_WORKFLOW"): + _logger.info( + "GitHub Actions environment detected, adding annotations output...", + ) + formatter = formatters.AnnotationsFormatter(self.options.cwd, True) + for match in itertools.chain(fatal_matches, ignored_matches): + console_stderr.print( + formatter.apply(match), + markup=False, + highlight=False, + ) + + # If sarif_file is set, we also dump the results to a sarif file. 
+ if self.options.sarif_file: + sarif = formatters.SarifFormatter(self.options.cwd, True) + json = sarif.format_result(matches) + with Path.open( + self.options.sarif_file, + "w", + encoding="utf-8", + ) as sarif_file: + sarif_file.write(json) + + def count_results(self, matches: list[MatchError]) -> SummarizedResults: + """Count failures and warnings in matches.""" + result = SummarizedResults() + + for match in matches: + # any ignores match counts as a warning + if match.ignored: + result.warnings += 1 + continue + # tag can include a sub-rule id: `yaml[document-start]` + # rule.id is the generic rule id: `yaml` + # *rule.tags is the list of the rule's tags (categories): `style` + if match.tag not in result.tag_stats: + result.tag_stats[match.tag] = TagStats( + tag=match.tag, + count=1, + associated_tags=match.rule.tags, + ) + else: + result.tag_stats[match.tag].count += 1 + + if {match.tag, match.rule.id, *match.rule.tags}.isdisjoint( + self.options.warn_list, + ): + # not in warn_list + if match.fixed: + result.fixed_failures += 1 + else: + result.failures += 1 + else: + result.tag_stats[match.tag].warning = True + if match.fixed: + result.fixed_warnings += 1 + else: + result.warnings += 1 + return result + + @staticmethod + def count_lintables(files: set[Lintable]) -> tuple[int, int]: + """Count total and modified files.""" + files_count = len(files) + changed_files_count = len([file for file in files if file.updated]) + return files_count, changed_files_count + + @staticmethod + def _get_matched_skippable_rules( + matches: list[MatchError], + ) -> dict[str, BaseRule]: + """Extract the list of matched rules, if skippable, from the list of matches.""" + matches_unignored = [match for match in matches if not match.ignored] + # match.tag is more specialized than match.rule.id + matched_rules = { + match.tag or match.rule.id: match.rule for match in matches_unignored + } + # remove unskippable rules from the list + for rule_id in list(matched_rules.keys()): + if "unskippable" in matched_rules[rule_id].tags: + matched_rules.pop(rule_id) + return matched_rules + + def report_outcome( + self, + result: LintResult, + *, + mark_as_success: bool = False, + ) -> int: + """Display information about how to skip found rules. + + Returns exit code, 2 if errors were found, 0 when only warnings were found. + """ + msg = "" + + summary = self.count_results(result.matches) + files_count, changed_files_count = self.count_lintables(result.files) + + matched_rules = self._get_matched_skippable_rules(result.matches) + + if matched_rules and self.options.generate_ignore: + # ANSIBLE_LINT_IGNORE_FILE environment variable overrides default + # dumping location in linter and is not documented or supported. We + # use this only for testing purposes. + ignore_file_path = Path( + os.environ.get("ANSIBLE_LINT_IGNORE_FILE", IGNORE_FILE.default), + ) + console_stderr.print(f"Writing ignore file to {ignore_file_path}") + lines = set() + for rule in result.matches: + lines.add(f"{rule.filename} {rule.tag}\n") + with ignore_file_path.open("w", encoding="utf-8") as ignore_file: + ignore_file.write( + "# This file contains ignores rule violations for ansible-lint\n", + ) + ignore_file.writelines(sorted(lines)) + elif matched_rules and not self.options.quiet: + console_stderr.print( + "Read [link=https://ansible-lint.readthedocs.io/configuring/#ignoring-rules-for-entire-files]documentation[/link] for instructions on how to ignore specific rule violations.", + ) + + # Do not deprecate the old tags just yet. Why? 
Because it is not currently feasible + # to migrate old tags to new tags. There are a lot of things out there that still + # use ansible-lint 4 (for example, Ansible Galaxy and Automation Hub imports). If we + # replace the old tags, those tools will report warnings. If we do not replace them, + # ansible-lint 5 will report warnings. + # + # We can do the deprecation once the ecosystem caught up at least a bit. + # for k, v in used_old_tags.items(): + # _logger.warning( + # "error in the future.", + # k, + # v, + + if self.options.write_list and "yaml" in self.options.skip_list: + _logger.warning( + "You specified '--write', but no files can be modified " + "because 'yaml' is in 'skip_list'.", + ) + + if mark_as_success and summary.failures: + mark_as_success = False + + if not self.options.quiet: + console_stderr.print(render_yaml(msg)) + self.report_summary( + summary, + changed_files_count, + files_count, + is_success=mark_as_success, + ) + if mark_as_success: + if not files_count: + # success without any file being analyzed is reported as failure + # to match match, preventing accidents where linter was running + # not doing anything due to misconfiguration. + _logger.critical( + "Linter finished without analyzing any file, check configuration and arguments given.", + ) + return RC.NO_FILES_MATCHED + return RC.SUCCESS + return RC.VIOLATIONS_FOUND + + def report_summary( # pylint: disable=too-many-locals # noqa: C901 + self, + summary: SummarizedResults, + changed_files_count: int, + files_count: int, + is_success: bool, + ) -> None: + """Report match and file counts.""" + # sort the stats by profiles + idx = 0 + rule_order = {} + + for profile, profile_config in PROFILES.items(): + for rule in profile_config["rules"]: + rule_order[rule] = (idx, profile) + idx += 1 + _logger.debug("Determined rule-profile order: %s", rule_order) + failed_profiles = set() + for tag, tag_stats in summary.tag_stats.items(): + if tag in rule_order: + tag_stats.order, tag_stats.profile = rule_order.get(tag, (idx, "")) + elif "[" in tag: + tag_stats.order, tag_stats.profile = rule_order.get( + tag.split("[")[0], + (idx, ""), + ) + if tag_stats.profile: + failed_profiles.add(tag_stats.profile) + summary.sort() + + if changed_files_count: + console_stderr.print(f"Modified {changed_files_count} files.") + + # determine which profile passed + summary.passed_profile = "" + passed_profile_count = 0 + for profile in PROFILES: + if profile in failed_profiles: + break + if profile != summary.passed_profile: + summary.passed_profile = profile + passed_profile_count += 1 + + stars = "" + if summary.tag_stats: + table = Table( + title="Rule Violation Summary", + collapse_padding=True, + box=None, + show_lines=False, + ) + table.add_column("count", justify="right") + table.add_column("tag") + table.add_column("profile") + table.add_column("rule associated tags") + for tag, stats in summary.tag_stats.items(): + table.add_row( + str(stats.count), + f"[link={RULE_DOC_URL}{ tag.split('[')[0] }]{escape(tag)}[/link]", + stats.profile, + f"{', '.join(stats.associated_tags)}{' (warning)' if stats.warning else ''}", + style="yellow" if stats.warning else "red", + ) + # rate stars for the top 5 profiles (min would not get + rating = 5 - (len(PROFILES.keys()) - passed_profile_count) + if 0 < rating < 6: + stars = f" Rating: {rating}/5 star" + + console_stderr.print(table) + console_stderr.print() + + msg = "[green]Passed[/]" if is_success else "[red][bold]Failed[/][/]" + + msg += f": {summary.failures} failure(s), {summary.warnings} 
warning(s)" + if summary.fixed: + msg += f", and fixed {summary.fixed} issue(s)" + msg += f" on {files_count} files." + + # Now we add some information about required and passed profile + if self.options.profile: + msg += f" Profile '{self.options.profile}' was required" + if summary.passed_profile: + msg += f", but only '{summary.passed_profile}' profile passed." + else: + msg += "." + elif summary.passed_profile: + msg += f" Last profile that met the validation criteria was '{summary.passed_profile}'." + + if stars: + msg += stars + + # on offline mode and when run under pre-commit we do not want to + # check for updates. + if not self.options.offline and os.environ.get("PRE_COMMIT", "0") != "1": + version_warning = get_version_warning() + if version_warning: + msg += f"\n{version_warning}" + + console_stderr.print(msg) + + +def choose_formatter_factory( + options_list: Options, +) -> type[formatters.BaseFormatter[Any]]: + """Select an output formatter based on the incoming command line arguments.""" + r: type[formatters.BaseFormatter[Any]] = formatters.Formatter + if options_list.format == "quiet": + r = formatters.QuietFormatter + elif options_list.format in ("json", "codeclimate"): + r = formatters.CodeclimateJSONFormatter + elif options_list.format == "sarif": + r = formatters.SarifFormatter + elif options_list.parseable or options_list.format == "pep8": + r = formatters.ParseableFormatter + return r + + +def _sanitize_list_options(tag_list: list[str]) -> list[str]: + """Normalize list options.""" + # expand comma separated entries + tags = set() + for tag in tag_list: + tags.update(str(tag).split(",")) + # remove duplicates, and return as sorted list + return sorted(set(tags)) + + +@lru_cache +def get_app(*, offline: bool | None = None) -> App: + """Return the application instance, caching the return value.""" + if offline is None: + offline = default_options.offline + + if default_options.offline != offline: + options = copy.deepcopy(default_options) + options.offline = offline + else: + options = default_options + + app = App(options=options) + # Make linter use the cache dir from compat + options.cache_dir = app.runtime.cache_dir + + role_name_check = 0 + if "role-name" in app.options.warn_list: + role_name_check = 1 + elif "role-name" in app.options.skip_list: + role_name_check = 2 + + # mocking must happen before prepare_environment or galaxy install might + # fail. 
+ _perform_mockings(options=app.options) + app.runtime.prepare_environment( + install_local=(not offline), + offline=offline, + role_name_check=role_name_check, + ) + + return app diff --git a/src/ansiblelint/cli.py b/src/ansiblelint/cli.py new file mode 100644 index 0000000..c9178a7 --- /dev/null +++ b/src/ansiblelint/cli.py @@ -0,0 +1,636 @@ +"""CLI parser setup and helpers.""" +from __future__ import annotations + +import argparse +import logging +import os +import sys +from argparse import Namespace +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable + +from ansiblelint.config import ( + DEFAULT_KINDS, + DEFAULT_WARN_LIST, + PROFILES, + Options, + log_entries, +) +from ansiblelint.constants import CUSTOM_RULESDIR_ENVVAR, DEFAULT_RULESDIR, RC +from ansiblelint.file_utils import ( + Lintable, + abspath, + expand_path_vars, + find_project_root, + normpath, +) +from ansiblelint.loaders import IGNORE_FILE +from ansiblelint.schemas.main import validate_file_schema +from ansiblelint.yaml_utils import clean_json + +if TYPE_CHECKING: + from collections.abc import Sequence + + +_logger = logging.getLogger(__name__) +_PATH_VARS = [ + "rulesdir", +] + + +def expand_to_normalized_paths( + config: dict[str, Any], + base_dir: str | None = None, +) -> None: + """Mutate given config normalizing any path values in it.""" + # config can be None (-c /dev/null) + if not config: + return + base_dir = base_dir or os.getcwd() + for paths_var in _PATH_VARS: + if paths_var not in config: + continue # Cause we don't want to add a variable not present + + normalized_paths = [] + for path in config.pop(paths_var): + normalized_path = abspath(expand_path_vars(path), base_dir=base_dir) + + normalized_paths.append(normalized_path) + + config[paths_var] = normalized_paths + + +def load_config(config_file: str | None) -> tuple[dict[Any, Any], str | None]: + """Load configuration from disk.""" + config_path = None + + if config_file == "/dev/null": + _logger.debug("Skipping config file as it was set to /dev/null") + return {}, config_file + + if config_file: + config_path = os.path.abspath(config_file) + if not os.path.exists(config_path): + _logger.error("Config file not found '%s'", config_path) + sys.exit(RC.INVALID_CONFIG) + config_path = config_path or get_config_path() + if not config_path or not os.path.exists(config_path): + # a missing default config file should not trigger an error + return {}, None + + config_lintable = Lintable( + config_path, + kind="ansible-lint-config", + base_kind="text/yaml", + ) + + for error in validate_file_schema(config_lintable): + _logger.error("Invalid configuration file %s. %s", config_path, error) + sys.exit(RC.INVALID_CONFIG) + + config = clean_json(config_lintable.data) + if not isinstance(config, dict): + msg = "Schema failed to properly validate the config file." 
+ raise RuntimeError(msg) + config["config_file"] = config_path + config_dir = os.path.dirname(config_path) + expand_to_normalized_paths(config, config_dir) + + return config, config_path + + +def get_config_path(config_file: str | None = None) -> str | None: + """Return local config file.""" + if config_file: + project_filenames = [config_file] + else: + project_filenames = [ + ".ansible-lint", + ".config/ansible-lint.yml", + ".config/ansible-lint.yaml", + ] + parent = tail = os.getcwd() + while tail: + for project_filename in project_filenames: + filename = os.path.abspath(os.path.join(parent, project_filename)) + if os.path.exists(filename): + return filename + if os.path.exists(os.path.abspath(os.path.join(parent, ".git"))): + # Avoid looking outside .git folders as we do not want end-up + # picking config files from upper level projects if current + # project has no config. + return None + (parent, tail) = os.path.split(parent) + return None + + +class AbspathArgAction(argparse.Action): + """Argparse action to convert relative paths to absolute paths.""" + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: Namespace, + values: str | Sequence[Any] | None, + option_string: str | None = None, + ) -> None: + if isinstance(values, (str, Path)): + values = [values] + if values: + normalized_values = [ + Path(expand_path_vars(str(path))).resolve() for path in values + ] + previous_values = getattr(namespace, self.dest, []) + setattr(namespace, self.dest, previous_values + normalized_values) + + +class WriteArgAction(argparse.Action): + """Argparse action to handle the --write flag with optional args.""" + + _default = "__default__" + + # noinspection PyShadowingBuiltins + def __init__( # pylint: disable=too-many-arguments,redefined-builtin + self, + option_strings: list[str], + dest: str, + nargs: int | str | None = None, + const: Any = None, + default: Any = None, + type: Callable[[str], Any] | None = None, # noqa: A002 + choices: list[Any] | None = None, + *, + required: bool = False, + help: str | None = None, # noqa: A002 + metavar: str | None = None, + ) -> None: + """Create the argparse action with WriteArg-specific defaults.""" + if nargs is not None: + msg = "nargs for WriteArgAction must not be set." + raise ValueError(msg) + if const is not None: + msg = "const for WriteArgAction must not be set." + raise ValueError(msg) + super().__init__( + option_strings=option_strings, + dest=dest, + nargs="?", # either 0 (--write) or 1 (--write=a,b,c) argument + const=self._default, # --write (no option) implicitly stores this + default=default, + type=type, + choices=choices, + required=required, + help=help, + metavar=metavar, + ) + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: Namespace, + values: str | Sequence[Any] | None, + option_string: str | None = None, + ) -> None: + lintables = getattr(namespace, "lintables", None) + if not lintables and isinstance(values, str): + # args are processed in order. + # If --write is after lintables, then that is not ambiguous. + # But if --write comes first, then it might actually be a lintable. 
+ maybe_lintable = Path(values) + if maybe_lintable.exists(): + namespace.lintables = [values] + values = [] + if isinstance(values, str): + values = values.split(",") + default = [self.const] if isinstance(self.const, str) else self.const + previous_values = getattr(namespace, self.dest, default) or default + if not values: + values = previous_values + elif previous_values != default: + values = previous_values + values + setattr(namespace, self.dest, values) + + @classmethod + def merge_write_list_config( + cls, + from_file: list[str], + from_cli: list[str], + ) -> list[str]: + """Combine the write_list from file config with --write CLI arg. + + Handles the implicit "all" when "__default__" is present and file config is empty. + """ + if not from_file or "none" in from_cli: + # --write is the same as --write=all + return ["all" if value == cls._default else value for value in from_cli] + # --write means use the config from the config file + from_cli = [value for value in from_cli if value != cls._default] + return from_file + from_cli + + +def get_cli_parser() -> argparse.ArgumentParser: + """Initialize an argument parser.""" + parser = argparse.ArgumentParser() + + listing_group = parser.add_mutually_exclusive_group() + listing_group.add_argument( + "-P", + "--list-profiles", + dest="list_profiles", + default=False, + action="store_true", + help="List all profiles, no formatting options available.", + ) + listing_group.add_argument( + "-L", + "--list-rules", + dest="list_rules", + default=False, + action="store_true", + help="List all the rules. For listing rules only the following formats " + "for argument -f are supported: {brief, full, md} with 'brief' as default.", + ) + listing_group.add_argument( + "-T", + "--list-tags", + dest="list_tags", + action="store_true", + help="List all the tags and the rules they cover. Increase the verbosity level " + "with `-v` to include 'opt-in' tag and its rules.", + ) + parser.add_argument( + "-f", + "--format", + dest="format", + default=None, + choices=[ + "brief", + # "plain", + "full", + "md", + "json", + "codeclimate", + "quiet", + "pep8", + "sarif", + ], + help="stdout formatting, json being an alias for codeclimate. (default: %(default)s)", + ) + parser.add_argument( + "--sarif-file", + default=None, + type=Path, + help="SARIF output file", + ) + parser.add_argument( + "-q", + dest="quiet", + default=0, + action="count", + help="quieter, reduce verbosity, can be specified twice.", + ) + parser.add_argument( + "--profile", + dest="profile", + default=None, + action="store", + choices=PROFILES.keys(), + help="Specify which rules profile to be used.", + ) + parser.add_argument( + "-p", + "--parseable", + dest="parseable", + default=False, + action="store_true", + help="parseable output, same as '-f pep8'", + ) + parser.add_argument( + "--project-dir", + dest="project_dir", + default=None, + help="Location of project/repository, autodetected based on location " + "of configuration file.", + ) + parser.add_argument( + "-r", + "--rules-dir", + action=AbspathArgAction, + dest="rulesdir", + default=[], + type=Path, + help="Specify custom rule directories. 
Add -R " + f"to keep using embedded rules from {DEFAULT_RULESDIR}", + ) + parser.add_argument( + "-R", + action="store_true", + default=False, + dest="use_default_rules", + help="Keep default rules when using -r", + ) + parser.add_argument( + "-s", + "--strict", + action="store_true", + default=False, + dest="strict", + help="Return non-zero exit code on warnings as well as errors", + ) + parser.add_argument( + "--write", + dest="write_list", + # this is a tri-state argument that takes an optional comma separated list: + action=WriteArgAction, + help="Allow ansible-lint to reformat YAML files and run rule transforms " + "(Reformatting YAML files standardizes spacing, quotes, etc. " + "A rule transform can fix or simplify fixing issues identified by that rule). " + "You can limit the effective rule transforms (the 'write_list') by passing a " + "keywords 'all' or 'none' or a comma separated list of rule ids or rule tags. " + "YAML reformatting happens whenever '--write' or '--write=' is used. " + "'--write' and '--write=all' are equivalent: they allow all transforms to run. " + "The effective list of transforms comes from 'write_list' in the config file, " + "followed whatever '--write' args are provided on the commandline. " + "'--write=none' resets the list of transforms to allow reformatting YAML " + "without running any of the transforms (ie '--write=none,rule-id' will " + "ignore write_list in the config file and only run the rule-id transform).", + ) + parser.add_argument( + "--show-relpath", + dest="display_relative_path", + action="store_false", + default=True, + help="Display path relative to CWD", + ) + parser.add_argument( + "-t", + "--tags", + dest="tags", + action="append", + default=[], + help="only check rules whose id/tags match these values", + ) + parser.add_argument( + "-v", + dest="verbosity", + action="count", + help="Increase verbosity level (-vv for more)", + default=0, + ) + parser.add_argument( + "-x", + "--skip-list", + dest="skip_list", + default=[], + action="append", + help="only check rules whose id/tags do not match these values. \ + e.g: --skip-list=name,run-once", + ) + parser.add_argument( + "--generate-ignore", + dest="generate_ignore", + action="store_true", + default=False, + help="Generate a text file '.ansible-lint-ignore' that ignores all found violations. Each line contains filename and rule id separated by a space.", + ) + parser.add_argument( + "-w", + "--warn-list", + dest="warn_list", + default=[], + action="append", + help="only warn about these rules, unless overridden in " + f"config file. Current version default value is: {', '.join(DEFAULT_WARN_LIST)}", + ) + parser.add_argument( + "--enable-list", + dest="enable_list", + default=[], + action="append", + help="activate optional rules by their tag name", + ) + # Do not use store_true/store_false because they create opposite defaults. + parser.add_argument( + "--nocolor", + dest="colored", + action="store_const", + const=False, + help="disable colored output, same as NO_COLOR=1", + ) + parser.add_argument( + "--force-color", + dest="colored", + action="store_const", + const=True, + help="Force colored output, same as FORCE_COLOR=1", + ) + parser.add_argument( + "--exclude", + dest="exclude_paths", + action="extend", + nargs="+", + type=str, + default=[], + help="path to directories or files to skip. This option is repeatable.", + ) + parser.add_argument( + "-c", + "--config-file", + dest="config_file", + help="Specify configuration file to use. 
By default it will look for '.ansible-lint', '.config/ansible-lint.yml', or '.config/ansible-lint.yaml'", + ) + parser.add_argument( + "-i", + "--ignore-file", + dest="ignore_file", + type=Path, + default=None, + help=f"Specify ignore file to use. By default it will look for '{IGNORE_FILE.default}' or '{IGNORE_FILE.alternative}'", + ) + parser.add_argument( + "--offline", + dest="offline", + action="store_const", + const=True, + help="Disable installation of requirements.yml and schema refreshing", + ) + parser.add_argument( + "--version", + action="store_true", + ) + parser.add_argument( + dest="lintables", + nargs="*", + action="extend", + help="One or more files or paths. When missing it will enable auto-detection mode.", + ) + + return parser + + +def merge_config(file_config: dict[Any, Any], cli_config: Options) -> Options: + """Combine the file config with the CLI args.""" + bools = ( + "display_relative_path", + "parseable", + "quiet", + "strict", + "use_default_rules", + "offline", + ) + # maps lists to their default config values + lists_map = { + "exclude_paths": [".cache", ".git", ".hg", ".svn", ".tox"], + "rulesdir": [], + "skip_list": [], + "tags": [], + "warn_list": DEFAULT_WARN_LIST, + "mock_modules": [], + "mock_roles": [], + "enable_list": [], + "only_builtins_allow_collections": [], + "only_builtins_allow_modules": [], + # do not include "write_list" here. See special logic below. + } + + scalar_map = { + "loop_var_prefix": None, + "project_dir": None, + "profile": None, + "sarif_file": None, + } + + if not file_config: + # use defaults if we don't have a config file and the commandline + # parameter is not set + for entry, default in lists_map.items(): + if not getattr(cli_config, entry, None): + setattr(cli_config, entry, default) + return cli_config + + for entry in bools: + file_value = file_config.pop(entry, False) + v = getattr(cli_config, entry) or file_value + setattr(cli_config, entry, v) + + for entry, default in scalar_map.items(): + file_value = file_config.pop(entry, default) + v = getattr(cli_config, entry, None) or file_value + setattr(cli_config, entry, v) + + # if either commandline parameter or config file option is set merge + # with the other, if neither is set use the default + for entry, default in lists_map.items(): + if getattr(cli_config, entry, None) or entry in file_config: + value = getattr(cli_config, entry, []) + value.extend(file_config.pop(entry, [])) + else: + value = default + setattr(cli_config, entry, value) + + # "write_list" config has special merge rules + entry = "write_list" + setattr( + cli_config, + entry, + WriteArgAction.merge_write_list_config( + from_file=file_config.pop(entry, []), + from_cli=getattr(cli_config, entry, []) or [], + ), + ) + + if "verbosity" in file_config: + cli_config.verbosity = cli_config.verbosity + file_config.pop("verbosity") + + # merge options that can be set only via a file config + for entry, value in file_config.items(): + setattr(cli_config, entry, value) + + # append default kinds to the custom list + kinds = file_config.get("kinds", []) + kinds.extend(DEFAULT_KINDS) + cli_config.kinds = kinds + + return cli_config + + +def get_config(arguments: list[str]) -> Options: + """Extract the config based on given args.""" + parser = get_cli_parser() + options = Options(**vars(parser.parse_args(arguments))) + + # docs is not document, being used for internal documentation building + if options.list_rules and options.format not in [ + None, + "brief", + "full", + "md", + ]: + parser.error( + f"argument 
-f: invalid choice: '{options.format}'. " + f"In combination with argument -L only 'brief', " + f"'rich' or 'md' are supported with -f.", + ) + + # save info about custom config file, as options.config_file may be modified by merge_config + file_config, options.config_file = load_config(options.config_file) + config = merge_config(file_config, options) + + options.rulesdirs = get_rules_dirs( + options.rulesdir, + use_default=options.use_default_rules, + ) + + if not options.project_dir: + project_dir, method = find_project_root( + srcs=options.lintables, + config_file=options.config_file, + ) + options.project_dir = os.path.expanduser(normpath(project_dir)) + log_entries.append( + ( + logging.INFO, + f"Identified [filename]{project_dir}[/] as project root due [bold]{method}[/].", + ), + ) + + if not options.project_dir or not os.path.exists(options.project_dir): + msg = f"Failed to determine a valid project_dir: {options.project_dir}" + raise RuntimeError(msg) + + # expand user home dir in exclude_paths + options.exclude_paths = [ + os.path.expandvars(os.path.expanduser(p)) for p in options.exclude_paths + ] + + # Compute final verbosity level by subtracting -q counter. + options.verbosity -= options.quiet + return config + + +def print_help(file: Any = sys.stdout) -> None: + """Print help test to the given stream.""" + get_cli_parser().print_help(file=file) + + +def get_rules_dirs(rulesdir: list[Path], *, use_default: bool = True) -> list[Path]: + """Return a list of rules dirs.""" + default_ruledirs = [DEFAULT_RULESDIR] + default_custom_rulesdir = os.environ.get( + CUSTOM_RULESDIR_ENVVAR, + os.path.join(DEFAULT_RULESDIR, "custom"), + ) + custom_ruledirs = sorted( + str(x.resolve()) + for x in Path(default_custom_rulesdir).iterdir() + if x.is_dir() and (x / "__init__.py").exists() + ) + + result: list[Any] = [] + if use_default: + result = rulesdir + custom_ruledirs + default_ruledirs + elif rulesdir: + result = rulesdir + else: + result = custom_ruledirs + default_ruledirs + return [Path(p) for p in result] diff --git a/src/ansiblelint/color.py b/src/ansiblelint/color.py new file mode 100644 index 0000000..8f31e1c --- /dev/null +++ b/src/ansiblelint/color.py @@ -0,0 +1,104 @@ +"""Console coloring and terminal support.""" +from __future__ import annotations + +from typing import Any + +import rich +import rich.markdown +from rich.console import Console +from rich.default_styles import DEFAULT_STYLES +from rich.style import Style +from rich.syntax import Syntax +from rich.theme import Theme + +# WARNING: When making style changes, be sure you test the output of +# `ansible-lint -L` on multiple terminals with dark/light themes, including: +# - iTerm2 (macOS) - bold might not be rendered differently +# - vscode integrated terminal - bold might not be rendered differently, links will not work +# +# When it comes to colors being used, try to match: +# - Ansible official documentation theme, https://docs.ansible.com/ansible/latest/dev_guide/developing_api.html +# - VSCode Ansible extension for syntax highlighting +# - GitHub markdown theme +# +# Current values: (docs) +# codeblock border: #404040 +# codeblock background: #edf0f2 +# codeblock comment: #6a737d (also italic) +# teletype-text: #e74c3c (red) +# teletype-text-border: 1px solid #e1e4e5 (background white) +# text: #404040 +# codeblock other-text: #555555 (black-ish) +# codeblock property: #22863a (green) +# codeblock integer: 032f62 (blue) +# codeblock command: #0086b3 (blue) - [shell] +# == python == +# class: #445588 (dark blue and 
bold) +# docstring: #dd1144 (red) +# self: #999999 (light-gray) +# method/function: #990000 (dark-red) +# number: #009999 cyan +# keywords (def,None,False,len,from,import): #007020 (green) bold +# super|dict|print: #0086b3 light-blue +# __name__: #bb60d5 (magenta) +# string: #dd1144 (light-red) +DEFAULT_STYLES.update( + { + "markdown.code": Style(color="bright_black"), + "markdown.code_block": Style(dim=True, color="cyan"), + }, +) + + +_theme = Theme( + { + "info": "cyan", + "warning": "yellow", + "danger": "bold red", + "title": "yellow", + "error": "bright_red", + "filename": "blue", + }, +) +console_options: dict[str, Any] = {"emoji": False, "theme": _theme, "soft_wrap": True} +console_options_stderr = console_options.copy() +console_options_stderr["stderr"] = True + +console = rich.get_console() +console_stderr = Console(**console_options_stderr) + + +def reconfigure(new_options: dict[str, Any]) -> None: + """Reconfigure console options.""" + console_options = new_options # pylint: disable=redefined-outer-name + rich.reconfigure(**new_options) + # see https://github.com/willmcgugan/rich/discussions/484#discussioncomment-200182 + new_console_options_stderr = console_options.copy() + new_console_options_stderr["stderr"] = True + tmp_console = Console(**new_console_options_stderr) + console_stderr.__dict__ = tmp_console.__dict__ + + +def render_yaml(text: str) -> Syntax: + """Colorize YAMl for nice display.""" + return Syntax(text, "yaml", theme="ansi_dark") + + +# pylint: disable=redefined-outer-name,unused-argument +def _rich_codeblock_custom_rich_console( + self: rich.markdown.CodeBlock, + console: Console, # noqa: ARG001 + options: rich.console.ConsoleOptions, # noqa: ARG001 +) -> rich.console.RenderResult: # pragma: no cover + code = str(self.text).rstrip() + syntax = Syntax( + code, + self.lexer_name, + theme=self.theme, + word_wrap=True, + background_color="default", + ) + yield syntax + + +rich.markdown.CodeBlock.__rich_console__ = _rich_codeblock_custom_rich_console # type: ignore[method-assign] diff --git a/src/ansiblelint/config.py b/src/ansiblelint/config.py new file mode 100644 index 0000000..6164b10 --- /dev/null +++ b/src/ansiblelint/config.py @@ -0,0 +1,317 @@ +"""Store configuration options as a singleton.""" +from __future__ import annotations + +import json +import logging +import os +import sys +import time +import urllib.request +import warnings +from dataclasses import dataclass, field +from functools import lru_cache +from importlib.metadata import PackageNotFoundError, distribution, version +from pathlib import Path +from typing import Any +from urllib.error import HTTPError, URLError + +from packaging.version import Version + +from ansiblelint import __version__ +from ansiblelint.loaders import yaml_from_file + +_logger = logging.getLogger(__name__) + + +CACHE_DIR = ( + os.path.expanduser(os.environ.get("XDG_CACHE_HOME", "~/.cache")) + "/ansible-lint" +) + +DEFAULT_WARN_LIST = [ + "experimental", + "jinja[spacing]", # warning until we resolve all reported false-positives + "fqcn[deep]", # 2023-05-31 added +] + +DEFAULT_KINDS = [ + # Do not sort this list, order matters. 
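The ordering note above is load-bearing: `kind_from_path()` (defined later in `file_utils.py` in this same commit) walks these entries top to bottom and returns the first glob that matches, so the generic `**/*.{yaml,yml}` catch-alls have to stay near the end. A minimal sketch of that first-match lookup, using the same `wcmatch` flags as the real function; the two patterns shown are illustrative, not the full list:

```python
# First-match kind lookup, mirroring kind_from_path(); ordering decides the winner.
import wcmatch.pathlib

KINDS = [
    {"playbook": "**/playbooks/*.{yml,yaml}"},
    {"yaml": "**/*.{yaml,yml}"},  # generic catch-all, must come after the specific globs
]
FLAGS = wcmatch.pathlib.GLOBSTAR | wcmatch.pathlib.BRACE | wcmatch.pathlib.DOTGLOB


def guess_kind(path: str) -> str:
    """Return the kind of the first matching glob, or '' when nothing matches."""
    pure = wcmatch.pathlib.PurePath(path)
    for entry in KINDS:
        for kind, pattern in entry.items():
            if pure.globmatch(pattern, flags=FLAGS):
                return kind
    return ""


print(guess_kind("site/playbooks/deploy.yml"))  # playbook
print(guess_kind("group_vars/all.yml"))         # yaml
```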
+ {"jinja2": "**/*.j2"}, # jinja2 templates are not always parsable as something else + {"jinja2": "**/*.j2.*"}, + {"yaml": ".github/**/*.{yaml,yml}"}, # github workflows + {"text": "**/templates/**/*.*"}, # templates are likely not validable + {"execution-environment": "**/execution-environment.yml"}, + {"ansible-lint-config": "**/.ansible-lint"}, + {"ansible-lint-config": "**/.config/ansible-lint.yml"}, + {"ansible-navigator-config": "**/ansible-navigator.{yaml,yml}"}, + {"inventory": "**/inventory/**.{yaml,yml}"}, + {"requirements": "**/meta/requirements.{yaml,yml}"}, # v1 only + # https://docs.ansible.com/ansible/latest/dev_guide/collections_galaxy_meta.html + {"galaxy": "**/galaxy.yml"}, # Galaxy collection meta + {"reno": "**/releasenotes/*/*.{yaml,yml}"}, # reno release notes + {"vars": "**/{host_vars,group_vars,vars,defaults}/**/*.{yaml,yml}"}, + {"tasks": "**/tasks/**/*.{yaml,yml}"}, + {"rulebook": "**/rulebooks/*.{yml,yaml"}, + {"playbook": "**/playbooks/*.{yml,yaml}"}, + {"playbook": "**/*playbook*.{yml,yaml}"}, + {"role": "**/roles/*/"}, + {"handlers": "**/handlers/*.{yaml,yml}"}, + {"test-meta": "**/tests/integration/targets/*/meta/main.{yaml,yml}"}, + {"meta": "**/meta/main.{yaml,yml}"}, + {"meta-runtime": "**/meta/runtime.{yaml,yml}"}, + {"role-arg-spec": "**/meta/argument_specs.{yaml,yml}"}, # role argument specs + {"yaml": ".config/molecule/config.{yaml,yml}"}, # molecule global config + { + "requirements": "**/molecule/*/{collections,requirements}.{yaml,yml}", + }, # molecule old collection requirements (v1), ansible 2.8 only + {"yaml": "**/molecule/*/{base,molecule}.{yaml,yml}"}, # molecule config + {"requirements": "**/requirements.{yaml,yml}"}, # v2 and v1 + {"playbook": "**/molecule/*/*.{yaml,yml}"}, # molecule playbooks + {"yaml": "**/{.ansible-lint,.yamllint}"}, + {"changelog": "**/changelogs/changelog.yaml"}, + {"yaml": "**/*.{yaml,yml}"}, + {"yaml": "**/.*.{yaml,yml}"}, + {"sanity-ignore-file": "**/tests/sanity/ignore-*.txt"}, + # what are these doc_fragments? We also ignore module_utils for now + { + "plugin": "**/plugins/{action,become,cache,callback,connection,filter,inventory,lookup,modules,test}/**/*.py", + }, + {"python": "**/*.py"}, +] + +BASE_KINDS = [ + # These assignations are only for internal use and are only inspired by + # MIME/IANA model. Their purpose is to be able to process a file based on + # it type, including generic processing of text files using the prefix. 
+ { + "text/jinja2": "**/*.j2", + }, # jinja2 templates are not always parsable as something else + {"text/jinja2": "**/*.j2.*"}, + {"text": "**/templates/**/*.*"}, # templates are likely not validable + {"text/json": "**/*.json"}, # standardized + {"text/markdown": "**/*.md"}, # https://tools.ietf.org/html/rfc7763 + {"text/rst": "**/*.rst"}, # https://en.wikipedia.org/wiki/ReStructuredText + {"text/ini": "**/*.ini"}, + # YAML has no official IANA assignation + {"text/yaml": "**/{.ansible-lint,.yamllint}"}, + {"text/yaml": "**/*.{yaml,yml}"}, + {"text/yaml": "**/.*.{yaml,yml}"}, + {"text/python": "**/*.py"}, +] + +PROFILES = yaml_from_file(Path(__file__).parent / "data" / "profiles.yml") + +LOOP_VAR_PREFIX = "^(__|{role}_)" + + +@dataclass +class Options: # pylint: disable=too-many-instance-attributes,too-few-public-methods + """Store ansible-lint effective configuration options.""" + + cache_dir: Path | None = None + colored: bool = True + configured: bool = False + cwd: Path = Path(".") + display_relative_path: bool = True + exclude_paths: list[str] = field(default_factory=list) + format: str = "brief" # noqa: A003 + lintables: list[str] = field(default_factory=list) + list_rules: bool = False + list_tags: bool = False + write_list: list[str] = field(default_factory=list) + parseable: bool = False + quiet: bool = False + rulesdirs: list[Path] = field(default_factory=list) + skip_list: list[str] = field(default_factory=list) + tags: list[str] = field(default_factory=list) + verbosity: int = 0 + warn_list: list[str] = field(default_factory=list) + kinds = DEFAULT_KINDS + mock_filters: list[str] = field(default_factory=list) + mock_modules: list[str] = field(default_factory=list) + mock_roles: list[str] = field(default_factory=list) + loop_var_prefix: str | None = None + only_builtins_allow_collections: list[str] = field(default_factory=list) + only_builtins_allow_modules: list[str] = field(default_factory=list) + var_naming_pattern: str | None = None + offline: bool = False + project_dir: str = "." # default should be valid folder (do not use None here) + extra_vars: dict[str, Any] | None = None + enable_list: list[str] = field(default_factory=list) + skip_action_validation: bool = True + strict: bool = False + rules: dict[str, Any] = field( + default_factory=dict, + ) # Placeholder to set and keep configurations for each rule. 
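The `rules` field just above is the hook that `get_rule_config()` (defined further down in this module) reads from. A rough sketch of storing and retrieving a per-rule setting; the `var-naming` key and its `pattern` value are illustrative only, not a claim about which options that rule actually accepts:

```python
# Store and fetch per-rule settings kept in Options.rules (illustrative values).
from ansiblelint.config import get_rule_config, options

options.rules = {"var-naming": {"pattern": "^[a-z_][a-z0-9_]*$"}}

print(get_rule_config("var-naming"))  # {'pattern': '^[a-z_][a-z0-9_]*$'}
print(get_rule_config("no-tabs"))     # {} -- unconfigured rules fall back to an empty dict
```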
+ profile: str | None = None + task_name_prefix: str = "{stem} | " + sarif_file: Path | None = None + config_file: str | None = None + generate_ignore: bool = False + rulesdir: list[Path] = field(default_factory=list) + use_default_rules: bool = False + version: bool = False # display version command + list_profiles: bool = False # display profiles command + ignore_file: Path | None = None + + +options = Options() + +# Used to store detected tag deprecations +used_old_tags: dict[str, str] = {} + +# Used to store collection list paths (with mock paths if needed) +collection_list: list[str] = [] + +# Used to store log messages before logging is initialized (level, message) +log_entries: list[tuple[int, str]] = [] + + +def get_rule_config(rule_id: str) -> dict[str, Any]: + """Get configurations for the rule ``rule_id``.""" + rule_config = options.rules.get(rule_id, {}) + if not isinstance(rule_config, dict): # pragma: no branch + msg = f"Invalid rule config for {rule_id}: {rule_config}" + raise RuntimeError(msg) + return rule_config + + +@lru_cache +def ansible_collections_path() -> str: + """Return collection path variable for current version of Ansible.""" + # respect Ansible behavior, which is to load old name if present + for env_var in [ + "ANSIBLE_COLLECTIONS_PATHS", + "ANSIBLE_COLLECTIONS_PATH", + ]: # pragma: no cover + if env_var in os.environ: + return env_var + return "ANSIBLE_COLLECTIONS_PATH" + + +def in_venv() -> bool: + """Determine whether Python is running from a venv.""" + if hasattr(sys, "real_prefix") or os.environ.get("CONDA_EXE", None) is not None: + return True + + pfx = getattr(sys, "base_prefix", sys.prefix) + return pfx != sys.prefix + + +def guess_install_method() -> str: + """Guess if pip upgrade command should be used.""" + package_name = "ansible-lint" + + try: + if (distribution(package_name).read_text("INSTALLER") or "").strip() != "pip": + return "" + except PackageNotFoundError as exc: + logging.debug(exc) + return "" + + pip = "" + if in_venv(): + _logger.debug("Found virtualenv, assuming `pip3 install` will work.") + pip = f"pip install --upgrade {package_name}" + elif __file__.startswith(os.path.expanduser("~/.local/lib")): + _logger.debug( + "Found --user installation, assuming `pip3 install --user` will work.", + ) + pip = f"pip3 install --user --upgrade {package_name}" + + # By default we assume pip is not safe to be used + use_pip = False + try: + # Use pip to detect if is safe to use it to upgrade the package. + # We do imports here to for performance and reasons, and also in order + # to avoid errors if pip internals change. Also we want to avoid having + # to add pip as a dependency, so we make use of it only when present. + + # trick to avoid runtime warning from inside pip: _distutils_hack/__init__.py:33: UserWarning: Setuptools is replacing distutils. 
+ with warnings.catch_warnings(record=True): + warnings.simplefilter("always") + # pylint: disable=import-outside-toplevel + from pip._internal.metadata import get_default_environment + from pip._internal.req.req_uninstall import uninstallation_paths + + dist = get_default_environment().get_distribution(package_name) + if dist: + logging.debug("Found %s dist", dist) + for _ in uninstallation_paths(dist): + use_pip = True + else: + logging.debug("Skipping %s as it is not installed.", package_name) + use_pip = False + # pylint: disable=broad-except + except (AttributeError, ModuleNotFoundError) as exc: + # On Fedora 36, we got a AttributeError exception from pip that we want to avoid + # On NixOS, we got a ModuleNotFoundError exception from pip that we want to avoid + logging.debug(exc) + use_pip = False + + # We only want to recommend pip for upgrade if it looks safe to do so. + return pip if use_pip else "" + + +def get_deps_versions() -> dict[str, Version | None]: + """Return versions of most important dependencies.""" + result: dict[str, Version | None] = {} + + for name in ["ansible-core", "ansible-compat", "ruamel-yaml", "ruamel-yaml-clib"]: + try: + result[name] = Version(version(name)) + except PackageNotFoundError: + result[name] = None + return result + + +def get_version_warning() -> str: + """Display warning if current version is outdated.""" + # 0.1dev1 is special fallback version + if __version__ == "0.1.dev1": # pragma: no cover + return "" + + msg = "" + data = {} + current_version = Version(__version__) + + if not os.path.exists(CACHE_DIR): # pragma: no cover + os.makedirs(CACHE_DIR) + cache_file = f"{CACHE_DIR}/latest.json" + refresh = True + if os.path.exists(cache_file): + age = time.time() - os.path.getmtime(cache_file) + if age < 24 * 60 * 60: + refresh = False + with open(cache_file, encoding="utf-8") as f: + data = json.load(f) + + if refresh or not data: + release_url = ( + "https://api.github.com/repos/ansible/ansible-lint/releases/latest" + ) + try: + with urllib.request.urlopen(release_url) as url: # noqa: S310 + data = json.load(url) + with open(cache_file, "w", encoding="utf-8") as f: + json.dump(data, f) + except (URLError, HTTPError) as exc: # pragma: no cover + _logger.debug( + "Unable to fetch latest version from %s due to: %s", + release_url, + exc, + ) + return "" + + html_url = data["html_url"] + new_version = Version(data["tag_name"][1:]) # removing v prefix from tag + + if current_version > new_version: + msg = "[dim]You are using a pre-release version of ansible-lint.[/]" + elif current_version < new_version: + msg = f"""[warning]A new release of ansible-lint is available: [red]{current_version}[/] → [green][link={html_url}]{new_version}[/][/][/]""" + + pip = guess_install_method() + if pip: + msg += f" Upgrade by running: [info]{pip}[/]" + + return msg diff --git a/src/ansiblelint/constants.py b/src/ansiblelint/constants.py new file mode 100644 index 0000000..6b8bd12 --- /dev/null +++ b/src/ansiblelint/constants.py @@ -0,0 +1,189 @@ +"""Constants used by AnsibleLint.""" +from enum import Enum +from pathlib import Path +from typing import Literal + +DEFAULT_RULESDIR = Path(__file__).parent / "rules" +CUSTOM_RULESDIR_ENVVAR = "ANSIBLE_LINT_CUSTOM_RULESDIR" +RULE_DOC_URL = "https://ansible-lint.readthedocs.io/rules/" + + +# Not using an IntEnum because only starting with py3.11 it will evaluate it +# as int. 
+class RC: # pylint: disable=too-few-public-methods + """All exit codes used by ansible-lint.""" + + SUCCESS = 0 + VIOLATIONS_FOUND = 2 + INVALID_CONFIG = 3 + LOCK_TIMEOUT = 4 + NO_FILES_MATCHED = 5 + EXIT_CONTROL_C = 130 + + +# Minimal version of Ansible we support for runtime +ANSIBLE_MIN_VERSION = "2.12" + +ANSIBLE_MOCKED_MODULE = """\ +# This is a mocked Ansible module generated by ansible-lint +from ansible.module_utils.basic import AnsibleModule + +DOCUMENTATION = ''' +module: {name} + +short_description: Mocked +version_added: "1.0.0" +description: Mocked + +author: + - ansible-lint (@nobody) +''' +EXAMPLES = '''mocked''' +RETURN = '''mocked''' + + +def main(): + result = dict( + changed=False, + original_message='', + message='') + + module = AnsibleModule( + argument_spec=dict(), + supports_check_mode=True, + ) + module.exit_json(**result) + + +if __name__ == "__main__": + main() +""" + +FileType = Literal[ + "playbook", + "rulebook", + "meta", # role meta + "meta-runtime", + "tasks", # includes pre_tasks, post_tasks + "handlers", # very similar to tasks but with some specifics + # https://docs.ansible.com/ansible/latest/galaxy/user_guide.html#installing-roles-and-collections-from-the-same-requirements-yml-file + "requirements", + "role", # that is a folder! + "yaml", # generic yaml file, previously reported as unknown file type + "ansible-lint-config", + "sanity-ignore-file", # tests/sanity/ignore file + "plugin", + "", # unknown file type +] + + +# Aliases for deprecated tags/ids and their newer names +RENAMED_TAGS = { + "102": "no-jinja-when", + "104": "deprecated-bare-vars", + "105": "deprecated-module", + "106": "role-name", + "202": "risky-octal", + "203": "no-tabs", + "205": "playbook-extension", + "206": "jinja[spacing]", + "207": "jinja[invalid]", + "208": "risky-file-permissions", + "301": "no-changed-when", + "302": "deprecated-command-syntax", + "303": "command-instead-of-module", + "304": "inline-env-var", + "305": "command-instead-of-shell", + "306": "risky-shell-pipe", + "401": "latest[git]", + "402": "latest[hg]", + "403": "package-latest", + "404": "no-relative-paths", + "501": "partial-become", + "502": "name[missing]", + "503": "no-handler", + "504": "deprecated-local-action", + "505": "missing-import", + "601": "literal-compare", + "602": "empty-string-compare", + "702": "meta-no-tags", + "703": "meta-incorrect", + "704": "meta-video-links", + "911": "syntax-check", + "deprecated-command-syntax": "no-free-form", + "fqcn-builtins": "fqcn[action-core]", + "git-latest": "latest[git]", + "hg-latest": "latest[hg]", + "no-jinja-nesting": "jinja[invalid]", + "no-loop-var-prefix": "loop-var-prefix", + "unnamed-task": "name[missing]", + "var-spacing": "jinja[spacing]", +} + +PLAYBOOK_TASK_KEYWORDS = [ + "tasks", + "handlers", + "pre_tasks", + "post_tasks", +] +NESTED_TASK_KEYS = [ + "block", + "always", + "rescue", +] + +# Keys that are used internally when parsing YAML/JSON files +SKIPPED_RULES_KEY = "__skipped_rules__" +LINE_NUMBER_KEY = "__line__" +FILENAME_KEY = "__file__" +ANNOTATION_KEYS = [ + FILENAME_KEY, + LINE_NUMBER_KEY, + SKIPPED_RULES_KEY, + "__ansible_module__", + "__ansible_module_original__", +] +INCLUSION_ACTION_NAMES = { + "include", + "include_tasks", + "import_playbook", + "import_tasks", + "ansible.builtin.include", + "ansible.builtin.include_tasks", + "ansible.builtin.import_playbook", + "ansible.builtin.import_tasks", +} + +ROLE_IMPORT_ACTION_NAMES = { + "ansible.builtin.import_role", + "ansible.builtin.include_role", + 
"ansible.legacy.import_role", + "ansible.legacy.include_role", + "import_role", + "include_role", +} + +# Newer versions of git might fail to run when different file ownership is +# found of repo. One example is on GHA runners executing containerized +# reusable actions, where the mounted volume might have different owner. +# +# https://github.com/ansible/ansible-lint-action/issues/138 +GIT_CMD = ["git", "-c", f"safe.directory={Path.cwd()}"] + +CONFIG_FILENAMES = [ + ".ansible-lint", + ".config/ansible-lint.yml", + ".config/ansible-lint.yaml", +] + + +class States(Enum): + """States used are used as sentinel values in various places.""" + + NOT_LOADED = "File not loaded" + LOAD_FAILED = "File failed to load" + UNKNOWN_DATA = "Unknown data" + + def __bool__(self) -> bool: + """Ensure all states evaluate as False as booleans.""" + return False diff --git a/src/ansiblelint/data/profiles.yml b/src/ansiblelint/data/profiles.yml new file mode 100644 index 0000000..0749ad5 --- /dev/null +++ b/src/ansiblelint/data/profiles.yml @@ -0,0 +1,119 @@ +--- +# Do not change sorting order of the primary keys as they also represent how +# progressive the profiles are, each one extending the one before it. +min: + description: > + The `min` profile ensures that Ansible can load content. + Rules in this profile are mandatory because they prevent fatal errors. + You can add files to the exclude list or provide dependencies to load the + correct files. + extends: null + rules: + internal-error: + load-failure: + parser-error: + syntax-check: +basic: + description: > + The `basic` profile prevents common coding issues and enforces standard styles and formatting. + extends: min + rules: + command-instead-of-module: + command-instead-of-shell: + deprecated-bare-vars: + deprecated-local-action: + deprecated-module: + inline-env-var: + key-order: + literal-compare: + jinja: + no-free-form: # schema-related + url: https://github.com/ansible/ansible-lint/issues/2117 + no-jinja-when: + no-tabs: + partial-become: + playbook-extension: + role-name: + schema: # can cover lots of rules, but not really be able to give best error messages + name: + var-naming: + yaml: + skip_list: # just because we enable them in following profiles + - name[template] + - name[casing] +moderate: + description: > + The `moderate` profile ensures that content adheres to best practices for making content easier to read and maintain. + extends: basic + rules: + name[template]: + name[imperative]: + url: https://github.com/ansible/ansible-lint/issues/2170 + name[casing]: + spell-var-name: + url: https://github.com/ansible/ansible-lint/issues/2168 +safety: + description: > + The `safety` profile avoids module calls that can have non-determinant outcomes or security concerns. + extends: moderate + rules: + avoid-implicit: + latest: + package-latest: + risky-file-permissions: + risky-octal: + risky-shell-pipe: +shared: + description: > + The `shared` profile ensures that content follows best practices for packaging and publishing. + This profile is intended for content creators who want to make Ansible + playbooks, roles, or collections available from + [galaxy.ansible.com](https://galaxy.ansible.com/), + [automation-hub](https://console.redhat.com/ansible/automation-hub), + or a private instance. 
+ extends: safety + rules: + galaxy: # <-- applies to both galaxy and automation-hub + ignore-errors: + layout: + url: https://github.com/ansible/ansible-lint/issues/1900 + meta-incorrect: + meta-no-tags: + meta-video-links: + meta-version: + url: https://github.com/ansible/ansible-lint/issues/2103 + meta-runtime: + url: https://github.com/ansible/ansible-lint/issues/2102 + no-changed-when: + no-changelog: + url: https://github.com/ansible/ansible-lint/issues/2101 + no-handler: + no-relative-paths: + max-block-depth: + url: https://github.com/ansible/ansible-lint/issues/2173 + max-tasks: + url: https://github.com/ansible/ansible-lint/issues/2172 + unsafe-loop: + # unsafe-loop[prefix] (currently named "no-var-prefix") + # [unsafe-loop[var-prefix|iterator]] + url: https://github.com/ansible/ansible-lint/issues/2038 +production: + description: > + The `production` profile ensures that content meets requirements for + inclusion in [Ansible Automation Platform (AAP)](https://www.redhat.com/en/technologies/management/ansible) + as validated or certified content. + extends: shared + rules: + avoid-dot-notation: + url: https://github.com/ansible/ansible-lint/issues/2174 + sanity: + url: https://github.com/ansible/ansible-lint/issues/2121 + fqcn: + import-task-no-when: + url: https://github.com/ansible/ansible-lint/issues/2219 + meta-no-dependencies: + url: https://github.com/ansible/ansible-lint/issues/2159 + single-entry-point: + url: https://github.com/ansible/ansible-lint/issues/2242 + use-loop: + url: https://github.com/ansible/ansible-lint/issues/2204 diff --git a/src/ansiblelint/errors.py b/src/ansiblelint/errors.py new file mode 100644 index 0000000..c8458b8 --- /dev/null +++ b/src/ansiblelint/errors.py @@ -0,0 +1,162 @@ +"""Exceptions and error representations.""" +from __future__ import annotations + +import functools +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any + +from ansiblelint._internal.rules import BaseRule, RuntimeErrorRule +from ansiblelint.config import options +from ansiblelint.file_utils import Lintable + +if TYPE_CHECKING: + from ansiblelint.utils import Task + + +class LintWarning(Warning): + """Used by linter.""" + + +@dataclass +class WarnSource: + """Container for warning information, so we can later create a MatchError from it.""" + + filename: Lintable + lineno: int + tag: str + message: str | None = None + + +class StrictModeError(RuntimeError): + """Raise when we encounter a warning in strict mode.""" + + def __init__( + self, + message: str = "Warning treated as error due to --strict option.", + ): + """Initialize a StrictModeError instance.""" + super().__init__(message) + + +# pylint: disable=too-many-instance-attributes +@dataclass(unsafe_hash=True) +@functools.total_ordering +class MatchError(ValueError): + """Rule violation detected during linting. + + It can be raised as Exception but also just added to the list of found + rules violations. + + Note that line argument is not considered when building hash of an + instance. + """ + + # order matters for these: + message: str = field(init=True, repr=False, default="") + lintable: Lintable = field(init=True, repr=False, default=Lintable(name="")) + filename: str = field(init=True, repr=False, default="") + + tag: str = field(init=True, repr=False, default="") + lineno: int = 1 + details: str = "" + column: int | None = None + # rule is not included in hash because we might have different instances + # of the same rule, but we use the 'tag' to identify the rule. 
+ rule: BaseRule = field(hash=False, default=RuntimeErrorRule()) + ignored: bool = False + fixed: bool = False # True when a transform has resolved this MatchError + + def __post_init__(self) -> None: + """Can be use by rules that can report multiple errors type, so we can still filter by them.""" + if not self.lintable and self.filename: + self.lintable = Lintable(self.filename) + elif self.lintable and not self.filename: + self.filename = self.lintable.name + + # We want to catch accidental MatchError() which contains no useful + # information. When no arguments are passed, the '_message' field is + # set to 'property', only if passed it becomes a string. + if self.rule.__class__ is RuntimeErrorRule: + # so instance was created without a rule + if not self.message: + msg = f"{self.__class__.__name__}() missing a required argument: one of 'message' or 'rule'" + raise TypeError(msg) + if not isinstance(self.tag, str): + msg = "MatchErrors must be created with either rule or tag specified." + raise TypeError(msg) + if not self.message: + self.message = self.rule.shortdesc + + self.match_type: str | None = None + # for task matches, save the normalized task object (useful for transforms) + self.task: Task | None = None + # path to the problem area, like: [0,"pre_tasks",3] for [0].pre_tasks[3] + self.yaml_path: list[int | str] = [] + + if not self.tag: + self.tag = self.rule.id + + # Safety measure to ensure we do not end-up with incorrect indexes + if self.lineno == 0: # pragma: no cover + msg = "MatchError called incorrectly as line numbers start with 1" + raise RuntimeError(msg) + if self.column == 0: # pragma: no cover + msg = "MatchError called incorrectly as column numbers start with 1" + raise RuntimeError(msg) + + @functools.cached_property + def level(self) -> str: + """Return the level of the rule: error, warning or notice.""" + if not self.ignored and {self.tag, self.rule.id, *self.rule.tags}.isdisjoint( + options.warn_list, + ): + return "error" + return "warning" + + def __repr__(self) -> str: + """Return a MatchError instance representation.""" + formatstr = "[{0}] ({1}) matched {2}:{3} {4}" + # note that `rule.id` can be int, str or even missing, as users + # can defined their own custom rules. + _id = getattr(self.rule, "id", "000") + + return formatstr.format( + _id, + self.message, + self.filename, + self.lineno, + self.details, + ) + + @property + def position(self) -> str: + """Return error positioning, with column number if available.""" + if self.column: + return f"{self.lineno}:{self.column}" + return str(self.lineno) + + @property + def _hash_key(self) -> Any: + # line attr is knowingly excluded, as dict is not hashable + return ( + self.filename, + self.lineno, + str(getattr(self.rule, "id", 0)), + self.message, + self.details, + # -1 is used here to force errors with no column to sort before + # all other errors. 
+ -1 if self.column is None else self.column, + ) + + def __lt__(self, other: object) -> bool: + """Return whether the current object is less than the other.""" + if not isinstance(other, self.__class__): + return NotImplemented + return bool(self._hash_key < other._hash_key) + + def __eq__(self, other: object) -> bool: + """Identify whether the other object represents the same rule match.""" + if not isinstance(other, self.__class__): + return NotImplemented + return self.__hash__() == other.__hash__() diff --git a/src/ansiblelint/file_utils.py b/src/ansiblelint/file_utils.py new file mode 100644 index 0000000..15c92d2 --- /dev/null +++ b/src/ansiblelint/file_utils.py @@ -0,0 +1,584 @@ +"""Utility functions related to file operations.""" +from __future__ import annotations + +import copy +import logging +import os +import sys +from collections import defaultdict +from contextlib import contextmanager +from pathlib import Path +from tempfile import NamedTemporaryFile +from typing import TYPE_CHECKING, Any, cast + +import pathspec +import wcmatch.pathlib +import wcmatch.wcmatch +from yaml.error import YAMLError + +from ansiblelint.config import BASE_KINDS, Options, options +from ansiblelint.constants import CONFIG_FILENAMES, FileType, States + +if TYPE_CHECKING: + from collections.abc import Iterator, Sequence + + +_logger = logging.getLogger(__package__) + + +def abspath(path: str, base_dir: str) -> str: + """Make relative path absolute relative to given directory. + + path (str): the path to make absolute + base_dir (str): the directory from which make relative paths absolute. + """ + if not os.path.isabs(path): + # Don't use abspath as it assumes path is relative to cwd. + # We want it relative to base_dir. + path = os.path.join(base_dir, path) + + return os.path.normpath(path) + + +def normpath(path: str | Path) -> str: + """Normalize a path in order to provide a more consistent output. + + Currently it generates a relative path but in the future we may want to + make this user configurable. + """ + # prevent possible ValueError with relpath(), when input is an empty string + if not path: + path = "." + # conversion to string in order to allow receiving non string objects + relpath = os.path.relpath(str(path)) + path_absolute = os.path.abspath(str(path)) + if path_absolute.startswith(os.getcwd()): + return relpath + if path_absolute.startswith(os.path.expanduser("~")): + return path_absolute.replace(os.path.expanduser("~"), "~") + # we avoid returning relative paths that end-up at root level + if path_absolute in relpath: + return path_absolute + if relpath.startswith("../"): + return path_absolute + return relpath + + +# That is needed for compatibility with py38, later was added to Path class +def is_relative_to(path: Path, *other: Any) -> bool: + """Return True if the path is relative to another path or False.""" + try: + path.resolve().absolute().relative_to(*other) + return True + except ValueError: + return False + + +def normpath_path(path: str | Path) -> Path: + """Normalize a path in order to provide a more consistent output. + + - Any symlinks are resolved. + - Any paths outside the CWD are resolved to their absolute path. + - Any absolute path within current user home directory is compressed to + make use of '~', so it is easier to read and more portable. 
+ """ + if not isinstance(path, Path): + path = Path(path) + + is_relative = is_relative_to(path, path.cwd()) + path = path.resolve() + if is_relative: + path = path.relative_to(path.cwd()) + + # Compress any absolute path within current user home directory + if path.is_absolute(): + home = Path.home() + if is_relative_to(path, home): + path = Path("~") / path.relative_to(home) + + return path + + +@contextmanager +def cwd(path: Path) -> Iterator[None]: + """Context manager for temporary changing current working directory.""" + old_pwd = Path.cwd() + os.chdir(path) + try: + yield + finally: + os.chdir(old_pwd) + + +def expand_path_vars(path: str) -> str: + """Expand the environment or ~ variables in a path string.""" + # It may be possible for function to be called with a Path object + path = str(path).strip() + path = os.path.expanduser(path) + path = os.path.expandvars(path) + return path + + +def expand_paths_vars(paths: list[str]) -> list[str]: + """Expand the environment or ~ variables in a list.""" + paths = [expand_path_vars(p) for p in paths] + return paths + + +def kind_from_path(path: Path, *, base: bool = False) -> FileType: + """Determine the file kind based on its name. + + When called with base=True, it will return the base file type instead + of the explicit one. That is expected to return 'yaml' for any yaml files. + """ + # pathlib.Path.match patterns are very limited, they do not support *a*.yml + # glob.glob supports **/foo.yml but not multiple extensions + pathex = wcmatch.pathlib.PurePath(str(path.absolute().resolve())) + kinds = options.kinds if not base else BASE_KINDS + for entry in kinds: + for k, v in entry.items(): + if pathex.globmatch( + v, + flags=( + wcmatch.pathlib.GLOBSTAR + | wcmatch.pathlib.BRACE + | wcmatch.pathlib.DOTGLOB + ), + ): + return str(k) # type: ignore[return-value] + + if base: + # Unknown base file type is default + return "" + + if path.is_dir(): + known_role_subfolders = ("tasks", "meta", "vars", "defaults", "handlers") + for filename in known_role_subfolders: + if (path / filename).is_dir(): + return "role" + _logger.debug( + "Folder `%s` does not look like a role due to missing any of the common subfolders such: %s.", + path, + ", ".join(known_role_subfolders), + ) + + if str(path) == "/dev/stdin": + return "playbook" + + # Unknown file types report a empty string (evaluated as False) + return "" + + +# pylint: disable=too-many-instance-attributes +class Lintable: + """Defines a file/folder that can be linted. + + Providing file content when creating the object allow creation of in-memory + instances that do not need files to be present on disk. + + When symlinks are given, they will always be resolved to their target. + """ + + # pylint: disable=too-many-arguments + def __init__( + self, + name: str | Path, + content: str | None = None, + kind: FileType | None = None, + base_kind: str = "", + parent: Lintable | None = None, + ): + """Create a Lintable instance.""" + self.dir: str = "" + self.kind: FileType | None = None + self.stop_processing = False # Set to stop other rules from running + self.state: Any = States.NOT_LOADED + self.line_skips: dict[int, set[str]] = defaultdict(set) + self.exc: Exception | None = None # Stores data loading exceptions + self.parent = parent + self.explicit = False # Indicates if the file was explicitly provided or was indirectly included. 
+ + if isinstance(name, str): + name = Path(name) + is_relative = is_relative_to(name, str(name.cwd())) + name = name.resolve() + if is_relative: + name = name.relative_to(name.cwd()) + name = normpath_path(name) + # we need to be sure that we expanduser() because otherwise a simple + # test like .path.exists() will return unexpected results. + self.path = name.expanduser() + # Filename is effective file on disk, for stdin is a namedtempfile + self.name = self.filename = str(name) + + self._content = self._original_content = content + self.updated = False + + # if the lintable is part of a role, we save role folder name + self.role = "" + parts = self.path.parent.parts + if "roles" in parts: + role = self.path + while role.parent.name != "roles" and role.name: + role = role.parent + if role.exists(): + self.role = role.name + + if str(self.path) in ["/dev/stdin", "-"]: + # pylint: disable=consider-using-with + self.file = NamedTemporaryFile(mode="w+", suffix="playbook.yml") + self.filename = self.file.name + self._content = sys.stdin.read() + self.file.write(self._content) + self.file.flush() + self.path = Path(self.file.name) + self.name = "stdin" + self.kind = "playbook" + self.dir = "/" + else: + self.kind = kind or kind_from_path(self.path) + # We store absolute directory in dir + if not self.dir: + if self.kind == "role": + self.dir = str(self.path.resolve()) + else: + self.dir = str(self.path.parent.resolve()) + + # determine base file kind (yaml, xml, ini, ...) + self.base_kind = base_kind or kind_from_path(self.path, base=True) + self.abspath = self.path.expanduser().absolute() + + if self.kind == "tasks": + self.parent = _guess_parent(self) + + if self.kind == "yaml": + _ = self.data # pylint: disable=pointless-statement + + def _guess_kind(self) -> None: + if self.kind == "yaml": + if ( + isinstance(self.data, list) + and len(self.data) > 0 + and ( + "hosts" in self.data[0] + or "import_playbook" in self.data[0] + or "ansible.builtin.import_playbook" in self.data[0] + ) + ): + if "rules" not in self.data[0]: + self.kind = "playbook" + else: + self.kind = "rulebook" + # we we failed to guess the more specific kind, we warn user + if self.kind == "yaml": + _logger.debug( + "Passed '%s' positional argument was identified as generic '%s' file kind.", + self.name, + self.kind, + ) + + def __getitem__(self, key: Any) -> Any: + """Provide compatibility subscriptable support.""" + if key == "path": + return str(self.path) + if key == "type": + return str(self.kind) + raise NotImplementedError + + def get(self, key: Any, default: Any = None) -> Any: + """Provide compatibility subscriptable support.""" + try: + return self[key] + except NotImplementedError: + return default + + def _populate_content_cache_from_disk(self) -> None: + # Can raise UnicodeDecodeError + self._content = self.path.expanduser().resolve().read_text(encoding="utf-8") + + if self._original_content is None: + self._original_content = self._content + + @property + def content(self) -> str: + """Retrieve file content, from internal cache or disk.""" + if self._content is None: + self._populate_content_cache_from_disk() + return cast(str, self._content) + + @content.setter + def content(self, value: str) -> None: + """Update ``content`` and calculate ``updated``. + + To calculate ``updated`` this will read the file from disk if the cache + has not already been populated. 
+ """ + if not isinstance(value, str): + msg = f"Expected str but got {type(value)}" + raise TypeError(msg) + if self._original_content is None: + if self._content is not None: + self._original_content = self._content + elif self.path.exists(): + self._populate_content_cache_from_disk() + else: + # new file + self._original_content = "" + self.updated = self._original_content != value + self._content = value + + @content.deleter + def content(self) -> None: + """Reset the internal content cache.""" + self._content = None + + def write(self, *, force: bool = False) -> None: + """Write the value of ``Lintable.content`` to disk. + + This only writes to disk if the content has been updated (``Lintable.updated``). + For example, you can update the content, and then write it to disk like this: + + .. code:: python + + lintable.content = new_content + lintable.write() + + Use ``force=True`` when you want to force a content rewrite even if the + content has not changed. For example: + + .. code:: python + + lintable.write(force=True) + """ + if not force and not self.updated: + # No changes to write. + return + self.path.expanduser().resolve().write_text( + self._content or "", + encoding="utf-8", + ) + + def __hash__(self) -> int: + """Return a hash value of the lintables.""" + return hash((self.name, self.kind, self.abspath)) + + def __eq__(self, other: object) -> bool: + """Identify whether the other object represents the same rule match.""" + if isinstance(other, Lintable): + return bool(self.name == other.name and self.kind == other.kind) + return False + + def __repr__(self) -> str: + """Return user friendly representation of a lintable.""" + return f"{self.name} ({self.kind})" + + @property + def data(self) -> Any: + """Return loaded data representation for current file, if possible.""" + if self.state == States.NOT_LOADED: + if self.path.is_dir(): + self.state = None + return self.state + try: + if str(self.base_kind) == "text/yaml": + from ansiblelint.utils import ( # pylint: disable=import-outside-toplevel + parse_yaml_linenumbers, + ) + + self.state = parse_yaml_linenumbers(self) + # now that _data is not empty, we can try guessing if playbook or rulebook + # it has to be done before append_skipped_rules() call as it's relying + # on self.kind. + if self.kind == "yaml": + self._guess_kind() + # Lazy import to avoid delays and cyclic-imports + if "append_skipped_rules" not in globals(): + # pylint: disable=import-outside-toplevel + from ansiblelint.skip_utils import append_skipped_rules + + self.state = append_skipped_rules(self.state, self) + else: + logging.debug( + "data set to None for %s due to being '%s' (%s) kind.", + self.path, + self.kind, + self.base_kind or "unknown", + ) + self.state = States.UNKNOWN_DATA + + except ( + RuntimeError, + FileNotFoundError, + YAMLError, + UnicodeDecodeError, + ) as exc: + self.state = States.LOAD_FAILED + self.exc = exc + return self.state + + +# pylint: disable=redefined-outer-name +def discover_lintables(options: Options) -> list[str]: + """Find all files that we know how to lint. + + Return format is normalized, relative for stuff below cwd, ~/ for content + under current user and absolute for everything else. 
+ """ + if not options.lintables: + options.lintables = ["."] + + return [ + str(filename) + for filename in get_all_files( + *[Path(s) for s in options.lintables], + exclude_paths=options.exclude_paths, + ) + ] + + +def strip_dotslash_prefix(fname: str) -> str: + """Remove ./ leading from filenames.""" + return fname[2:] if fname.startswith("./") else fname + + +def find_project_root( + srcs: Sequence[str], + config_file: str | None = None, +) -> tuple[Path, str]: + """Return a directory containing .git or ansible-lint config files. + + That directory will be a common parent of all files and directories + passed in `srcs`. + + If no directory in the tree contains a marker that would specify it's the + project root, the root of the file system is returned. + + Returns a two-tuple with the first element as the project root path and + the second element as a string describing the method by which the + project root was discovered. + """ + directory = None + if not srcs: + srcs = [str(Path.cwd().resolve().absolute())] + path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs] + + cfg_files = [config_file] if config_file else CONFIG_FILENAMES + + # A list of lists of parents for each 'src'. 'src' is included as a + # "parent" of itself if it is a directory + src_parents = [ + list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs + ] + + common_base = max( + set.intersection(*(set(parents) for parents in src_parents)), + key=lambda path: path.parts, + ) + + for directory in (common_base, *common_base.parents): + if (directory / ".git").exists(): + return directory, ".git directory" + + if (directory / ".hg").is_dir(): + return directory, ".hg directory" + + for cfg_file in cfg_files: + # note that if cfg_file is already absolute, 'directory' is ignored + resolved_cfg_path = directory / cfg_file + if resolved_cfg_path.is_file(): + if os.path.isabs(cfg_file): + directory = Path(cfg_file).parent + if directory.name == ".config": + directory = directory.parent + return directory, f"config file {resolved_cfg_path}" + + if not directory: + return Path.cwd(), "current working directory" + return directory, "file system root" + + +def expand_dirs_in_lintables(lintables: set[Lintable]) -> None: + """Return all recognized lintables within given directory.""" + should_expand = False + + for item in lintables: + if item.path.is_dir(): + should_expand = True + break + + if should_expand: + # this relies on git and we do not want to call unless needed + all_files = discover_lintables(options) + + for item in copy.copy(lintables): + if item.path.is_dir(): + for filename in all_files: + if filename.startswith(str(item.path)): + lintables.add(Lintable(filename)) + + +def _guess_parent(lintable: Lintable) -> Lintable | None: + """Return a parent directory for a lintable.""" + try: + if lintable.path.parents[2].name == "roles": + # role_name = lintable.parents[1].name + return Lintable(lintable.path.parents[1], kind="role") + except IndexError: + pass + return None + + +def get_all_files( + *paths: Path, + exclude_paths: list[str] | None = None, +) -> list[Path]: + """Recursively retrieve all files from given folders.""" + all_files: list[Path] = [] + exclude_paths = [] if exclude_paths is None else exclude_paths + + def is_excluded(path_to_check: Path) -> bool: + """Check if a file is exclude by current specs.""" + return any( + spec.match_file(pathspec.util.append_dir_sep(path_to_check)) + for spec in pathspecs + ) + + for path in paths: + pathspecs = [ + 
pathspec.GitIgnoreSpec.from_lines( + [ + ".git", + ".tox", + ".mypy_cache", + "__pycache__", + ".DS_Store", + ".coverage", + ".pytest_cache", + ".ruff_cache", + *exclude_paths, + ], + ), + ] + gitignore = path / ".gitignore" + if gitignore.exists(): + with gitignore.open(encoding="UTF-8") as f: + _logger.info("Loading ignores from %s", gitignore) + pathspecs.append( + pathspec.GitIgnoreSpec.from_lines(f.read().splitlines()), + ) + + # Iterate over all items in the directory + if path.is_file(): + all_files.append(path) + else: + for item in sorted(path.iterdir()): + if is_excluded(item): + _logger.info("Excluded: %s", item) + continue + if item.is_file(): + all_files.append(item) + # If it's a directory, recursively call the function + elif item.is_dir(): + all_files.extend(get_all_files(item, exclude_paths=exclude_paths)) + + return all_files diff --git a/src/ansiblelint/formatters/__init__.py b/src/ansiblelint/formatters/__init__.py new file mode 100644 index 0000000..9ddca00 --- /dev/null +++ b/src/ansiblelint/formatters/__init__.py @@ -0,0 +1,308 @@ +"""Output formatters.""" +from __future__ import annotations + +import hashlib +import json +import os +from pathlib import Path +from typing import TYPE_CHECKING, Any, Generic, TypeVar + +import rich + +from ansiblelint.config import options +from ansiblelint.version import __version__ + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + +T = TypeVar("T", bound="BaseFormatter") # type: ignore[type-arg] + + +class BaseFormatter(Generic[T]): + """Formatter of ansible-lint output. + + Base class for output formatters. + + Args: + ---- + base_dir (str|Path): reference directory against which display relative path. + display_relative_path (bool): whether to show path as relative or absolute + """ + + def __init__(self, base_dir: str | Path, display_relative_path: bool) -> None: + """Initialize a BaseFormatter instance.""" + if isinstance(base_dir, str): + base_dir = Path(base_dir) + if base_dir: # can be None + base_dir = base_dir.absolute() + + self.base_dir = base_dir if display_relative_path else None + + def _format_path(self, path: str | Path) -> str | Path: + if not self.base_dir or not path: + return path + # Use os.path.relpath 'cause Path.relative_to() misbehaves + rel_path = os.path.relpath(path, start=self.base_dir) + # Avoid returning relative paths that go outside of base_dir + if rel_path.startswith(".."): + return path + return rel_path + + def apply(self, match: MatchError) -> str: + """Format a match error.""" + return str(match) + + @staticmethod + def escape(text: str) -> str: + """Escapes a string to avoid processing it as markup.""" + return rich.markup.escape(text) + + +class Formatter(BaseFormatter): # type: ignore[type-arg] + """Default output formatter of ansible-lint.""" + + def apply(self, match: MatchError) -> str: + _id = getattr(match.rule, "id", "000") + result = f"[{match.level}][bold][link={match.rule.url}]{self.escape(match.tag)}[/link][/][/][dim]:[/] [{match.level}]{self.escape(match.message)}[/]" + if match.level != "error": + result += f" [dim][{match.level}]({match.level})[/][/]" + if match.ignored: + result += " [dim]# ignored[/]" + result += ( + "\n" + f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}" + ) + if match.details: + result += f" [dim]{self.escape(str(match.details))}[/]" + result += "\n" + return result + + +class QuietFormatter(BaseFormatter[Any]): + """Brief output formatter for ansible-lint.""" + + def apply(self, match: MatchError) -> str: + return ( 
+ f"[{match.level}]{match.rule.id}[/] " + f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}" + ) + + +class ParseableFormatter(BaseFormatter[Any]): + """Parseable uses PEP8 compatible format.""" + + def apply(self, match: MatchError) -> str: + result = ( + f"[filename]{self._format_path(match.filename or '')}[/][dim]:{match.position}:[/] " + f"[{match.level}][bold]{self.escape(match.tag)}[/bold]" + f"{ f': {match.message}' if not options.quiet else '' }[/]" + ) + if match.level != "error": + result += f" [dim][{match.level}]({match.level})[/][/]" + + return result + + +class AnnotationsFormatter(BaseFormatter): # type: ignore[type-arg] + # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-warning-message + """Formatter for emitting violations as GitHub Workflow Commands. + + These commands trigger the GHA Workflow runners platform to post violations + in a form of GitHub Checks API annotations that appear rendered in pull- + request files view. + + ::debug file={name},line={line},col={col},severity={severity}::{message} + ::warning file={name},line={line},col={col},severity={severity}::{message} + ::error file={name},line={line},col={col},severity={severity}::{message} + + Supported levels: debug, warning, error + """ + + def apply(self, match: MatchError) -> str: + """Prepare a match instance for reporting as a GitHub Actions annotation.""" + file_path = self._format_path(match.filename or "") + line_num = match.lineno + severity = match.rule.severity + violation_details = self.escape(match.message) + col = f",col={match.column}" if match.column else "" + return ( + f"::{match.level} file={file_path},line={line_num}{col},severity={severity},title={match.tag}" + f"::{violation_details}" + ) + + +class CodeclimateJSONFormatter(BaseFormatter[Any]): + """Formatter for emitting violations in Codeclimate JSON report format. + + The formatter expects a list of MatchError objects and returns a JSON formatted string. + The spec for the codeclimate report can be found here: + https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#user-content-data-types + """ + + def format_result(self, matches: list[MatchError]) -> str: + """Format a list of match errors as a JSON string.""" + if not isinstance(matches, list): + msg = f"The {self.__class__} was expecting a list of MatchError." 
+ raise RuntimeError(msg) + + result = [] + for match in matches: + issue: dict[str, Any] = {} + issue["type"] = "issue" + issue["check_name"] = match.tag or match.rule.id # rule-id[subrule-id] + issue["categories"] = match.rule.tags + if match.rule.url: + # https://github.com/codeclimate/platform/issues/68 + issue["url"] = match.rule.url + issue["severity"] = self._remap_severity(match) + issue["description"] = self.escape(str(match.message)) + issue["fingerprint"] = hashlib.sha256( + repr(match).encode("utf-8"), + ).hexdigest() + issue["location"] = {} + issue["location"]["path"] = self._format_path(match.filename or "") + if match.column: + issue["location"]["positions"] = {} + issue["location"]["positions"]["begin"] = {} + issue["location"]["positions"]["begin"]["line"] = match.lineno + issue["location"]["positions"]["begin"]["column"] = match.column + else: + issue["location"]["lines"] = {} + issue["location"]["lines"]["begin"] = match.lineno + if match.details: + issue["content"] = {} + issue["content"]["body"] = match.details + # Append issue to result list + result.append(issue) + + # Keep it single line due to https://github.com/ansible/ansible-navigator/issues/1490 + return json.dumps(result, sort_keys=False) + + @staticmethod + def _remap_severity(match: MatchError) -> str: + # level is not part of CodeClimate specification, but there is + # no other way to expose that info. We recommend switching to + # SARIF format which is better suited for interoperability. + # + # Out current implementation will return `major` for all errors and + # `warning` for all warnings. We may revisit this in the future. + if match.level == "warning": + return "minor" + return "major" + + +class SarifFormatter(BaseFormatter[Any]): + """Formatter for emitting violations in SARIF report format. + + The spec of SARIF can be found here: + https://docs.oasis-open.org/sarif/sarif/v2.1.0/ + """ + + BASE_URI_ID = "SRCROOT" + TOOL_NAME = "ansible-lint" + TOOL_URL = "https://github.com/ansible/ansible-lint" + SARIF_SCHEMA_VERSION = "2.1.0" + SARIF_SCHEMA = ( + "https://schemastore.azurewebsites.net/schemas/json/sarif-2.1.0-rtm.5.json" + ) + + def format_result(self, matches: list[MatchError]) -> str: + """Format a list of match errors as a JSON string.""" + if not isinstance(matches, list): + msg = f"The {self.__class__} was expecting a list of MatchError." 
+ raise RuntimeError(msg) + + root_path = Path(str(self.base_dir)).as_uri() + root_path = root_path + "/" if not root_path.endswith("/") else root_path + rules, results = self._extract_results(matches) + + tool = { + "driver": { + "name": self.TOOL_NAME, + "version": __version__, + "informationUri": self.TOOL_URL, + "rules": rules, + }, + } + + runs = [ + { + "tool": tool, + "columnKind": "utf16CodeUnits", + "results": results, + "originalUriBaseIds": { + self.BASE_URI_ID: {"uri": root_path}, + }, + }, + ] + + report = { + "$schema": self.SARIF_SCHEMA, + "version": self.SARIF_SCHEMA_VERSION, + "runs": runs, + } + # Keep it single line due to https://github.com/ansible/ansible-navigator/issues/1490 + return json.dumps(report, sort_keys=False) + + def _extract_results( + self, + matches: list[MatchError], + ) -> tuple[list[Any], list[Any]]: + rules = {} + results = [] + for match in matches: + if match.tag not in rules: + rules[match.tag] = self._to_sarif_rule(match) + results.append(self._to_sarif_result(match)) + return list(rules.values()), results + + def _to_sarif_rule(self, match: MatchError) -> dict[str, Any]: + rule: dict[str, Any] = { + "id": match.tag, + "name": match.tag, + "shortDescription": { + "text": str(match.message), + }, + "defaultConfiguration": { + "level": self._to_sarif_level(match), + }, + "help": { + "text": str(match.rule.description), + }, + "helpUri": match.rule.url, + "properties": {"tags": match.rule.tags}, + } + return rule + + def _to_sarif_result(self, match: MatchError) -> dict[str, Any]: + result: dict[str, Any] = { + "ruleId": match.tag, + "message": { + "text": str(match.details) + if str(match.details) + else str(match.message), + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": self._format_path(match.filename or ""), + "uriBaseId": self.BASE_URI_ID, + }, + "region": { + "startLine": match.lineno, + }, + }, + }, + ], + } + if match.column: + result["locations"][0]["physicalLocation"]["region"][ + "startColumn" + ] = match.column + return result + + @staticmethod + def _to_sarif_level(match: MatchError) -> str: + # sarif accepts only 4 levels: error, warning, note, none + return match.level diff --git a/src/ansiblelint/generate_docs.py b/src/ansiblelint/generate_docs.py new file mode 100644 index 0000000..1498a67 --- /dev/null +++ b/src/ansiblelint/generate_docs.py @@ -0,0 +1,125 @@ +"""Utils to generate rules documentation.""" +import logging +from collections.abc import Iterable + +from rich import box +from rich.console import RenderableType, group +from rich.markdown import Markdown +from rich.table import Table + +from ansiblelint.config import PROFILES +from ansiblelint.constants import RULE_DOC_URL +from ansiblelint.rules import RulesCollection + +DOC_HEADER = """ +# Default Rules + +(lint_default_rules)= + +Below you can see the list of default rules Ansible Lint use to evaluate playbooks and roles: + +""" + +_logger = logging.getLogger(__name__) + + +def rules_as_str(rules: RulesCollection) -> RenderableType: + """Return rules as string.""" + table = Table(show_header=False, header_style="title", box=box.SIMPLE) + for rule in rules.alphabetical(): + tag = f"[dim] ({', '.join(rule.tags)})[/dim]" if rule.tags else "" + table.add_row( + f"[link={RULE_DOC_URL}{rule.id}/]{rule.id}[/link]", + rule.shortdesc + tag, + ) + return table + + +def rules_as_md(rules: RulesCollection) -> str: + """Return md documentation for a list of rules.""" + result = DOC_HEADER + + for rule in rules.alphabetical(): + # because title == 
rule.id we get the desired labels for free + # and we do not have to insert `(target_header)=` + title = f"{rule.id}" + + if rule.help: + if not rule.help.startswith(f"# {rule.id}"): # pragma: no cover + msg = f"Rule {rule.__class__} markdown help does not start with `# {rule.id}` header.\n{rule.help}" + raise RuntimeError(msg) + result += f"\n\n{rule.help}" + else: + description = rule.description + if rule.link: + description += f" [more]({rule.link})" + + result += f"\n\n## {title}\n\n**{rule.shortdesc}**\n\n{description}" + + return result + + +@group() +def rules_as_rich(rules: RulesCollection) -> Iterable[Table]: + """Print documentation for a list of rules, returns empty string.""" + width = max(16, *[len(rule.id) for rule in rules]) + for rule in rules.alphabetical(): + table = Table(show_header=True, header_style="title", box=box.MINIMAL) + table.add_column(rule.id, style="dim", width=width) + table.add_column(Markdown(rule.shortdesc)) + + description = rule.help or rule.description + if rule.link: + description += f" [(more)]({rule.link})" + table.add_row("description", Markdown(description)) + if rule.version_added: + table.add_row("version_added", rule.version_added) + if rule.tags: + table.add_row("tags", ", ".join(rule.tags)) + if rule.severity: + table.add_row("severity", rule.severity) + yield table + + +def profiles_as_md(*, header: bool = False, docs_url: str = RULE_DOC_URL) -> str: + """Return markdown representation of supported profiles.""" + result = "" + + if header: + result += """<!--- +Do not manually edit, generated from generate_docs.py +--> +# Profiles + +Ansible-lint profiles gradually increase the strictness of rules as your Ansible content lifecycle. + +!!! note + + Rules with `*` in the suffix are not yet implemented but are documented with linked GitHub issues. + +""" + + for name, profile in PROFILES.items(): + extends = "" + if profile.get("extends", None): + extends = ( + f" It extends [{profile['extends']}](#{profile['extends']}) profile." 
+ ) + result += f"## {name}\n\n{profile['description']}{extends}\n" + for rule, rule_data in profile["rules"].items(): + if "[" in rule: + url = f"{docs_url}{rule.split('[')[0]}/" + else: + url = f"{docs_url}{rule}/" + if not rule_data: + result += f"- [{rule}]({url})\n" + else: + result += f"- [{rule}]({rule_data['url']})\n" + + result += "\n" + return result + + +def profiles_as_rich() -> Markdown: + """Return rich representation of supported profiles.""" + return Markdown(profiles_as_md()) diff --git a/src/ansiblelint/loaders.py b/src/ansiblelint/loaders.py new file mode 100644 index 0000000..49e38f1 --- /dev/null +++ b/src/ansiblelint/loaders.py @@ -0,0 +1,75 @@ +"""Utilities for loading various files.""" +from __future__ import annotations + +import logging +import os +from collections import defaultdict, namedtuple +from functools import partial +from typing import TYPE_CHECKING, Any + +import yaml +from yaml import YAMLError + +try: + from yaml import CFullLoader as FullLoader + from yaml import CSafeLoader as SafeLoader +except (ImportError, AttributeError): + from yaml import FullLoader, SafeLoader # type: ignore[assignment] + +if TYPE_CHECKING: + from pathlib import Path + +IgnoreFile = namedtuple("IgnoreFile", "default alternative") +IGNORE_FILE = IgnoreFile(".ansible-lint-ignore", ".config/ansible-lint-ignore.txt") + +yaml_load = partial(yaml.load, Loader=FullLoader) +yaml_load_safe = partial(yaml.load, Loader=SafeLoader) +_logger = logging.getLogger(__name__) + + +def yaml_from_file(filepath: str | Path) -> Any: + """Return a loaded YAML file.""" + with open(str(filepath), encoding="utf-8") as content: + return yaml_load(content) + + +def load_ignore_txt(filepath: Path | None = None) -> dict[str, set[str]]: + """Return a list of rules to ignore.""" + result = defaultdict(set) + + ignore_file = None + + if filepath: + if os.path.isfile(filepath): + ignore_file = str(filepath) + else: + _logger.error("Ignore file not found '%s'", ignore_file) + elif os.path.isfile(IGNORE_FILE.default): + ignore_file = IGNORE_FILE.default + elif os.path.isfile(IGNORE_FILE.alternative): + ignore_file = IGNORE_FILE.alternative + + if ignore_file: + with open(ignore_file, encoding="utf-8") as _ignore_file: + _logger.debug("Loading ignores from '%s'", ignore_file) + for line in _ignore_file: + entry = line.split("#")[0].rstrip() + if entry: + try: + path, rule = entry.split() + except ValueError as exc: + msg = f"Unable to parse line '{line}' from {ignore_file} file." 
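+                        # A parsable entry has exactly two whitespace-separated
+                        # fields, "<path> <rule>", e.g. "playbooks/site.yml
+                        # package-latest" (illustrative values only); anything
+                        # else ends up in this branch.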
+ raise RuntimeError(msg) from exc + result[path].add(rule) + + return result + + +__all__ = [ + "load_ignore_txt", + "yaml_from_file", + "yaml_load", + "yaml_load_safe", + "YAMLError", + "IGNORE_FILE", +] diff --git a/src/ansiblelint/logger.py b/src/ansiblelint/logger.py new file mode 100644 index 0000000..f0477cd --- /dev/null +++ b/src/ansiblelint/logger.py @@ -0,0 +1,31 @@ +"""Utils related to logging.""" +import logging +import time +from collections.abc import Iterator +from contextlib import contextmanager +from typing import Any + +_logger = logging.getLogger(__name__) + + +@contextmanager +def timed_info(msg: Any, *args: Any) -> Iterator[None]: + """Context manager for logging slow operations, mentions duration.""" + start = time.time() + try: + yield + finally: + elapsed = time.time() - start + _logger.info(msg + " (%.2fs)", *(*args, elapsed)) # noqa: G003 + + +def warn_or_fail(message: str) -> None: + """Warn or fail depending on the strictness level.""" + # pylint: disable=import-outside-toplevel + from ansiblelint.config import options + from ansiblelint.errors import StrictModeError + + if options.strict: + raise StrictModeError(message) + + _logger.warning(message) diff --git a/src/ansiblelint/py.typed b/src/ansiblelint/py.typed new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/src/ansiblelint/py.typed diff --git a/src/ansiblelint/rules/__init__.py b/src/ansiblelint/rules/__init__.py new file mode 100644 index 0000000..acb7df1 --- /dev/null +++ b/src/ansiblelint/rules/__init__.py @@ -0,0 +1,560 @@ +"""All internal ansible-lint rules.""" +from __future__ import annotations + +import copy +import inspect +import logging +import re +import sys +from collections import defaultdict +from collections.abc import Iterable, Iterator, MutableMapping, MutableSequence +from importlib import import_module +from pathlib import Path +from typing import TYPE_CHECKING, Any, cast + +import ansiblelint.skip_utils +import ansiblelint.utils +import ansiblelint.yaml_utils +from ansiblelint._internal.rules import ( + AnsibleParserErrorRule, + BaseRule, + LoadingFailureRule, + RuntimeErrorRule, + WarningRule, +) +from ansiblelint.app import App, get_app +from ansiblelint.config import PROFILES, Options, get_rule_config +from ansiblelint.config import options as default_options +from ansiblelint.constants import LINE_NUMBER_KEY, RULE_DOC_URL, SKIPPED_RULES_KEY +from ansiblelint.errors import MatchError +from ansiblelint.file_utils import Lintable, expand_paths_vars + +if TYPE_CHECKING: + from ruamel.yaml.comments import CommentedMap, CommentedSeq + +_logger = logging.getLogger(__name__) + +match_types = { + "matchlines": "line", + "match": "line", # called by matchlines + "matchtasks": "task", + "matchtask": "task", # called by matchtasks + "matchyaml": "yaml", + "matchplay": "play", # called by matchyaml + "matchdir": "dir", +} + + +class AnsibleLintRule(BaseRule): + """AnsibleLintRule should be used as base for writing new rules.""" + + @property + def url(self) -> str: + """Return rule documentation url.""" + return RULE_DOC_URL + self.id + "/" + + @property + def rule_config(self) -> dict[str, Any]: + """Retrieve rule specific configuration.""" + return get_rule_config(self.id) + + def get_config(self, key: str) -> Any: + """Return a configured value for given key string.""" + return self.rule_config.get(key, None) + + @staticmethod + def unjinja(text: str) -> str: + """Remove jinja2 bits from a string.""" + text = re.sub(r"{{.+?}}", "JINJA_EXPRESSION", text) + text = 
re.sub(r"{%.+?%}", "JINJA_STATEMENT", text) + text = re.sub(r"{#.+?#}", "JINJA_COMMENT", text) + return text + + # pylint: disable=too-many-arguments + def create_matcherror( + self, + message: str = "", + lineno: int = 1, + details: str = "", + filename: Lintable | None = None, + tag: str = "", + ) -> MatchError: + """Instantiate a new MatchError.""" + match = MatchError( + message=message, + lineno=lineno, + details=details, + lintable=filename or Lintable(""), + rule=copy.copy(self), + tag=tag, + ) + # search through callers to find one of the match* methods + frame = inspect.currentframe() + match_type: str | None = None + while not match_type and frame is not None: + func_name = frame.f_code.co_name + match_type = match_types.get(func_name, None) + if match_type: + # add the match_type to the match + match.match_type = match_type + break + frame = frame.f_back # get the parent frame for the next iteration + return match + + @staticmethod + def _enrich_matcherror_with_task_details( + match: MatchError, + task: ansiblelint.utils.Task, + ) -> None: + match.task = task + if not match.details: + match.details = "Task/Handler: " + ansiblelint.utils.task_to_str(task) + if match.lineno < task[LINE_NUMBER_KEY]: + match.lineno = task[LINE_NUMBER_KEY] + + def matchlines(self, file: Lintable) -> list[MatchError]: + matches: list[MatchError] = [] + # arrays are 0-based, line numbers are 1-based + # so use prev_line_no as the counter + for prev_line_no, line in enumerate(file.content.split("\n")): + if line.lstrip().startswith("#"): + continue + + rule_id_list = ansiblelint.skip_utils.get_rule_skips_from_line( + line, + lintable=file, + ) + if self.id in rule_id_list: + continue + + result = self.match(line) + if not result: + continue + message = "" + if isinstance(result, str): + message = result + matcherror = self.create_matcherror( + message=message, + lineno=prev_line_no + 1, + details=line, + filename=file, + ) + matches.append(matcherror) + return matches + + def matchtasks(self, file: Lintable) -> list[MatchError]: + """Call matchtask for each task inside file and return aggregate results. + + Most rules will never need to override matchtasks because its main + purpose is to call matchtask for each task/handlers in the same file, + and to aggregate the results. 
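+
+        A minimal ``matchtask`` override (illustrative sketch; the check shown
+        is hypothetical) returns either a bool or a message string:
+
+        .. code:: python
+
+            def matchtask(self, task, file=None):
+                # Returning a string customizes the reported message.
+                if task["action"]["__ansible_module__"] == "shell":
+                    return "Prefer command over shell here."
+                return False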
+ """ + matches: list[MatchError] = [] + if ( + file.kind not in ["handlers", "tasks", "playbook"] + or str(file.base_kind) != "text/yaml" + ): + return matches + + for task in ansiblelint.utils.task_in_list( + data=file.data, + kind=file.kind, + file=file, + ): + if task.error is not None: + # normalize_task converts AnsibleParserError to MatchError + return [task.error] + + if ( + self.id in task.skip_tags + or ("action" not in task.normalized_task) + or "skip_ansible_lint" in task.normalized_task.get("tags", []) + ): + continue + + if self.needs_raw_task: + task.normalized_task["__raw_task__"] = task.raw_task + + result = self.matchtask(task, file=file) + if not result: + continue + + if isinstance(result, Iterable) and not isinstance( + result, + str, + ): # list[MatchError] + # https://github.com/PyCQA/pylint/issues/6044 + # pylint: disable=not-an-iterable + for match in result: + if match.tag in task.skip_tags: + continue + self._enrich_matcherror_with_task_details( + match, + task, + ) + matches.append(match) + continue + if isinstance(result, MatchError): + if result.tag in task.skip_tags: + continue + match = result + else: # bool or string + message = "" + if isinstance(result, str): + message = result + match = self.create_matcherror( + message=message, + lineno=task.normalized_task[LINE_NUMBER_KEY], + filename=file, + ) + + self._enrich_matcherror_with_task_details(match, task) + matches.append(match) + return matches + + def matchyaml(self, file: Lintable) -> list[MatchError]: + matches: list[MatchError] = [] + if str(file.base_kind) != "text/yaml": + return matches + + yaml = file.data + # yaml returned can be an AnsibleUnicode (a string) when the yaml + # file contains a single string. YAML spec allows this but we consider + # this an fatal error. + if isinstance(yaml, str): + if yaml.startswith("$ANSIBLE_VAULT"): + return [] + return [MatchError(lintable=file, rule=LoadingFailureRule())] + if not yaml: + return matches + + if isinstance(yaml, dict): + yaml = [yaml] + + for play in yaml: + # Bug #849 + if play is None: + continue + + if self.id in play.get(SKIPPED_RULES_KEY, ()): + continue + + if "skip_ansible_lint" in play.get("tags", []): + continue + + matches.extend(self.matchplay(file, play)) + + return matches + + +class TransformMixin: + """A mixin for AnsibleLintRule to enable transforming files. + + If ansible-lint is started with the ``--write`` option, then the ``Transformer`` + will call the ``transform()`` method for every MatchError identified if the rule + that identified it subclasses this ``TransformMixin``. Only the rule that identified + a MatchError can do transforms to fix that match. + """ + + def transform( + self, + match: MatchError, + lintable: Lintable, + data: CommentedMap | CommentedSeq | str, + ) -> None: + """Transform ``data`` to try to fix the MatchError identified by this rule. + + The ``match`` was generated by this rule in the ``lintable`` file. + When ``transform()`` is called on a rule, the rule should either fix the + issue, if possible, or make modifications that make it easier to fix manually. + + The transform must set ``match.fixed = True`` when data has been transformed to + fix the error. + + For YAML files, ``data`` is an editable YAML dict/array that preserves + any comments that were in the original file. + + .. code:: python + + data[0]["tasks"][0]["when"] = False + + This is easier with the ``seek()`` utility method: + + .. 
code :: python + + target_task = self.seek(match.yaml_path, data) + target_task["when"] = False + + For any files that aren't YAML, ``data`` is the loaded file's content as a string. + To edit non-YAML files, save the updated contents in ``lintable.content``: + + .. code:: python + + new_data = self.do_something_to_fix_the_match(data) + lintable.content = new_data + """ + + @staticmethod + def seek( + yaml_path: list[int | str], + data: MutableMapping[str, Any] | MutableSequence[Any] | str, + ) -> Any: + """Get the element identified by ``yaml_path`` in ``data``. + + Rules that work with YAML need to seek, or descend, into nested YAML data + structures to perform the relevant transforms. For example: + + .. code:: python + + def transform(self, match, lintable, data): + target_task = self.seek(match.yaml_path, data) + # transform target_task + """ + if isinstance(data, str): + # can't descend into a string + return data + target = data + for segment in yaml_path: + # The cast() calls tell mypy what types we expect. + # Essentially this does: + if isinstance(segment, str): + target = cast(MutableMapping[str, Any], target)[segment] + elif isinstance(segment, int): + target = cast(MutableSequence[Any], target)[segment] + return target + + +# pylint: disable=too-many-nested-blocks +def load_plugins( + dirs: list[str], +) -> Iterator[AnsibleLintRule]: + """Yield a rule class.""" + + def all_subclasses(cls: type) -> set[type]: + return set(cls.__subclasses__()).union( + [s for c in cls.__subclasses__() for s in all_subclasses(c)], + ) + + orig_sys_path = sys.path.copy() + + for directory in dirs: + if directory not in sys.path: + sys.path.append(str(directory)) + + # load all modules in the directory + for f in Path(directory).glob("*.py"): + if "__" not in f.stem and f.stem not in "conftest": + import_module(f"{f.stem}") + # restore sys.path + sys.path = orig_sys_path + + rules: dict[str, BaseRule] = {} + for rule in all_subclasses(BaseRule): + # we do not return the rules that are not loaded from passed 'directory' + # or rules that do not have a valid id. For example, during testing + # python may load other rule classes, some outside the tested rule + # directories. + if ( + rule.id # type: ignore[attr-defined] + and Path(inspect.getfile(rule)).parent.absolute() + in [Path(x).absolute() for x in dirs] + and issubclass(rule, BaseRule) + and rule.id not in rules + ): + rules[rule.id] = rule() + for rule in rules.values(): # type: ignore[assignment] + if isinstance(rule, AnsibleLintRule) and bool(rule.id): + yield rule + + +class RulesCollection: + """Container for a collection of rules.""" + + def __init__( + self, + rulesdirs: list[str] | list[Path] | None = None, + options: Options | None = None, + profile_name: str | None = None, + *, + conditional: bool = True, + app: App | None = None, + ) -> None: + """Initialize a RulesCollection instance.""" + if options is None: + self.options = copy.deepcopy(default_options) + # When initialized without options argument we want it to always + # be offline as this is done only during testing. + self.options.offline = True + else: + self.options = options + self.profile = [] + self.app = app or get_app(offline=True) + + if profile_name: + self.profile = PROFILES[profile_name] + rulesdirs_str = [] if rulesdirs is None else [str(r) for r in rulesdirs] + self.rulesdirs = expand_paths_vars(rulesdirs_str) + self.rules: list[BaseRule] = [] + # internal rules included in order to expose them for docs as they are + # not directly loaded by our rule loader. 
+ self.rules.extend( + [ + RuntimeErrorRule(), + AnsibleParserErrorRule(), + LoadingFailureRule(), + WarningRule(), + ], + ) + for rule in load_plugins(rulesdirs_str): + self.register(rule, conditional=conditional) + self.rules = sorted(self.rules) + + # When we have a profile we unload some of the rules + # But we do include all rules when listing all rules or tags + if profile_name and not (self.options.list_rules or self.options.list_tags): + filter_rules_with_profile(self.rules, profile_name) + + def register(self, obj: AnsibleLintRule, *, conditional: bool = False) -> None: + """Register a rule.""" + # We skip opt-in rules which were not manually enabled. + # But we do include opt-in rules when listing all rules or tags + obj._collection = self # pylint: disable=protected-access # noqa: SLF001 + if any( + [ + not conditional, + self.profile, # when profile is used we load all rules and filter later + "opt-in" not in obj.tags, + obj.id in self.options.enable_list, + self.options.list_rules, + self.options.list_tags, + ], + ): + self.rules.append(obj) + + def __iter__(self) -> Iterator[BaseRule]: + """Return the iterator over the rules in the RulesCollection.""" + return iter(sorted(self.rules)) + + def alphabetical(self) -> Iterator[BaseRule]: + """Return an iterator over the rules in the RulesCollection in alphabetical order.""" + return iter(sorted(self.rules, key=lambda x: x.id)) + + def __len__(self) -> int: + """Return the length of the RulesCollection data.""" + return len(self.rules) + + def extend(self, more: list[AnsibleLintRule]) -> None: + """Combine rules.""" + self.rules.extend(more) + + def run( + self, + file: Lintable, + tags: set[str] | None = None, + skip_list: list[str] | None = None, + ) -> list[MatchError]: + """Run all the rules against the given lintable.""" + matches: list[MatchError] = [] + if tags is None: + tags = set() + if skip_list is None: + skip_list = [] + + if not file.path.is_dir(): + try: + if file.content is not None: # loads the file content + pass + except (OSError, UnicodeDecodeError) as exc: + return [ + MatchError( + message=str(exc), + lintable=file, + rule=LoadingFailureRule(), + tag=f"{LoadingFailureRule.id}[{exc.__class__.__name__.lower()}]", + ), + ] + + for rule in self.rules: + if rule.id == "syntax-check": + continue + if ( + not tags + or rule.has_dynamic_tags + or not set(rule.tags).union([rule.id]).isdisjoint(tags) + ): + rule_definition = set(rule.tags) + rule_definition.add(rule.id) + if set(rule_definition).isdisjoint(skip_list): + matches.extend(rule.getmatches(file)) + + # some rules can produce matches with tags that are inside our + # skip_list, so we need to cleanse the matches + matches = [m for m in matches if m.tag not in skip_list] + + return matches + + def __repr__(self) -> str: + """Return a RulesCollection instance representation.""" + return "\n".join( + [rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)], + ) + + def list_tags(self) -> str: + """Return a string with all the tags in the RulesCollection.""" + tag_desc = { + "command-shell": "Specific to use of command and shell modules", + "core": "Related to internal implementation of the linter", + "deprecations": "Indicate use of features that are removed from Ansible", + "experimental": "Newly introduced rules, by default triggering only warnings", + "formatting": "Related to code-style", + "idempotency": "Possible indication that consequent runs would produce different results", + "idiom": "Anti-pattern detected, likely to cause undesired 
behavior", + "metadata": "Invalid metadata, likely related to galaxy, collections or roles", + "opt-in": "Rules that are not used unless manually added to `enable_list`", + "security": "Rules related o potentially security issues, like exposing credentials", + "syntax": "Related to wrong or deprecated syntax", + "unpredictability": "Warn about code that might not work in a predictable way", + "unskippable": "Indicate a fatal error that cannot be ignored or disabled", + "yaml": "External linter which will also produce its own rule codes", + } + + tags = defaultdict(list) + for rule in self.rules: + # Fail early if a rule does not have any of our required tags + if not set(rule.tags).intersection(tag_desc.keys()): + msg = f"Rule {rule} does not have any of the required tags: {', '.join(tag_desc.keys())}" + raise RuntimeError(msg) + for tag in rule.tags: + for id_ in rule.ids(): + tags[tag].append(id_) + result = "# List of tags and rules they cover\n" + for tag in sorted(tags): + desc = tag_desc.get(tag, None) + if desc: + result += f"{tag}: # {desc}\n" + else: + result += f"{tag}:\n" + for name in sorted(tags[tag]): + result += f" - {name}\n" + return result + + +def filter_rules_with_profile(rule_col: list[BaseRule], profile: str) -> None: + """Unload rules that are not part of the specified profile.""" + included = set() + extends = profile + total_rules = len(rule_col) + while extends: + for rule in PROFILES[extends]["rules"]: + _logger.debug("Activating rule `%s` due to profile `%s`", rule, extends) + included.add(rule) + extends = PROFILES[extends].get("extends", None) + for rule in rule_col.copy(): + if rule.id not in included: + _logger.debug( + "Unloading %s rule due to not being part of %s profile.", + rule.id, + profile, + ) + rule_col.remove(rule) + _logger.debug("%s/%s rules included in the profile", len(rule_col), total_rules) diff --git a/src/ansiblelint/rules/args.md b/src/ansiblelint/rules/args.md new file mode 100644 index 0000000..567d0fd --- /dev/null +++ b/src/ansiblelint/rules/args.md @@ -0,0 +1,91 @@ +# args + +This rule validates if the task arguments conform with the plugin documentation. + +The rule validation will check if the option name is valid and has the correct +value along with conditionals on the options like `mutually_exclusive`, +`required_together`, `required_one_of` and so on. + +For more information see the +[argument spec validator](https://docs.ansible.com/ansible/latest/reference_appendices/module_utils.html#argumentspecvalidator) +topic in the Ansible module utility documentation. + +Possible messages: + +- `args[module]` - missing required arguments: ... +- `args[module]` - missing parameter(s) required by ... + +## Problematic Code + +```yaml +--- +- name: Fixture to validate module options failure scenarios + hosts: localhost + tasks: + - name: Clone content repository + ansible.builtin.git: # <- Required option `repo` is missing. + dest: /home/www + accept_hostkey: true + version: master + update: false + + - name: Enable service httpd and ensure it is not masked + ansible.builtin.systemd: # <- Missing 'name' parameter required by 'enabled'. + enabled: true + masked: false + + - name: Use quiet to avoid verbose output + ansible.builtin.assert: + test: + - my_param <= 100 + - my_param >= 0 + quiet: invalid # <- Value for option `quiet` is invalid. 
+``` + +## Correct Code + +```yaml +--- +- name: Fixture to validate module options pass scenario + hosts: localhost + tasks: + - name: Clone content repository + ansible.builtin.git: # <- Contains required option `repo`. + repo: https://github.com/ansible/ansible-examples + dest: /home/www + accept_hostkey: true + version: master + update: false + + - name: Enable service httpd and ensure it is not masked + ansible.builtin.systemd: # <- Contains 'name' parameter required by 'enabled'. + name: httpd + enabled: false + masked: false + + - name: Use quiet to avoid verbose output + ansible.builtin.assert: + that: + - my_param <= 100 + - my_param >= 0 + quiet: True # <- Has correct type value for option `quiet` which is boolean. +``` + +## Special cases + +In some complex cases where you are using jinja expressions, the linter may not +able to fully validate all the possible values and report a false positive. The +example below would usually report +`parameters are mutually exclusive: data|file|keyserver|url` but because we +added `# noqa: args[module]` it will just pass. + +```yaml +- name: Add apt keys # noqa: args[module] + become: true + ansible.builtin.apt_key: + url: "{{ zj_item['url'] | default(omit) }}" + data: "{{ zj_item['data'] | default(omit) }}" + loop: "{{ repositories_keys }}" + loop_control: + loop_var: zj_item +``` diff --git a/src/ansiblelint/rules/args.py b/src/ansiblelint/rules/args.py new file mode 100644 index 0000000..2acf32e --- /dev/null +++ b/src/ansiblelint/rules/args.py @@ -0,0 +1,310 @@ +"""Rule definition to validate task options.""" +from __future__ import annotations + +import contextlib +import importlib.util +import io +import json +import logging +import re +import sys +from functools import lru_cache +from typing import TYPE_CHECKING, Any + +# pylint: disable=preferred-module +from unittest import mock +from unittest.mock import patch + +# pylint: disable=reimported +import ansible.module_utils.basic as mock_ansible_module +from ansible.module_utils import basic +from ansible.plugins.loader import PluginLoadContext, module_loader + +from ansiblelint.constants import LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule, RulesCollection +from ansiblelint.text import has_jinja +from ansiblelint.yaml_utils import clean_json + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +_logger = logging.getLogger(__name__) + +ignored_re = re.compile( + "|".join( # noqa: FLY002 + [ + r"^parameters are mutually exclusive:", + # https://github.com/ansible/ansible-lint/issues/3128 as strings can be jinja + # Do not remove unless you manually test if the original example + # from the bug does not trigger the rule anymore. We were not able + # to add a regression test because it would involve installing this + # collection. Attempts to reproduce same bug with other collections + # failed, even if the message originates from Ansible core. + r"^unable to evaluate string as dictionary$", + ], + ), + flags=re.MULTILINE | re.DOTALL, +) + +workarounds_drop_map = { + # https://github.com/ansible/ansible-lint/issues/3110 + "ansible.builtin.copy": ["decrypt"], + # https://github.com/ansible/ansible-lint/issues/2824#issuecomment-1354337466 + # https://github.com/ansible/ansible-lint/issues/3138 + "ansible.builtin.service": ["daemon_reload", "use"], + # Avoid: Unsupported parameters for (basic.py) module: cmd. 
Supported parameters include: _raw_params, _uses_shell, argv, chdir, creates, executable, removes, stdin, stdin_add_newline, strip_empty_ends. + "ansible.builtin.command": ["cmd"], + # https://github.com/ansible/ansible-lint/issues/3152 + "ansible.posix.synchronize": ["use_ssh_args"], +} +workarounds_inject_map = { + # https://github.com/ansible/ansible-lint/issues/2824 + "ansible.builtin.async_status": {"_async_dir": "/tmp/ansible-async"}, +} + + +@lru_cache +def load_module(module_name: str) -> PluginLoadContext: + """Load plugin from module name and cache it.""" + return module_loader.find_plugin_with_context(module_name) + + +class ValidationPassedError(Exception): + """Exception to be raised when validation passes.""" + + +class CustomAnsibleModule(basic.AnsibleModule): # type: ignore[misc] + """Mock AnsibleModule class.""" + + def __init__(self, *args: str, **kwargs: str) -> None: + """Initialize AnsibleModule mock.""" + super().__init__(*args, **kwargs) + raise ValidationPassedError + + +class ArgsRule(AnsibleLintRule): + """Validating module arguments.""" + + id = "args" + severity = "HIGH" + description = "Check whether tasks are using correct module options." + tags = ["syntax", "experimental"] + version_added = "v6.10.0" + module_aliases: dict[str, str] = {"block/always/rescue": "block/always/rescue"} + _ids = { + "args[module]": description, + } + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + # pylint: disable=too-many-locals,too-many-return-statements + results: list[MatchError] = [] + module_name = task["action"]["__ansible_module_original__"] + failed_msg = None + + if module_name in self.module_aliases: + return [] + + loaded_module = load_module(module_name) + + # https://github.com/ansible/ansible-lint/issues/3200 + # since "ps1" modules cannot be executed on POSIX platforms, we will + # avoid running this rule for such modules + if isinstance( + loaded_module.plugin_resolved_path, + str, + ) and loaded_module.plugin_resolved_path.endswith(".ps1"): + return [] + + module_args = { + key: value + for key, value in task["action"].items() + if not key.startswith("__") + } + + # Return if 'args' is jinja string + # https://github.com/ansible/ansible-lint/issues/3199 + if ( + "args" in task.raw_task + and isinstance(task.raw_task["args"], str) + and has_jinja(task.raw_task["args"]) + ): + return [] + + if loaded_module.resolved_fqcn in workarounds_inject_map: + module_args.update(workarounds_inject_map[loaded_module.resolved_fqcn]) + if loaded_module.resolved_fqcn in workarounds_drop_map: + for key in workarounds_drop_map[loaded_module.resolved_fqcn]: + if key in module_args: + del module_args[key] + + with mock.patch.object( + mock_ansible_module, + "AnsibleModule", + CustomAnsibleModule, + ): + spec = importlib.util.spec_from_file_location( + name=loaded_module.resolved_fqcn, + location=loaded_module.plugin_resolved_path, + ) + if spec: + assert spec.loader is not None + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + else: + assert file is not None + _logger.warning( + "Unable to load module %s at %s:%s for options validation", + module_name, + file.filename, + task[LINE_NUMBER_KEY], + ) + return [] + + try: + if not hasattr(module, "main"): + # skip validation for module options that are implemented as action plugin + # as the option values can be changed in action plugin and are not passed + # through `ArgumentSpecValidator` class as in case of modules. 
+ return [] + + with patch.object( + sys, + "argv", + ["", json.dumps({"ANSIBLE_MODULE_ARGS": clean_json(module_args)})], + ): + fio = io.StringIO() + failed_msg = "" + # Warning: avoid running anything while stdout is redirected + # as what happens may be very hard to debug. + with contextlib.redirect_stdout(fio): + # pylint: disable=protected-access + basic._ANSIBLE_ARGS = None # noqa: SLF001 + try: + module.main() + except SystemExit: + failed_msg = fio.getvalue() + if failed_msg: + results.extend( + self._parse_failed_msg(failed_msg, task, module_name, file), + ) + + sanitized_results = self._sanitize_results(results, module_name) + return sanitized_results + except ValidationPassedError: + return [] + + # pylint: disable=unused-argument + def _sanitize_results( + self, + results: list[MatchError], + module_name: str, + ) -> list[MatchError]: + """Remove results that are false positive.""" + sanitized_results = [] + for result in results: + result_msg = result.message + if ignored_re.match(result_msg): + continue + sanitized_results.append(result) + + return sanitized_results + + def _parse_failed_msg( + self, + failed_msg: str, + task: dict[str, Any], + module_name: str, + file: Lintable | None = None, + ) -> list[MatchError]: + """Parse failed message and return list of MatchError.""" + results: list[MatchError] = [] + try: + failed_obj = json.loads(failed_msg) + error_message = failed_obj["msg"] + except json.decoder.JSONDecodeError: + error_message = failed_msg + + option_type_check_error = re.search( + r"argument '(?P<name>.*)' is of type", + error_message, + ) + if option_type_check_error: + # ignore options with templated variable value with type check errors + option_key = option_type_check_error.group("name") + option_value = task["action"][option_key] + if has_jinja(option_value): + _logger.debug( + "Type checking ignored for '%s' option in task '%s' at line %s.", + option_key, + module_name, + task[LINE_NUMBER_KEY], + ) + return results + + value_not_in_choices_error = re.search( + r"value of (?P<name>.*) must be one of:", + error_message, + ) + if value_not_in_choices_error: + # ignore templated value not in allowed choices + choice_key = value_not_in_choices_error.group("name") + choice_value = task["action"][choice_key] + if has_jinja(choice_value): + _logger.debug( + "Value checking ignored for '%s' option in task '%s' at line %s.", + choice_key, + module_name, + task[LINE_NUMBER_KEY], + ) + return results + + results.append( + self.create_matcherror( + message=error_message, + lineno=task[LINE_NUMBER_KEY], + tag="args[module]", + filename=file, + ), + ) + return results + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest # noqa: TCH002 + + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + def test_args_module_fail(default_rules_collection: RulesCollection) -> None: + """Test rule invalid module options.""" + success = "examples/playbooks/rule-args-module-fail.yml" + results = Runner(success, rules=default_rules_collection).run() + assert len(results) == 5 + assert results[0].tag == "args[module]" + assert "missing required arguments" in results[0].message + assert results[1].tag == "args[module]" + assert "missing parameter(s) required by " in results[1].message + assert results[2].tag == "args[module]" + assert "Unsupported parameters for" in results[2].message + assert results[3].tag == "args[module]" + assert "Unsupported parameters for" in results[3].message + 
assert results[4].tag == "args[module]" + assert "value of state must be one of" in results[4].message + + def test_args_module_pass( + default_rules_collection: RulesCollection, + caplog: pytest.LogCaptureFixture, + ) -> None: + """Test rule valid module options.""" + success = "examples/playbooks/rule-args-module-pass.yml" + with caplog.at_level(logging.WARNING): + results = Runner(success, rules=default_rules_collection).run() + assert len(results) == 0, results + assert len(caplog.records) == 0, caplog.records diff --git a/src/ansiblelint/rules/avoid_implicit.md b/src/ansiblelint/rules/avoid_implicit.md new file mode 100644 index 0000000..4c3d781 --- /dev/null +++ b/src/ansiblelint/rules/avoid_implicit.md @@ -0,0 +1,37 @@ +# avoid-implicit + +This rule identifies the use of dangerous implicit behaviors, often also +undocumented. + +This rule will produce the following type of error messages: + +- `avoid-implicit[copy-content]` is not a string as [copy](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/copy_module.html#synopsis) + modules also accept these, but without documenting them. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Write file content + ansible.builtin.copy: + content: { "foo": "bar" } # <-- should use explicit jinja template + dest: /tmp/foo.txt +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Write file content + vars: + content: { "foo": "bar" } + ansible.builtin.copy: + content: "{{ content | to_json }}" # explicit better than implicit! + dest: /tmp/foo.txt +``` diff --git a/src/ansiblelint/rules/avoid_implicit.py b/src/ansiblelint/rules/avoid_implicit.py new file mode 100644 index 0000000..8d1fe26 --- /dev/null +++ b/src/ansiblelint/rules/avoid_implicit.py @@ -0,0 +1,61 @@ +"""Implementation of avoid-implicit rule.""" +# https://github.com/ansible/ansible-lint/issues/2501 +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class AvoidImplicitRule(AnsibleLintRule): + """Rule that identifies use of undocumented or discouraged implicit behaviors.""" + + id = "avoid-implicit" + shortdesc = "Avoid implicit behaviors" + description = ( + "Items which are templated should use ``template`` instead of " + "``copy`` with ``content`` to ensure correctness." 
+    )
+    severity = "MEDIUM"
+    tags = ["unpredictability"]
+    version_added = "v6.8.0"
+
+    def matchtask(
+        self,
+        task: Task,
+        file: Lintable | None = None,
+    ) -> bool | str:
+        """Confirm if current rule is matching a specific task."""
+        if task["action"]["__ansible_module__"] == "copy":
+            content = task["action"].get("content", "")
+            if not isinstance(content, str):
+                return True
+        return False
+
+
+# testing code to be loaded only with pytest or when executed the rule file
+if "pytest" in sys.modules:
+    from ansiblelint.rules import RulesCollection  # pylint: disable=ungrouped-imports
+    from ansiblelint.runner import Runner  # pylint: disable=ungrouped-imports
+
+    def test_template_instead_of_copy_positive() -> None:
+        """Positive test for avoid-implicit."""
+        collection = RulesCollection()
+        collection.register(AvoidImplicitRule())
+        success = "examples/playbooks/rule-avoid-implicit-pass.yml"
+        good_runner = Runner(success, rules=collection)
+        assert [] == good_runner.run()
+
+    def test_template_instead_of_copy_negative() -> None:
+        """Negative test for avoid-implicit."""
+        collection = RulesCollection()
+        collection.register(AvoidImplicitRule())
+        failure = "examples/playbooks/rule-avoid-implicit-fail.yml"
+        bad_runner = Runner(failure, rules=collection)
+        errs = bad_runner.run()
+        assert len(errs) == 1
diff --git a/src/ansiblelint/rules/command_instead_of_module.md b/src/ansiblelint/rules/command_instead_of_module.md
new file mode 100644
index 0000000..a4e69b0
--- /dev/null
+++ b/src/ansiblelint/rules/command_instead_of_module.md
@@ -0,0 +1,35 @@
+# command-instead-of-module
+
+This rule recommends using a specific Ansible module for tasks that are
+better served by a module, as modules are more reliable, provide better
+messaging and usually have additional features like the ability to retry.
+
+In the unlikely case that the rule triggers false positives, you can disable it
+by adding a comment like `# noqa: command-instead-of-module` to the same line.
+
+You can check the rule [source](https://github.com/ansible/ansible-lint/blob/main/src/ansiblelint/rules/command_instead_of_module.py)
+for the full list of commands that trigger the rule, together with the command
+arguments that are allowed as exceptions, and raise a pull request to improve them.
+ +## Problematic Code + +```yaml +--- +- name: Update apt cache + hosts: all + tasks: + - name: Run apt-get update + ansible.builtin.command: apt-get update # <-- better to use ansible.builtin.apt module +``` + +## Correct Code + +```yaml +--- +- name: Update apt cache + hosts: all + tasks: + - name: Run apt-get update + ansible.builtin.apt: + update_cache: true +``` diff --git a/src/ansiblelint/rules/command_instead_of_module.py b/src/ansiblelint/rules/command_instead_of_module.py new file mode 100644 index 0000000..068e430 --- /dev/null +++ b/src/ansiblelint/rules/command_instead_of_module.py @@ -0,0 +1,139 @@ +"""Implementation of command-instead-of-module rule.""" +# Copyright (c) 2013-2014 Will Thames <will@thames.id.au> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+from __future__ import annotations + +import sys +from pathlib import Path +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.utils import convert_to_boolean, get_first_cmd_arg, get_second_cmd_arg + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class CommandsInsteadOfModulesRule(AnsibleLintRule): + """Using command rather than module.""" + + id = "command-instead-of-module" + description = ( + "Executing a command when there is an Ansible module is generally a bad idea" + ) + severity = "HIGH" + tags = ["command-shell", "idiom"] + version_added = "historic" + + _commands = ["command", "shell"] + _modules = { + "apt-get": "apt-get", + "chkconfig": "service", + "curl": "get_url or uri", + "git": "git", + "hg": "hg", + "letsencrypt": "acme_certificate", + "mktemp": "tempfile", + "mount": "mount", + "patch": "patch", + "rpm": "yum or rpm_key", + "rsync": "synchronize", + "sed": "template, replace or lineinfile", + "service": "service", + "supervisorctl": "supervisorctl", + "svn": "subversion", + "systemctl": "systemd", + "tar": "unarchive", + "unzip": "unarchive", + "wget": "get_url or uri", + "yum": "yum", + } + + _executable_options = { + "git": ["branch", "log", "lfs"], + "systemctl": ["--version", "kill", "set-default", "show-environment", "status"], + "yum": ["clean"], + "rpm": ["--nodeps"], + } + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + if task["action"]["__ansible_module__"] not in self._commands: + return False + + first_cmd_arg = get_first_cmd_arg(task) + second_cmd_arg = get_second_cmd_arg(task) + + if not first_cmd_arg: + return False + + executable = Path(first_cmd_arg).name + + if ( + second_cmd_arg + and executable in self._executable_options + and second_cmd_arg in self._executable_options[executable] + ): + return False + + if executable in self._modules and convert_to_boolean( + task["action"].get("warn", True), + ): + message = "{0} used in place of {1} module" + return message.format(executable, self._modules[executable]) + return False + + +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("file", "expected"), + ( + pytest.param( + "examples/playbooks/rule-command-instead-of-module-pass.yml", + 0, + id="pass", + ), + pytest.param( + "examples/playbooks/rule-command-instead-of-module-fail.yml", + 3, + id="fail", + ), + ), + ) + def test_command_instead_of_module( + default_rules_collection: RulesCollection, + file: str, + expected: int, + ) -> None: + """Validate that rule works as intended.""" + results = Runner(file, rules=default_rules_collection).run() + + for result in results: + assert result.rule.id == CommandsInsteadOfModulesRule.id, result + assert len(results) == expected diff --git a/src/ansiblelint/rules/command_instead_of_shell.md b/src/ansiblelint/rules/command_instead_of_shell.md new file mode 100644 index 0000000..0abf69d --- /dev/null +++ b/src/ansiblelint/rules/command_instead_of_shell.md @@ -0,0 +1,30 @@ +# command-instead-of-shell + +This rule identifies uses of `shell` modules instead of a `command` one when +this is not really needed. 
Shell is considerably slower than command and should +be avoided unless there is a special need for using shell features, like +environment variable expansion or chaining multiple commands using pipes. + +## Problematic Code + +```yaml +--- +- name: Problematic example + hosts: localhost + tasks: + - name: Echo a message + ansible.builtin.shell: echo hello # <-- command is better in this case + changed_when: false +``` + +## Correct Code + +```yaml +--- +- name: Correct example + hosts: localhost + tasks: + - name: Echo a message + ansible.builtin.command: echo hello + changed_when: false +``` diff --git a/src/ansiblelint/rules/command_instead_of_shell.py b/src/ansiblelint/rules/command_instead_of_shell.py new file mode 100644 index 0000000..346a071 --- /dev/null +++ b/src/ansiblelint/rules/command_instead_of_shell.py @@ -0,0 +1,97 @@ +"""Implementation of command-instead-of-shell rule.""" +# Copyright (c) 2016 Will Thames <will@thames.id.au> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.utils import get_cmd_args + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class UseCommandInsteadOfShellRule(AnsibleLintRule): + """Use shell only when shell functionality is required.""" + + id = "command-instead-of-shell" + description = ( + "Shell should only be used when piping, redirecting " + "or chaining commands (and Ansible would be preferred " + "for some of those!)" + ) + severity = "HIGH" + tags = ["command-shell", "idiom"] + version_added = "historic" + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + # Use unjinja so that we don't match on jinja filters + # rather than pipes + if task["action"]["__ansible_module__"] in ["shell", "ansible.builtin.shell"]: + # Since Ansible 2.4, the `command` module does not accept setting + # the `executable`. If the user needs to set it, they have to use + # the `shell` module. 
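+            # For example (illustrative): a task that sets
+            # `executable: /bin/bash` genuinely needs `shell`, so it is
+            # left unreported.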
+ if "executable" in task["action"]: + return False + + jinja_stripped_cmd = self.unjinja(get_cmd_args(task)) + return not any(ch in jinja_stripped_cmd for ch in "&|<>;$\n*[]{}?`") + return False + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("file", "expected"), + ( + pytest.param( + "examples/playbooks/rule-command-instead-of-shell-pass.yml", + 0, + id="good", + ), + pytest.param( + "examples/playbooks/rule-command-instead-of-shell-fail.yml", + 3, + id="bad", + ), + ), + ) + def test_rule_command_instead_of_shell( + default_rules_collection: RulesCollection, + file: str, + expected: int, + ) -> None: + """Validate that rule works as intended.""" + results = Runner(file, rules=default_rules_collection).run() + for result in results: + assert result.rule.id == UseCommandInsteadOfShellRule.id, result + assert len(results) == expected diff --git a/src/ansiblelint/rules/conftest.py b/src/ansiblelint/rules/conftest.py new file mode 100644 index 0000000..f4df7a5 --- /dev/null +++ b/src/ansiblelint/rules/conftest.py @@ -0,0 +1,3 @@ +"""Makes pytest fixtures available.""" +# pylint: disable=wildcard-import,unused-wildcard-import +from ansiblelint.testing.fixtures import * # noqa: F403 diff --git a/src/ansiblelint/rules/custom/__init__.py b/src/ansiblelint/rules/custom/__init__.py new file mode 100644 index 0000000..8c3e048 --- /dev/null +++ b/src/ansiblelint/rules/custom/__init__.py @@ -0,0 +1 @@ +"""A placeholder package for putting custom rules under this dir.""" diff --git a/src/ansiblelint/rules/deprecated_bare_vars.md b/src/ansiblelint/rules/deprecated_bare_vars.md new file mode 100644 index 0000000..9e2f15b --- /dev/null +++ b/src/ansiblelint/rules/deprecated_bare_vars.md @@ -0,0 +1,32 @@ +# deprecated-bare-vars + +This rule identifies possible confusing expressions where it is not clear if +a variable or string is to be used and asks for clarification. + +You should either use the full variable syntax ('{{{{ {0} }}}}') or, whenever +possible, convert it to a list of strings. 
+ +## Problematic code + +```yaml +--- +- ansible.builtin.debug: + msg: "{{ item }}" + with_items: foo # <-- deprecated-bare-vars +``` + +## Correct code + +```yaml +--- +# if foo is not really a variable: +- ansible.builtin.debug: + msg: "{{ item }}" + with_items: + - foo + +# if foo is a variable: +- ansible.builtin.debug: + msg: "{{ item }}" + with_items: "{{ foo }}" +``` diff --git a/src/ansiblelint/rules/deprecated_bare_vars.py b/src/ansiblelint/rules/deprecated_bare_vars.py new file mode 100644 index 0000000..1756e92 --- /dev/null +++ b/src/ansiblelint/rules/deprecated_bare_vars.py @@ -0,0 +1,124 @@ +"""Implementation of deprecated-bare-vars rule.""" + +# Copyright (c) 2013-2014 Will Thames <will@thames.id.au> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +from __future__ import annotations + +import os +import sys +from typing import TYPE_CHECKING, Any + +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.text import has_glob, has_jinja + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class UsingBareVariablesIsDeprecatedRule(AnsibleLintRule): + """Using bare variables is deprecated.""" + + id = "deprecated-bare-vars" + description = ( + "Using bare variables is deprecated. Update your " + "playbooks so that the environment value uses the full variable " + "syntax ``{{ your_variable }}``" + ) + severity = "VERY_HIGH" + tags = ["deprecations"] + version_added = "historic" + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + loop_type = next((key for key in task if key.startswith("with_")), None) + if loop_type: + if loop_type in [ + "with_nested", + "with_together", + "with_flattened", + "with_filetree", + "with_community.general.filetree", + ]: + # These loops can either take a list defined directly in the task + # or a variable that is a list itself. When a single variable is used + # we just need to check that one variable, and not iterate over it like + # it's a list. Otherwise, loop through and check all items. 
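+                # Illustrative example: `with_together: my_pairs` (a single
+                # bare variable) arrives here as a string, while
+                # `with_together: [list_one, list_two]` arrives as a list, so
+                # non-list values are wrapped below before each item is checked.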
+ items = task[loop_type] + if not isinstance(items, (list, tuple)): + items = [items] + for var in items: + return self._matchvar(var, task, loop_type) + elif loop_type == "with_subelements": + return self._matchvar(task[loop_type][0], task, loop_type) + elif loop_type in ["with_sequence", "with_ini", "with_inventory_hostnames"]: + pass + else: + return self._matchvar(task[loop_type], task, loop_type) + return False + + def _matchvar( + self, + varstring: str, + task: dict[str, Any], + loop_type: str, + ) -> bool | str: + if isinstance(varstring, str) and not has_jinja(varstring): + valid = loop_type == "with_fileglob" and bool( + has_jinja(varstring) or has_glob(varstring), + ) + + valid |= loop_type == "with_filetree" and bool( + has_jinja(varstring) or varstring.endswith(os.sep), + ) + if not valid: + message = "Possible bare variable '{0}' used in a '{1}' loop. You should use the full variable syntax ('{{{{ {0} }}}}') or convert it to a list if that is not really a variable." + return message.format(task[loop_type], loop_type) + return False + + +if "pytest" in sys.modules: + import pytest + + # pylint: disable=ungrouped-imports + from ansiblelint.rules import RulesCollection + from ansiblelint.runner import Runner + + @pytest.mark.filterwarnings("ignore::ansible_compat.runtime.AnsibleWarning") + def test_use_bare_positive() -> None: + """Positive test for deprecated-bare-vars.""" + collection = RulesCollection() + collection.register(UsingBareVariablesIsDeprecatedRule()) + success = "examples/playbooks/rule-deprecated-bare-vars-pass.yml" + good_runner = Runner(success, rules=collection) + assert [] == good_runner.run() + + def test_use_bare_negative() -> None: + """Negative test for deprecated-bare-vars.""" + collection = RulesCollection() + collection.register(UsingBareVariablesIsDeprecatedRule()) + failure = "examples/playbooks/rule-deprecated-bare-vars-fail.yml" + bad_runner = Runner(failure, rules=collection) + errs = bad_runner.run() + assert len(errs) == 12 diff --git a/src/ansiblelint/rules/deprecated_local_action.md b/src/ansiblelint/rules/deprecated_local_action.md new file mode 100644 index 0000000..c52eb9d --- /dev/null +++ b/src/ansiblelint/rules/deprecated_local_action.md @@ -0,0 +1,21 @@ +# deprecated-local-action + +This rule recommends using `delegate_to: localhost` instead of the +`local_action`. + +## Problematic Code + +```yaml +--- +- name: Task example + local_action: # <-- this is deprecated + module: ansible.builtin.debug +``` + +## Correct Code + +```yaml +- name: Task example + ansible.builtin.debug: + delegate_to: localhost # <-- recommended way to run on localhost +``` diff --git a/src/ansiblelint/rules/deprecated_local_action.py b/src/ansiblelint/rules/deprecated_local_action.py new file mode 100644 index 0000000..fc3e4ff --- /dev/null +++ b/src/ansiblelint/rules/deprecated_local_action.py @@ -0,0 +1,52 @@ +"""Implementation for deprecated-local-action rule.""" +# Copyright (c) 2016, Tsukinowa Inc. 
<info@tsukinowa.jp> +# Copyright (c) 2018, Ansible Project +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class TaskNoLocalAction(AnsibleLintRule): + """Do not use 'local_action', use 'delegate_to: localhost'.""" + + id = "deprecated-local-action" + description = "Do not use ``local_action``, use ``delegate_to: localhost``" + needs_raw_task = True + severity = "MEDIUM" + tags = ["deprecations"] + version_added = "v4.0.0" + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + """Return matches for a task.""" + raw_task = task["__raw_task__"] + if "local_action" in raw_task: + return True + + return False + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + def test_local_action(default_rules_collection: RulesCollection) -> None: + """Positive test deprecated_local_action.""" + results = Runner( + "examples/playbooks/rule-deprecated-local-action-fail.yml", + rules=default_rules_collection, + ).run() + + assert len(results) == 1 + assert results[0].tag == "deprecated-local-action" diff --git a/src/ansiblelint/rules/deprecated_module.md b/src/ansiblelint/rules/deprecated_module.md new file mode 100644 index 0000000..c05d641 --- /dev/null +++ b/src/ansiblelint/rules/deprecated_module.md @@ -0,0 +1,32 @@ +# deprecated-module + +This rule identifies deprecated modules in playbooks. +You should avoid using deprecated modules because they are not maintained, which can pose a security risk. +Additionally when a module is deprecated it is available temporarily with a plan for future removal. + +Refer to the [Ansible module index](https://docs.ansible.com/ansible/latest/collections/index_module.html) for information about replacements and removal dates for deprecated modules. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Configure VLAN ID + ansible.netcommon.net_vlan: # <- Uses a deprecated module. + vlan_id: 20 +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Configure VLAN ID + dellemc.enterprise_sonic.sonic_vlans: # <- Uses a platform specific module. + config: + - vlan_id: 20 +``` diff --git a/src/ansiblelint/rules/deprecated_module.py b/src/ansiblelint/rules/deprecated_module.py new file mode 100644 index 0000000..03c9361 --- /dev/null +++ b/src/ansiblelint/rules/deprecated_module.py @@ -0,0 +1,78 @@ +"""Implementation of deprecated-module rule.""" +# Copyright (c) 2018, Ansible Project + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + + +class DeprecatedModuleRule(AnsibleLintRule): + """Deprecated module.""" + + id = "deprecated-module" + description = ( + "These are deprecated modules, some modules are kept " + "temporarily for backwards compatibility but usage is discouraged." 
+ ) + link = "https://docs.ansible.com/ansible/latest/collections/index_module.html" + severity = "HIGH" + tags = ["deprecations"] + version_added = "v4.0.0" + + _modules = [ + "accelerate", + "aos_asn_pool", + "aos_blueprint", + "aos_blueprint_param", + "aos_blueprint_virtnet", + "aos_device", + "aos_external_router", + "aos_ip_pool", + "aos_logical_device", + "aos_logical_device_map", + "aos_login", + "aos_rack_type", + "aos_template", + "azure", + "cl_bond", + "cl_bridge", + "cl_img_install", + "cl_interface", + "cl_interface_policy", + "cl_license", + "cl_ports", + "cs_nic", + "docker", + "ec2_ami_find", + "ec2_ami_search", + "ec2_remote_facts", + "ec2_vpc", + "kubernetes", + "netscaler", + "nxos_ip_interface", + "nxos_mtu", + "nxos_portchannel", + "nxos_switchport", + "oc", + "panos_nat_policy", + "panos_security_policy", + "vsphere_guest", + "win_msi", + "include", + ] + + def matchtask( + self, + task: dict[str, Any], + file: Lintable | None = None, + ) -> bool | str: + module = task["action"]["__ansible_module__"] + if module in self._modules: + message = "{0} {1}" + return message.format(self.shortdesc, module) + return False diff --git a/src/ansiblelint/rules/empty_string_compare.md b/src/ansiblelint/rules/empty_string_compare.md new file mode 100644 index 0000000..c20bc51 --- /dev/null +++ b/src/ansiblelint/rules/empty_string_compare.md @@ -0,0 +1,44 @@ +# empty-string-compare + +This rule checks for empty string comparison in playbooks. +To ensure code clarity you should avoid using empty strings in conditional statements with the `when` clause. + +- Use `when: var | length > 0` instead of `when: var != ""`. +- Use `when: var | length == 0` instead of `when: var == ""`. + +This is an opt-in rule. +You must enable it in your Ansible-lint configuration as follows: + +```yaml +enable_list: + - empty-string-compare +``` + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Shut down + ansible.builtin.command: /sbin/shutdown -t now + when: ansible_os_family == "" # <- Compares with an empty string. + - name: Shut down + ansible.builtin.command: /sbin/shutdown -t now + when: ansible_os_family !="" # <- Compares with an empty string. 
+``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Shut down + ansible.builtin.shell: | + /sbin/shutdown -t now + echo $var == + when: ansible_os_family +``` diff --git a/src/ansiblelint/rules/empty_string_compare.py b/src/ansiblelint/rules/empty_string_compare.py new file mode 100644 index 0000000..5c7cafc --- /dev/null +++ b/src/ansiblelint/rules/empty_string_compare.py @@ -0,0 +1,80 @@ +"""Implementation of empty-string-compare rule.""" +# Copyright (c) 2016, Will Thames and contributors +# Copyright (c) 2018, Ansible Project + +from __future__ import annotations + +import re +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.yaml_utils import nested_items_path + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class ComparisonToEmptyStringRule(AnsibleLintRule): + """Don't compare to empty string.""" + + id = "empty-string-compare" + description = ( + 'Use ``when: var|length > 0`` rather than ``when: var != ""`` (or ' + 'conversely ``when: var|length == 0`` rather than ``when: var == ""``)' + ) + severity = "HIGH" + tags = ["idiom", "opt-in"] + version_added = "v4.0.0" + + empty_string_compare = re.compile("[=!]= ?(\"{2}|'{2})") + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + for k, v, _ in nested_items_path(task): + if k == "when": + if isinstance(v, str): + if self.empty_string_compare.search(v): + return True + elif isinstance(v, bool): + pass + else: + for item in v: + if isinstance(item, str) and self.empty_string_compare.search( + item, + ): + return True + + return False + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + def test_rule_empty_string_compare_fail() -> None: + """Test rule matches.""" + rules = RulesCollection() + rules.register(ComparisonToEmptyStringRule()) + results = Runner( + "examples/playbooks/rule-empty-string-compare-fail.yml", + rules=rules, + ).run() + assert len(results) == 3 + for result in results: + assert result.message == ComparisonToEmptyStringRule().shortdesc + + def test_rule_empty_string_compare_pass() -> None: + """Test rule matches.""" + rules = RulesCollection() + rules.register(ComparisonToEmptyStringRule()) + results = Runner( + "examples/playbooks/rule-empty-string-compare-pass.yml", + rules=rules, + ).run() + assert len(results) == 0, results diff --git a/src/ansiblelint/rules/fqcn.md b/src/ansiblelint/rules/fqcn.md new file mode 100644 index 0000000..0165477 --- /dev/null +++ b/src/ansiblelint/rules/fqcn.md @@ -0,0 +1,89 @@ +# fqcn + +This rule checks for fully-qualified collection names (FQCN) in Ansible content. + +Declaring an FQCN ensures that an action uses code from the correct namespace. +This avoids ambiguity and conflicts that can cause operations to fail or produce +unexpected results. + +The `fqcn` rule has the following checks: + +- `fqcn[action]` - Use FQCN for module actions, such ... +- `fqcn[action-core]` - Checks for FQCNs from the `ansible.legacy` or + `ansible.builtin` collection. +- `fqcn[canonical]` - You should use canonical module name ... instead of ... +- [`fqcn[deep]`](#deep-modules) - Checks for deep/nested plugins directory + inside collections. 
+- `fqcn[keyword]` - Avoid `collections` keyword by using FQCN for all plugins, + modules, roles and playbooks. + +!!! note + + In most cases you should declare the `ansible.builtin` collection for internal Ansible actions. + You should declare the `ansible.legacy` collection if you use local overrides with actions, such with as the ``shell`` module. + +!!! warning + + This rule does not take [`collections` keyword](https://docs.ansible.com/ansible/latest/collections_guide/collections_using_playbooks.html#simplifying-module-names-with-the-collections-keyword) into consideration for resolving content. + The `collections` keyword provided a temporary mechanism transitioning to Ansible 2.9. + You should rewrite any content that uses the `collections:` key and avoid it where possible. + +## Canonical module names + +Canonical module names are also known as **resolved module names** and they are +to be preferred for most cases. Many Ansible modules have multiple aliases and +redirects, as these were created over time while the content was refactored. +Still, all of them do finally resolve to the same module name, but not without +adding some performance overhead. As very old aliases are at some point removed, +it makes to just refresh the content to make it point to the current canonical +name. + +The only exception for using a canonical name is if your code still needs to be +compatible with a very old version of Ansible, one that does not know how to +resolve that name. If you find yourself in such a situation, feel free to add +this rule to the ignored list. + +## Deep modules + +When writing modules, you should avoid nesting them in deep directories, even if +Ansible allows you to do so. Since early 2023, the official guidance, backed by +the core team, is to use a flat directory structure for modules. This ensures +optimal performance. + +Existing collections that still use deep directories can migrate to the flat +structure in a backward-compatible way by adding redirects like in +[this example](https://github.com/ansible-collections/community.general/blob/main/meta/runtime.yml#L227-L233). + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Create an SSH connection + shell: ssh ssh_user@{{ ansible_ssh_host }} # <- Does not use the FQCN for the shell module. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook (1st solution) + hosts: all + tasks: + - name: Create an SSH connection + # Use the FQCN for the legacy shell module and allow local overrides. + ansible.legacy.shell: + ssh ssh_user@{{ ansible_ssh_host }} -o IdentityFile=path/to/my_rsa +``` + +```yaml +--- +- name: Example playbook (2nd solution) + hosts: all + tasks: + - name: Create an SSH connection + # Use the FQCN for the builtin shell module. 
+ ansible.builtin.shell: ssh ssh_user@{{ ansible_ssh_host }} +``` diff --git a/src/ansiblelint/rules/fqcn.py b/src/ansiblelint/rules/fqcn.py new file mode 100644 index 0000000..768fb9e --- /dev/null +++ b/src/ansiblelint/rules/fqcn.py @@ -0,0 +1,284 @@ +"""Rule definition for usage of fully qualified collection names for builtins.""" +from __future__ import annotations + +import logging +import sys +from typing import TYPE_CHECKING, Any + +from ansible.plugins.loader import module_loader + +from ansiblelint.constants import LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule, TransformMixin + +if TYPE_CHECKING: + from ruamel.yaml.comments import CommentedMap, CommentedSeq + + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +_logger = logging.getLogger(__name__) + +builtins = [ + "add_host", + "apt", + "apt_key", + "apt_repository", + "assemble", + "assert", + "async_status", + "blockinfile", + "command", + "copy", + "cron", + "debconf", + "debug", + "dnf", + "dpkg_selections", + "expect", + "fail", + "fetch", + "file", + "find", + "gather_facts", + "get_url", + "getent", + "git", + "group", + "group_by", + "hostname", + "import_playbook", + "import_role", + "import_tasks", + "include", + "include_role", + "include_tasks", + "include_vars", + "iptables", + "known_hosts", + "lineinfile", + "meta", + "package", + "package_facts", + "pause", + "ping", + "pip", + "raw", + "reboot", + "replace", + "rpm_key", + "script", + "service", + "service_facts", + "set_fact", + "set_stats", + "setup", + "shell", + "slurp", + "stat", + "subversion", + "systemd", + "sysvinit", + "tempfile", + "template", + "unarchive", + "uri", + "user", + "wait_for", + "wait_for_connection", + "yum", + "yum_repository", +] + + +class FQCNBuiltinsRule(AnsibleLintRule, TransformMixin): + """Use FQCN for builtin actions.""" + + id = "fqcn" + severity = "MEDIUM" + description = ( + "Check whether actions are using using full qualified collection names." 
+ ) + tags = ["formatting"] + version_added = "v6.8.0" + module_aliases: dict[str, str] = {"block/always/rescue": "block/always/rescue"} + _ids = { + "fqcn[action-core]": "Use FQCN for builtin module actions", + "fqcn[action]": "Use FQCN for module actions", + "fqcn[canonical]": "You should use canonical module name", + } + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + result = [] + module = task["action"]["__ansible_module_original__"] + + if module not in self.module_aliases: + loaded_module = module_loader.find_plugin_with_context(module) + target = loaded_module.resolved_fqcn + self.module_aliases[module] = target + if target is None: + _logger.warning("Unable to resolve FQCN for module %s", module) + self.module_aliases[module] = module + return [] + if target not in self.module_aliases: + self.module_aliases[target] = target + + if module != self.module_aliases[module]: + module_alias = self.module_aliases[module] + if module_alias.startswith("ansible.builtin"): + legacy_module = module_alias.replace( + "ansible.builtin.", + "ansible.legacy.", + 1, + ) + if module != legacy_module: + result.append( + self.create_matcherror( + message=f"Use FQCN for builtin module actions ({module}).", + details=f"Use `{module_alias}` or `{legacy_module}` instead.", + filename=file, + lineno=task["__line__"], + tag="fqcn[action-core]", + ), + ) + else: + if module.count(".") < 2: + result.append( + self.create_matcherror( + message=f"Use FQCN for module actions, such `{self.module_aliases[module]}`.", + details=f"Action `{module}` is not FQCN.", + filename=file, + lineno=task["__line__"], + tag="fqcn[action]", + ), + ) + # TODO(ssbarnea): Remove the c.g. and c.n. exceptions from here once # noqa: FIX002 + # community team is flattening these. + # https://github.com/ansible-community/community-topics/issues/147 + elif not module.startswith("community.general.") or module.startswith( + "community.network.", + ): + result.append( + self.create_matcherror( + message=f"You should use canonical module name `{self.module_aliases[module]}` instead of `{module}`.", + filename=file, + lineno=task["__line__"], + tag="fqcn[canonical]", + ), + ) + return result + + def matchyaml(self, file: Lintable) -> list[MatchError]: + """Return matches found for a specific YAML text.""" + result = [] + if file.kind == "plugin": + i = file.path.resolve().parts.index("plugins") + plugin_type = file.path.resolve().parts[i : i + 2] + short_path = file.path.resolve().parts[i + 2 :] + if len(short_path) > 1: + result.append( + self.create_matcherror( + message=f"Deep plugins directory is discouraged. 
Move '{file.path}' directly under '{'/'.join(plugin_type)}' folder.", + tag="fqcn[deep]", + filename=file, + ), + ) + elif file.kind == "playbook": + for play in file.data: + if play is None: + continue + + result.extend(self.matchplay(file, play)) + return result + + def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]: + if file.kind != "playbook": + return [] + if "collections" in data: + return [ + self.create_matcherror( + message="Avoid `collections` keyword by using FQCN for all plugins, modules, roles and playbooks.", + lineno=data[LINE_NUMBER_KEY], + tag="fqcn[keyword]", + filename=file, + ), + ] + return [] + + def transform( + self, + match: MatchError, + lintable: Lintable, + data: CommentedMap | CommentedSeq | str, + ) -> None: + if match.tag in self.ids(): + target_task = self.seek(match.yaml_path, data) + # Unfortunately, a lot of data about Ansible content gets lost here, you only get a simple dict. + # For now, just parse the error messages for the data about action names etc. and fix this later. + if match.tag == "fqcn[action-core]": + # split at the first bracket, cut off the last bracket and dot + current_action = match.message.split("(")[1][:-2] + # This will always replace builtin modules with "ansible.builtin" versions, not "ansible.legacy". + # The latter is technically more correct in what ansible has executed so far, the former is most likely better understood and more robust. + new_action = match.details.split("`")[1] + elif match.tag == "fqcn[action]": + current_action = match.details.split("`")[1] + new_action = match.message.split("`")[1] + elif match.tag == "fqcn[canonical]": + current_action = match.message.split("`")[3] + new_action = match.message.split("`")[1] + for _ in range(len(target_task)): + k, v = target_task.popitem(False) + target_task[new_action if k == current_action else k] = v + match.fixed = True + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + from ansiblelint.rules import RulesCollection + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + def test_fqcn_builtin_fail() -> None: + """Test rule matches.""" + collection = RulesCollection() + collection.register(FQCNBuiltinsRule()) + success = "examples/playbooks/rule-fqcn-fail.yml" + results = Runner(success, rules=collection).run() + assert len(results) == 3 + assert results[0].tag == "fqcn[keyword]" + assert "Avoid `collections` keyword" in results[0].message + assert results[1].tag == "fqcn[action-core]" + assert "Use FQCN for builtin module actions" in results[1].message + assert results[2].tag == "fqcn[action]" + assert "Use FQCN for module actions, such" in results[2].message + + def test_fqcn_builtin_pass() -> None: + """Test rule does not match.""" + collection = RulesCollection() + collection.register(FQCNBuiltinsRule()) + success = "examples/playbooks/rule-fqcn-pass.yml" + results = Runner(success, rules=collection).run() + assert len(results) == 0, results + + def test_fqcn_deep_fail() -> None: + """Test rule matches.""" + collection = RulesCollection() + collection.register(FQCNBuiltinsRule()) + failure = "examples/collection/plugins/modules/deep/beta.py" + results = Runner(failure, rules=collection).run() + assert len(results) == 1 + assert results[0].tag == "fqcn[deep]" + assert "Deep plugins directory is discouraged" in results[0].message + + def test_fqcn_deep_pass() -> None: + """Test rule does not match.""" + collection = RulesCollection() + 
collection.register(FQCNBuiltinsRule()) + success = "examples/collection/plugins/modules/alpha.py" + results = Runner(success, rules=collection).run() + assert len(results) == 0 diff --git a/src/ansiblelint/rules/galaxy.md b/src/ansiblelint/rules/galaxy.md new file mode 100644 index 0000000..61fc5c5 --- /dev/null +++ b/src/ansiblelint/rules/galaxy.md @@ -0,0 +1,111 @@ +# galaxy + +This rule identifies if the collection version mentioned in galaxy.yml is ideal +in terms of the version number being greater than or equal to `1.0.0`. + +This rule looks for a changelog file in expected locations, detailed below in +the Changelog Details section. + +This rule checks to see if the `galaxy.yml` file includes one of the required +tags for certification on Automation Hub. Additional custom tags can be added, +but one or more of these tags must be present for certification. + +The tag list is as follows: `application`, `cloud`,`database`, `infrastructure`, +`linux`, `monitoring`, `networking`, `security`,`storage`, `tools`, `windows`. + +This rule can produce messages such: + +- `galaxy[version-missing]` - `galaxy.yaml` should have version tag. +- `galaxy[version-incorrect]` - collection version should be greater than or + equal to `1.0.0` +- `galaxy[no-changelog]` - collection is missing a changelog file in expected + locations. +- `galaxy[no-runtime]` - Please add a + [meta/runtime.yml](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections_structure.html#meta-directory-and-runtime-yml) + file. +- `galaxy[tags]` - `galaxy.yaml` must have one of the required tags: + `application`, `cloud`, `database`, `infrastructure`, `linux`, `monitoring`, + `networking`, `security`, `storage`, `tools`, `windows`. + +If you want to ignore some of the messages above, you can add any of them to the +`ignore_list`. + +## Problematic code + +```yaml +# galaxy.yml +--- +name: foo +namespace: bar +version: 0.2.3 # <-- collection version should be >= 1.0.0 +authors: + - John +readme: ../README.md +description: "..." +``` + +## Correct code + +```yaml +# galaxy.yml +--- +name: foo +namespace: bar +version: 1.0.0 +authors: + - John +readme: ../README.md +description: "..." +``` + +# Changelog Details + +This rule expects a `CHANGELOG.md` or `.rst` file in the collection root or a +`changelogs/changelog.yaml` file. + +If a `changelogs/changelog.yaml` file exists, the schema will be checked. + +## Minimum required changelog.yaml file + +```yaml +# changelog.yaml +--- +releases: {} +``` + +# Required Tag Details + +## Problematic code + +```yaml +# galaxy.yml +--- +namespace: bar +name: foo +version: 1.0.0 +authors: + - John +readme: ../README.md +description: "..." +license: + - Apache-2.0 +repository: https://github.com/ORG/REPO_NAME +``` + +## Correct code + +```yaml +# galaxy.yml +--- +namespace: bar +name: foo +version: 1.0.0 +authors: + - John +readme: ../README.md +description: "..." 
+license: + - Apache-2.0 +repository: https://github.com/ORG/REPO_NAME +tags: [networking, test_tag, test_tag_2] +``` diff --git a/src/ansiblelint/rules/galaxy.py b/src/ansiblelint/rules/galaxy.py new file mode 100644 index 0000000..2f627f5 --- /dev/null +++ b/src/ansiblelint/rules/galaxy.py @@ -0,0 +1,251 @@ +"""Implementation of GalaxyRule.""" +from __future__ import annotations + +import sys +from functools import total_ordering +from typing import TYPE_CHECKING, Any + +from ansiblelint.constants import LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + + +class GalaxyRule(AnsibleLintRule): + """Rule for checking collection version is greater than 1.0.0 and checking for changelog.""" + + id = "galaxy" + description = "Confirm via galaxy.yml file if collection version is greater than or equal to 1.0.0 and check for changelog." + severity = "MEDIUM" + tags = ["metadata"] + version_added = "v6.11.0 (last update)" + _ids = { + "galaxy[tags]": "galaxy.yaml must have one of the required tags", + "galaxy[no-changelog]": "No changelog found. Please add a changelog file. Refer to the galaxy.md file for more info.", + "galaxy[version-missing]": "galaxy.yaml should have version tag.", + "galaxy[version-incorrect]": "collection version should be greater than or equal to 1.0.0", + "galaxy[no-runtime]": "meta/runtime.yml file not found.", + } + + def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]: + """Return matches found for a specific play (entry in playbook).""" + if file.kind != "galaxy": # type: ignore[comparison-overlap] + return [] + + # Defined by Automation Hub Team and Partner Engineering + required_tag_list = [ + "application", + "cloud", + "database", + "infrastructure", + "linux", + "monitoring", + "networking", + "security", + "storage", + "tools", + "windows", + ] + + results = [] + + base_path = file.path.parent.resolve() + changelog_found = 0 + changelog_paths = [ + base_path / "changelogs" / "changelog.yaml", + base_path / "CHANGELOG.rst", + base_path / "CHANGELOG.md", + ] + + for path in changelog_paths: + if path.is_file(): + changelog_found = 1 + + galaxy_tag_list = data.get("tags", None) + + # Changelog Check - building off Galaxy rule as there is no current way to check + # for a nonexistent file + if not changelog_found: + results.append( + self.create_matcherror( + message="No changelog found. Please add a changelog file. 
Refer to the galaxy.md file for more info.", + tag="galaxy[no-changelog]", + filename=file, + ), + ) + + # Checking if galaxy.yml contains one or more required tags for certification + if not galaxy_tag_list or not any( + tag in required_tag_list for tag in galaxy_tag_list + ): + results.append( + self.create_matcherror( + message=( + f"galaxy.yaml must have one of the required tags: {required_tag_list}" + ), + tag="galaxy[tags]", + filename=file, + ), + ) + + if "version" not in data: + results.append( + self.create_matcherror( + message="galaxy.yaml should have version tag.", + lineno=data[LINE_NUMBER_KEY], + tag="galaxy[version-missing]", + filename=file, + ), + ) + return results + # returning here as it does not make sense + # to continue for version check below + + version = data.get("version") + if Version(version) < Version("1.0.0"): + results.append( + self.create_matcherror( + message="collection version should be greater than or equal to 1.0.0", + # pylint: disable=protected-access + lineno=version._line_number, # noqa: SLF001 + tag="galaxy[version-incorrect]", + filename=file, + ), + ) + + if not (base_path / "meta" / "runtime.yml").is_file(): + results.append( + self.create_matcherror( + message="meta/runtime.yml file not found.", + tag="galaxy[no-runtime]", + filename=file, + ), + ) + + return results + + +@total_ordering +class Version: + """Simple class to compare arbitrary versions.""" + + def __init__(self, version_string: str): + """Construct a Version object.""" + self.components = version_string.split(".") + + def __eq__(self, other: object) -> bool: + """Implement equality comparison.""" + try: + other = _coerce(other) + except NotImplementedError: + return NotImplemented + + return self.components == other.components + + def __lt__(self, other: Version) -> bool: + """Implement lower-than operation.""" + other = _coerce(other) + + return self.components < other.components + + +def _coerce(other: object) -> Version: + if isinstance(other, str): + other = Version(other) + if isinstance(other, (int, float)): + other = Version(str(other)) + if isinstance(other, Version): + return other + msg = f"Unable to coerce object type {type(other)} to Version" + raise NotImplementedError(msg) + + +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner + + def test_galaxy_collection_version_positive() -> None: + """Positive test for collection version in galaxy.""" + collection = RulesCollection() + collection.register(GalaxyRule()) + success = "examples/collection/galaxy.yml" + good_runner = Runner(success, rules=collection) + assert [] == good_runner.run() + + def test_galaxy_collection_version_negative() -> None: + """Negative test for collection version in galaxy.""" + collection = RulesCollection() + collection.register(GalaxyRule()) + failure = "examples/meta/galaxy.yml" + bad_runner = Runner(failure, rules=collection) + errs = bad_runner.run() + assert len(errs) == 1 + + def test_galaxy_no_collection_version() -> None: + """Test for no collection version in galaxy.""" + collection = RulesCollection() + collection.register(GalaxyRule()) + failure = "examples/no_collection_version/galaxy.yml" + bad_runner = Runner(failure, rules=collection) + errs = bad_runner.run() + assert len(errs) == 1 + + def test_version_class() -> None: + """Test for version class.""" + v = Version("1.0.0") + assert v == Version("1.0.0") + assert v != NotImplemented + + def test_coerce() -> 
None: + """Test for _coerce function.""" + assert _coerce("1.0") == Version("1.0") + assert _coerce(1.0) == Version("1.0") + expected = "Unable to coerce object type" + with pytest.raises(NotImplementedError, match=expected): + _coerce(type(Version)) + + @pytest.mark.parametrize( + ("file", "expected"), + ( + pytest.param( + "examples/galaxy_no_required_tags/fail/galaxy.yml", + ["galaxy[tags]"], + id="tags", + ), + pytest.param( + "examples/galaxy_no_required_tags/pass/galaxy.yml", + [], + id="pass", + ), + pytest.param( + "examples/collection/galaxy.yml", + ["schema[galaxy]"], + id="schema", + ), + pytest.param( + "examples/no_changelog/galaxy.yml", + ["galaxy[no-changelog]"], + id="no-changelog", + ), + pytest.param( + "examples/no_collection_version/galaxy.yml", + ["schema[galaxy]", "galaxy[version-missing]"], + id="no-collection-version", + ), + ), + ) + def test_galaxy_rule( + default_rules_collection: RulesCollection, + file: str, + expected: list[str], + ) -> None: + """Validate that rule works as intended.""" + results = Runner(file, rules=default_rules_collection).run() + + assert len(results) == len(expected) + for index, result in enumerate(results): + assert result.tag == expected[index] diff --git a/src/ansiblelint/rules/ignore_errors.md b/src/ansiblelint/rules/ignore_errors.md new file mode 100644 index 0000000..cb17774 --- /dev/null +++ b/src/ansiblelint/rules/ignore_errors.md @@ -0,0 +1,61 @@ +# ignore-errors + +This rule checks that playbooks do not use the `ignore_errors` directive to ignore all errors. +Ignoring all errors in a playbook hides actual failures, incorrectly mark tasks as failed, and result in unexpected side effects and behavior. + +Instead of using the `ignore_errors: true` directive, you should do the following: + +- Ignore errors only when using the `{{ ansible_check_mode }}` variable. +- Use `register` to register errors. +- Use `failed_when:` and specify acceptable error conditions. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Run apt-get update + ansible.builtin.command: apt-get update + ignore_errors: true # <- Ignores all errors, including important failures. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Run apt-get update + ansible.builtin.command: apt-get update + ignore_errors: "{{ ansible_check_mode }}" # <- Ignores errors in check mode. +``` + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Run apt-get update + ansible.builtin.command: apt-get update + ignore_errors: true + register: ignore_errors_register # <- Stores errors and failures for evaluation. +``` + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Disable apport + become: "yes" + lineinfile: + line: "enabled=0" + dest: /etc/default/apport + mode: 0644 + state: present + register: default_apport + failed_when: default_apport.rc !=0 and not default_apport.rc == 257 # <- Defines conditions that constitute a failure. 
+``` diff --git a/src/ansiblelint/rules/ignore_errors.py b/src/ansiblelint/rules/ignore_errors.py new file mode 100644 index 0000000..4144f2d --- /dev/null +++ b/src/ansiblelint/rules/ignore_errors.py @@ -0,0 +1,144 @@ +"""IgnoreErrorsRule used with ansible-lint.""" +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class IgnoreErrorsRule(AnsibleLintRule): + """Use failed_when and specify error conditions instead of using ignore_errors.""" + + id = "ignore-errors" + description = ( + "Instead of ignoring all errors, ignore the errors only when using ``{{ ansible_check_mode }}``, " + "register the errors using ``register``, " + "or use ``failed_when:`` and specify acceptable error conditions " + "to reduce the risk of ignoring important failures." + ) + severity = "LOW" + tags = ["unpredictability"] + version_added = "v5.0.7" + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + if ( + task.get("ignore_errors") + and task.get("ignore_errors") != "{{ ansible_check_mode }}" + and not task.get("register") + ): + return True + + return False + + +if "pytest" in sys.modules: + import pytest + + if TYPE_CHECKING: + from ansiblelint.testing import RunFromText # pylint: disable=ungrouped-imports + + IGNORE_ERRORS_TRUE = """ +- hosts: all + tasks: + - name: Run apt-get update + command: apt-get update + ignore_errors: true +""" + + IGNORE_ERRORS_FALSE = """ +- hosts: all + tasks: + - name: Run apt-get update + command: apt-get update + ignore_errors: false +""" + + IGNORE_ERRORS_CHECK_MODE = """ +- hosts: all + tasks: + - name: Run apt-get update + command: apt-get update + ignore_errors: "{{ ansible_check_mode }}" +""" + + IGNORE_ERRORS_REGISTER = """ +- hosts: all + tasks: + - name: Run apt-get update + command: apt-get update + ignore_errors: true + register: ignore_errors_register +""" + + FAILED_WHEN = """ +- hosts: all + tasks: + - name: Disable apport + become: 'yes' + lineinfile: + line: "enabled=0" + dest: /etc/default/apport + mode: 0644 + state: present + register: default_apport + failed_when: default_apport.rc !=0 and not default_apport.rc == 257 +""" + + @pytest.mark.parametrize( + "rule_runner", + (IgnoreErrorsRule,), + indirect=["rule_runner"], + ) + def test_ignore_errors_true(rule_runner: RunFromText) -> None: + """The task uses ignore_errors.""" + results = rule_runner.run_playbook(IGNORE_ERRORS_TRUE) + assert len(results) == 1 + + @pytest.mark.parametrize( + "rule_runner", + (IgnoreErrorsRule,), + indirect=["rule_runner"], + ) + def test_ignore_errors_false(rule_runner: RunFromText) -> None: + """The task uses ignore_errors: false, oddly enough.""" + results = rule_runner.run_playbook(IGNORE_ERRORS_FALSE) + assert len(results) == 0 + + @pytest.mark.parametrize( + "rule_runner", + (IgnoreErrorsRule,), + indirect=["rule_runner"], + ) + def test_ignore_errors_check_mode(rule_runner: RunFromText) -> None: + """The task uses ignore_errors: "{{ ansible_check_mode }}".""" + results = rule_runner.run_playbook(IGNORE_ERRORS_CHECK_MODE) + assert len(results) == 0 + + @pytest.mark.parametrize( + "rule_runner", + (IgnoreErrorsRule,), + indirect=["rule_runner"], + ) + def test_ignore_errors_register(rule_runner: RunFromText) -> None: + """The task uses ignore_errors: but output is registered and managed.""" + results = 
rule_runner.run_playbook(IGNORE_ERRORS_REGISTER) + assert len(results) == 0 + + @pytest.mark.parametrize( + "rule_runner", + (IgnoreErrorsRule,), + indirect=["rule_runner"], + ) + def test_failed_when(rule_runner: RunFromText) -> None: + """Instead of ignore_errors, this task uses failed_when.""" + results = rule_runner.run_playbook(FAILED_WHEN) + assert len(results) == 0 diff --git a/src/ansiblelint/rules/inline_env_var.md b/src/ansiblelint/rules/inline_env_var.md new file mode 100644 index 0000000..bc83f7e --- /dev/null +++ b/src/ansiblelint/rules/inline_env_var.md @@ -0,0 +1,38 @@ +# inline-env-var + +This rule checks that playbooks do not set environment variables in the `ansible.builtin.command` module. + +You should set environment variables with the `ansible.builtin.shell` module or the `environment` keyword. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Set environment variable + ansible.builtin.command: MY_ENV_VAR=my_value # <- Sets an environment variable in the command module. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Set environment variable + ansible.builtin.shell: echo $MY_ENV_VAR + environment: + MY_ENV_VAR: my_value # <- Sets an environment variable with the environment keyword. +``` + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Set environment variable + ansible.builtin.shell: MY_ENV_VAR=my_value # <- Sets an environment variable with the shell module. +``` diff --git a/src/ansiblelint/rules/inline_env_var.py b/src/ansiblelint/rules/inline_env_var.py new file mode 100644 index 0000000..f578fb7 --- /dev/null +++ b/src/ansiblelint/rules/inline_env_var.py @@ -0,0 +1,76 @@ +"""Implementation of inside-env-var rule.""" +# Copyright (c) 2016 Will Thames <will@thames.id.au> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
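Before the implementation that follows, here is a minimal standalone sketch of the heuristic the `inline-env-var` rule relies on: a `command` task is flagged when the first word of the raw command contains an `=` sign (a `VAR=value` prefix). The helper name and the sample strings are illustrative only; the real rule works on the parsed task via `get_first_cmd_arg` and additionally flags unexpected module arguments.

```python
# Illustrative sketch only -- not part of inline_env_var.py. It mimics the
# core check used by the rule below: flag a command whose first word
# contains '=' (i.e. an inline VAR=value assignment).
def looks_like_inline_env_var(raw_command: str) -> bool:
    """Return True when the command starts with a VAR=value assignment."""
    words = raw_command.split()
    return bool(words) and "=" in words[0]


assert looks_like_inline_env_var("MY_ENV_VAR=my_value /usr/bin/do_thing")
assert not looks_like_inline_env_var("/sbin/shutdown -t now")
```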
+from __future__ import annotations + +from typing import TYPE_CHECKING + +from ansiblelint.constants import FILENAME_KEY, LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.utils import Task, get_first_cmd_arg + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + + +class EnvVarsInCommandRule(AnsibleLintRule): + """Command module does not accept setting environment variables inline.""" + + id = "inline-env-var" + description = ( + "Use ``environment:`` to set environment variables " + "or use ``shell`` module which accepts both" + ) + severity = "VERY_HIGH" + tags = ["command-shell", "idiom"] + version_added = "historic" + + expected_args = [ + "chdir", + "creates", + "executable", + "removes", + "stdin", + "warn", + "stdin_add_newline", + "strip_empty_ends", + "cmd", + "__ansible_module__", + "__ansible_module_original__", + "_raw_params", + LINE_NUMBER_KEY, + FILENAME_KEY, + ] + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + if task["action"]["__ansible_module__"] in ["command"]: + first_cmd_arg = get_first_cmd_arg(task) + if not first_cmd_arg: + return False + + return any( + [arg not in self.expected_args for arg in task["action"]] + + ["=" in first_cmd_arg], + ) + return False diff --git a/src/ansiblelint/rules/jinja.md b/src/ansiblelint/rules/jinja.md new file mode 100644 index 0000000..8e1732e --- /dev/null +++ b/src/ansiblelint/rules/jinja.md @@ -0,0 +1,55 @@ +# jinja + +This rule can report problems related to jinja2 string templates. The current +version can report: + +- `jinja[spacing]` when there are no spaces between variables + and operators, including filters, like `{{ var_name | filter }}`. This + improves readability and makes it less likely to introduce typos. +- `jinja[invalid]` when the jinja2 template is invalid, like `{{ {{ '1' }} }}`, + which would result in a runtime error if you try to use it with Ansible, even + if it does pass the Ansible syntax check. + +As jinja2 syntax is closely following Python one we aim to follow +[black](https://black.readthedocs.io/en/stable/) formatting rules. If you are +curious how black would reformat a small sniped feel free to visit +[online black formatter](https://black.vercel.app/) site. Keep in mind to not +include the entire jinja2 template, so instead of `{{ 1+2==3 }}`, do paste +only `1+2==3`. + +In ansible, `changed_when`, `failed_when`, `until`, `when` are considered to +use implicit jinja2 templating, meaning that they do not require `{{ }}`. Our +rule will suggest the removal of the braces for these fields. + +## Problematic code + +```yaml +--- +- name: Some task + vars: + foo: "{{some|dict2items}}" # <-- jinja[spacing] + bar: "{{ & }}" # <-- jinja[invalid] + when: "{{ foo | bool }}" # <-- jinja[spacing] - 'when' has implicit templating +``` + +## Correct code + +```yaml +--- +- name: Some task + vars: + foo: "{{ some | dict2items }}" + bar: "{{ '&' }}" + when: foo | bool +``` + +## Current limitations + +In its current form, this rule presents the following limitations: + +- Jinja2 blocks that have newlines in them will not be reformatted because we + consider that the user deliberately wanted to format them in a particular way. +- Jinja2 blocks that use tilde as a binary operation are ignored because black + does not support tilde as a binary operator. Example: `{{ a ~ b }}`. +- Jinja2 blocks that use dot notation with numbers are ignored because python + and black do not allow it. 
Example: `{{ foo.0.bar }}` diff --git a/src/ansiblelint/rules/jinja.py b/src/ansiblelint/rules/jinja.py new file mode 100644 index 0000000..08254bc --- /dev/null +++ b/src/ansiblelint/rules/jinja.py @@ -0,0 +1,740 @@ +"""Rule for checking content of jinja template strings.""" +from __future__ import annotations + +import logging +import re +import sys +from collections import namedtuple +from pathlib import Path +from typing import TYPE_CHECKING, Any + +import black +import jinja2 +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.parsing.yaml.objects import AnsibleUnicode +from jinja2.exceptions import TemplateSyntaxError + +from ansiblelint.constants import LINE_NUMBER_KEY +from ansiblelint.file_utils import Lintable +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.skip_utils import get_rule_skips_from_line +from ansiblelint.text import has_jinja +from ansiblelint.utils import parse_yaml_from_file, template +from ansiblelint.yaml_utils import deannotate, nested_items_path + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.utils import Task + + +_logger = logging.getLogger(__package__) +KEYWORDS_WITH_IMPLICIT_TEMPLATE = ("changed_when", "failed_when", "until", "when") + +Token = namedtuple("Token", "lineno token_type value") + +ignored_re = re.compile( + "|".join( # noqa: FLY002 + [ + r"^Object of type method is not JSON serializable", + r"^Unexpected templating type error occurred on", + r"^obj must be a list of dicts or a nested dict$", + r"^the template file (.*) could not be found for the lookup$", + r"could not locate file in lookup", + r"unable to locate collection", + r"^Error in (.*)is undefined$", + r"^Mandatory variable (.*) not defined.$", + r"is undefined", + r"Unrecognized type <<class 'ansible.template.AnsibleUndefined'>> for (.*) filter <value>$", + # https://github.com/ansible/ansible-lint/issues/3155 + r"^The '(.*)' test expects a dictionary$", + ], + ), + flags=re.MULTILINE | re.DOTALL, +) + + +class JinjaRule(AnsibleLintRule): + """Rule that looks inside jinja2 templates.""" + + id = "jinja" + severity = "LOW" + tags = ["formatting"] + version_added = "v6.5.0" + _ansible_error_re = re.compile( + r"^(?P<error>.*): (?P<detail>.*)\. 
String: (?P<string>.*)$", + flags=re.MULTILINE, + ) + + env = jinja2.Environment(trim_blocks=False) + _tag2msg = { + "invalid": "Syntax error in jinja2 template: {value}", + "spacing": "Jinja2 spacing could be improved: {value} -> {reformatted}", + } + _ids = { + "jinja[invalid]": "Invalid jinja2 syntax", + "jinja[spacing]": "Jinja2 spacing could be improved", + } + + def _msg(self, tag: str, value: str, reformatted: str) -> str: + """Generate error message.""" + return self._tag2msg[tag].format(value=value, reformatted=reformatted) + + # pylint: disable=too-many-locals + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + result = [] + try: + for key, v, path in nested_items_path( + task, + ignored_keys=("block", "ansible.builtin.block", "ansible.legacy.block"), + ): + if isinstance(v, str): + try: + template( + basedir=file.path.parent if file else Path("."), + value=v, + variables=deannotate(task.get("vars", {})), + fail_on_error=True, # we later decide which ones to ignore or not + ) + # ValueError RepresenterError + except AnsibleError as exc: + bypass = False + orig_exc = ( + exc.orig_exc if getattr(exc, "orig_exc", None) else exc + ) + orig_exc_message = getattr(orig_exc, "message", str(orig_exc)) + match = self._ansible_error_re.match( + getattr(orig_exc, "message", str(orig_exc)), + ) + if ignored_re.search(orig_exc_message) or isinstance( + orig_exc, + AnsibleParserError, + ): + # An unhandled exception occurred while running the lookup plugin 'template'. Error was a <class 'ansible.errors.AnsibleError'>, original message: the template file ... could not be found for the lookup. the template file ... could not be found for the lookup + + # ansible@devel (2.14) new behavior: + # AnsibleError(TemplateSyntaxError): template error while templating string: Could not load "ipwrap": 'Invalid plugin FQCN (ansible.netcommon.ipwrap): unable to locate collection ansible.netcommon'. String: Foo {{ buildset_registry.host | ipwrap }}. Could not load "ipwrap": 'Invalid plugin FQCN (ansible.netcommon.ipwrap): unable to locate collection ansible.netcommon' + bypass = True + elif ( + isinstance(orig_exc, (AnsibleError, TemplateSyntaxError)) + and match + ): + error = match.group("error") + detail = match.group("detail") + if error.startswith( + "template error while templating string", + ): + bypass = False + elif detail.startswith("unable to locate collection"): + _logger.debug("Ignored AnsibleError: %s", exc) + bypass = True + else: + bypass = False + elif re.match(r"^lookup plugin (.*) not found$", exc.message): + # lookup plugin 'template' not found + bypass = True + + # AnsibleError: template error while templating string: expected token ':', got '}'. String: {{ {{ '1' }} }} + # AnsibleError: template error while templating string: unable to locate collection ansible.netcommon. 
String: Foo {{ buildset_registry.host | ipwrap }} + if not bypass: + result.append( + self.create_matcherror( + message=str(exc), + lineno=_get_error_line(task, path), + filename=file, + tag=f"{self.id}[invalid]", + ), + ) + continue + reformatted, details, tag = self.check_whitespace( + v, + key=key, + lintable=file, + ) + if reformatted != v: + result.append( + self.create_matcherror( + message=self._msg( + tag=tag, + value=v, + reformatted=reformatted, + ), + lineno=_get_error_line(task, path), + details=details, + filename=file, + tag=f"{self.id}[{tag}]", + ), + ) + except Exception as exc: + _logger.info("Exception in JinjaRule.matchtask: %s", exc) + raise + return result + + def matchyaml(self, file: Lintable) -> list[MatchError]: + """Return matches for variables defined in vars files.""" + data: dict[str, Any] = {} + raw_results: list[MatchError] = [] + results: list[MatchError] = [] + + if str(file.kind) == "vars": + data = parse_yaml_from_file(str(file.path)) + # pylint: disable=unused-variable + for key, v, _path in nested_items_path(data): + if isinstance(v, AnsibleUnicode): + reformatted, details, tag = self.check_whitespace( + v, + key=key, + lintable=file, + ) + if reformatted != v: + results.append( + self.create_matcherror( + message=self._msg( + tag=tag, + value=v, + reformatted=reformatted, + ), + lineno=v.ansible_pos[1], + details=details, + filename=file, + tag=f"{self.id}[{tag}]", + ), + ) + if raw_results: + lines = file.content.splitlines() + for match in raw_results: + # lineno starts with 1, not zero + skip_list = get_rule_skips_from_line( + line=lines[match.lineno - 1], + lintable=file, + ) + if match.rule.id not in skip_list and match.tag not in skip_list: + results.append(match) + else: + results.extend(super().matchyaml(file)) + return results + + def lex(self, text: str) -> list[Token]: + """Parse jinja template.""" + # https://github.com/pallets/jinja/issues/1711 + self.env.keep_trailing_newline = True + + self.env.lstrip_blocks = False + self.env.trim_blocks = False + self.env.autoescape = True + self.env.newline_sequence = "\n" + tokens = [ + Token(lineno=t[0], token_type=t[1], value=t[2]) for t in self.env.lex(text) + ] + new_text = self.unlex(tokens) + if text != new_text: + _logger.debug( + "Unable to perform full roundtrip lex-unlex on jinja template (expected when '-' modifier is used): {text} -> {new_text}", + ) + return tokens + + def unlex(self, tokens: list[Token]) -> str: + """Return original text by compiling the lex output.""" + result = "" + last_lineno = 1 + last_value = "" + for lineno, _, value in tokens: + if lineno > last_lineno and "\n" not in last_value: + result += "\n" + result += value + last_lineno = lineno + last_value = value + return result + + # pylint: disable=too-many-statements,too-many-locals + def check_whitespace( + self, + text: str, + key: str, + lintable: Lintable | None = None, + ) -> tuple[str, str, str]: + """Check spacing inside given jinja2 template string. + + We aim to match Python Black formatting rules. + :raises NotImplementedError: On few cases where valid jinja is not valid Python. + + :returns: (string, string, string) reformatted text, detailed error, error tag + """ + + def cook(value: str, *, implicit: bool = False) -> str: + """Prepare an implicit string for jinja parsing when needed.""" + if not implicit: + return value + if value.startswith("{{") and value.endswith("}}"): + # maybe we should make this an error? 
+ return value + return f"{{{{ {value} }}}}" + + def uncook(value: str, *, implicit: bool = False) -> str: + """Restore an string to original form when it was an implicit one.""" + if not implicit: + return value + return value[3:-3] + + tokens = [] + details = "" + begin_types = ("variable_begin", "comment_begin", "block_begin") + end_types = ("variable_end", "comment_end", "block_end") + implicit = False + + # implicit templates do not have the {{ }} wrapping + if ( + key in KEYWORDS_WITH_IMPLICIT_TEMPLATE + and lintable + and lintable.kind + in ( + "playbook", + "task", + ) + ): + implicit = True + text = cook(text, implicit=implicit) + + # don't try to lex strings that have no jinja inside them + if not has_jinja(text): + return text, "", "spacing" + + expr_str = None + expr_type = None + verb_skipped = True + lineno = 1 + try: + for token in self.lex(text): + if ( + expr_type + and expr_type.startswith("{%") + and token.token_type in ("name", "whitespace") + and not verb_skipped + ): + # on {% blocks we do not take first word as part of the expression + tokens.append(token) + if token.token_type != "whitespace": + verb_skipped = True + elif token.token_type in begin_types: + tokens.append(token) + expr_type = token.value # such {#, {{, {% + expr_str = "" + verb_skipped = False + elif token.token_type in end_types and expr_str is not None: + # process expression + # pylint: disable=unsupported-membership-test + if isinstance(expr_str, str) and "\n" in expr_str: + raise NotImplementedError + leading_spaces = " " * (len(expr_str) - len(expr_str.lstrip())) + expr_str = leading_spaces + blacken(expr_str.lstrip()) + if tokens[ + -1 + ].token_type != "whitespace" and not expr_str.startswith(" "): + expr_str = " " + expr_str + if not expr_str.endswith(" "): + expr_str += " " + tokens.append(Token(lineno, "data", expr_str)) + tokens.append(token) + expr_str = None + expr_type = None + elif expr_str is not None: + expr_str += token.value + else: + tokens.append(token) + lineno = token.lineno + + except jinja2.exceptions.TemplateSyntaxError as exc: + return "", str(exc.message), "invalid" + # https://github.com/PyCQA/pylint/issues/7433 - py311 only + # pylint: disable=c-extension-no-member + except (NotImplementedError, black.parsing.InvalidInput) as exc: + # black is not able to recognize all valid jinja2 templates, so we + # just ignore InvalidInput errors. + # NotImplementedError is raised internally for expressions with + # newlines, as we decided to not touch them yet. + # These both are documented as known limitations. + _logger.debug("Ignored jinja internal error %s", exc) + return uncook(text, implicit=implicit), "", "spacing" + + # finalize + reformatted = self.unlex(tokens) + failed = reformatted != text + reformatted = uncook(reformatted, implicit=implicit) + details = ( + f"Jinja2 template rewrite recommendation: `{reformatted}`." 
+ if failed + else "" + ) + return reformatted, details, "spacing" + + +def blacken(text: str) -> str: + """Format Jinja2 template using black.""" + return black.format_str( + text, + mode=black.FileMode(line_length=sys.maxsize, string_normalization=False), + ).rstrip("\n") + + +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.fixture(name="error_expected_lines") + def fixture_error_expected_lines() -> list[int]: + """Return list of expected error lines.""" + return [33, 36, 39, 42, 45, 48, 74] + + # 21 68 + @pytest.fixture(name="lint_error_lines") + def fixture_lint_error_lines() -> list[int]: + """Get VarHasSpacesRules linting results on test_playbook.""" + collection = RulesCollection() + collection.register(JinjaRule()) + lintable = Lintable("examples/playbooks/jinja-spacing.yml") + results = Runner(lintable, rules=collection).run() + return [item.lineno for item in results] + + def test_jinja_spacing_playbook( + error_expected_lines: list[int], + lint_error_lines: list[int], + ) -> None: + """Ensure that expected error lines are matching found linting error lines.""" + # list unexpected error lines or non-matching error lines + error_lines_difference = list( + set(error_expected_lines).symmetric_difference(set(lint_error_lines)), + ) + assert len(error_lines_difference) == 0 + + def test_jinja_spacing_vars() -> None: + """Ensure that expected error details are matching found linting error details.""" + collection = RulesCollection() + collection.register(JinjaRule()) + lintable = Lintable("examples/playbooks/vars/jinja-spacing.yml") + results = Runner(lintable, rules=collection).run() + + error_expected_lineno = [14, 15, 16, 17, 18, 19, 32] + assert len(results) == len(error_expected_lineno) + for idx, err in enumerate(results): + assert err.lineno == error_expected_lineno[idx] + + @pytest.mark.parametrize( + ("text", "expected", "tag"), + ( + pytest.param( + "{{-x}}{#a#}{%1%}", + "{{- x }}{# a #}{% 1 %}", + "spacing", + id="add-missing-space", + ), + pytest.param("", "", "spacing", id="1"), + pytest.param("foo", "foo", "spacing", id="2"), + pytest.param("{##}", "{# #}", "spacing", id="3"), + # we want to keep leading spaces as they might be needed for complex multiline jinja files + pytest.param("{# #}", "{# #}", "spacing", id="4"), + pytest.param( + "{{-aaa|xx }}foo\nbar{#some#}\n{%%}", + "{{- aaa | xx }}foo\nbar{# some #}\n{% %}", + "spacing", + id="5", + ), + pytest.param( + "Shell with jinja filter", + "Shell with jinja filter", + "spacing", + id="6", + ), + pytest.param( + "{{{'dummy_2':1}|true}}", + "{{ {'dummy_2': 1} | true }}", + "spacing", + id="7", + ), + pytest.param("{{{foo:{}}}}", "{{ {foo: {}} }}", "spacing", id="8"), + pytest.param( + "{{ {'test': {'subtest': variable}} }}", + "{{ {'test': {'subtest': variable}} }}", + "spacing", + id="9", + ), + pytest.param( + "http://foo.com/{{\n case1 }}", + "http://foo.com/{{\n case1 }}", + "spacing", + id="10", + ), + pytest.param("{{foo(123)}}", "{{ foo(123) }}", "spacing", id="11"), + pytest.param("{{ foo(a.b.c) }}", "{{ foo(a.b.c) }}", "spacing", id="12"), + # pytest.param( + # "spacing", + # ), + pytest.param( + "{{foo(x =['server_options'])}}", + "{{ foo(x=['server_options']) }}", + "spacing", + id="14", + ), + pytest.param( + '{{ [ "host", "NA"] }}', + '{{ ["host", "NA"] }}', + "spacing", + id="15", + ), + pytest.param( + "{{ {'dummy_2': {'nested_dummy_1': 
value_1,\n 'nested_dummy_2': value_2}} |\ncombine(dummy_1) }}", + "{{ {'dummy_2': {'nested_dummy_1': value_1,\n 'nested_dummy_2': value_2}} |\ncombine(dummy_1) }}", + "spacing", + id="17", + ), + pytest.param("{{ & }}", "", "invalid", id="18"), + pytest.param( + "{{ good_format }}/\n{{- good_format }}\n{{- good_format -}}\n", + "{{ good_format }}/\n{{- good_format }}\n{{- good_format -}}\n", + "spacing", + id="19", + ), + pytest.param( + "{{ {'a': {'b': 'x', 'c': y}} }}", + "{{ {'a': {'b': 'x', 'c': y}} }}", + "spacing", + id="20", + ), + pytest.param( + "2*(1+(3-1)) is {{ 2 * {{ 1 + {{ 3 - 1 }}}} }}", + "2*(1+(3-1)) is {{ 2 * {{1 + {{3 - 1}}}} }}", + "spacing", + id="21", + ), + pytest.param( + '{{ "absent"\nif (v is version("2.8.0", ">=")\nelse "present" }}', + "", + "invalid", + id="22", + ), + pytest.param( + '{{lookup("x",y+"/foo/"+z+".txt")}}', + '{{ lookup("x", y + "/foo/" + z + ".txt") }}', + "spacing", + id="23", + ), + pytest.param( + "{{ x | map(attribute='value') }}", + "{{ x | map(attribute='value') }}", + "spacing", + id="24", + ), + pytest.param( + "{{ r(a= 1,b= True,c= 0.0,d= '') }}", + "{{ r(a=1, b=True, c=0.0, d='') }}", + "spacing", + id="25", + ), + pytest.param("{{ r(1,[]) }}", "{{ r(1, []) }}", "spacing", id="26"), + pytest.param( + "{{ lookup([ddd ]) }}", + "{{ lookup([ddd]) }}", + "spacing", + id="27", + ), + pytest.param( + "{{ [ x ] if x is string else x }}", + "{{ [x] if x is string else x }}", + "spacing", + id="28", + ), + pytest.param( + "{% if a|int <= 8 -%} iptables {%- else -%} iptables-nft {%- endif %}", + "{% if a | int <= 8 -%} iptables{%- else -%} iptables-nft{%- endif %}", + "spacing", + id="29", + ), + pytest.param( + # "- 2" -> "-2", minus does not get separated when there is no left side + "{{ - 2 }}", + "{{ -2 }}", + "spacing", + id="30", + ), + pytest.param( + # "-2" -> "-2", minus does get an undesired spacing + "{{ -2 }}", + "{{ -2 }}", + "spacing", + id="31", + ), + pytest.param( + # array ranges do not have space added + "{{ foo[2:4] }}", + "{{ foo[2:4] }}", + "spacing", + id="32", + ), + pytest.param( + # array ranges have the extra space removed + "{{ foo[2: 4] }}", + "{{ foo[2:4] }}", + "spacing", + id="33", + ), + pytest.param( + # negative array index + "{{ foo[-1] }}", + "{{ foo[-1] }}", + "spacing", + id="34", + ), + pytest.param( + # negative array index, repair + "{{ foo[- 1] }}", + "{{ foo[-1] }}", + "spacing", + id="35", + ), + pytest.param("{{ a +~'b' }}", "{{ a + ~'b' }}", "spacing", id="36"), + pytest.param( + "{{ (a[: -4] *~ b) }}", + "{{ (a[:-4] * ~b) }}", + "spacing", + id="37", + ), + pytest.param("{{ [a,~ b] }}", "{{ [a, ~b] }}", "spacing", id="38"), + # Not supported yet due to being accepted by black: + pytest.param("{{ item.0.user }}", "{{ item.0.user }}", "spacing", id="39"), + # Not supported by back, while jinja allows ~ to be binary operator: + pytest.param("{{ a ~ b }}", "{{ a ~ b }}", "spacing", id="40"), + pytest.param( + "--format='{{'{{'}}.Size{{'}}'}}'", + "--format='{{ '{{' }}.Size{{ '}}' }}'", + "spacing", + id="41", + ), + pytest.param( + "{{ list_one + {{ list_two | max }} }}", + "{{ list_one + {{list_two | max}} }}", + "spacing", + id="42", + ), + pytest.param( + "{{ lookup('file' , '/tmp/non-existent', errors='ignore') }}", + "{{ lookup('file', '/tmp/non-existent', errors='ignore') }}", + "spacing", + id="43", + ), + # https://github.com/ansible/ansible-lint/pull/3057 + # since jinja 3.0.0, \r is converted to \n if the string has jinja in it + pytest.param( + "{{ 'foo' }}\r{{ 'bar' }}", + "{{ 'foo' }}\n{{ 
'bar' }}", + "spacing", + id="44", + ), + # if we do not have any jinja constructs, we should keep original \r + # to match ansible behavior + pytest.param( + "foo\rbar", + "foo\rbar", + "spacing", + id="45", + ), + ), + ) + def test_jinja(text: str, expected: str, tag: str) -> None: + """Tests our ability to spot spacing errors inside jinja2 templates.""" + rule = JinjaRule() + + reformatted, details, returned_tag = rule.check_whitespace( + text, + key="name", + lintable=Lintable("playbook.yml"), + ) + assert tag == returned_tag, details + assert expected == reformatted + + @pytest.mark.parametrize( + ("text", "expected", "tag"), + ( + pytest.param( + "1+2", + "1 + 2", + "spacing", + id="0", + ), + pytest.param( + "- 1", + "-1", + "spacing", + id="1", + ), + # Ensure that we do not choke with double templating on implicit + # and instead we remove them braces. + pytest.param("{{ o | bool }}", "o | bool", "spacing", id="2"), + ), + ) + def test_jinja_implicit(text: str, expected: str, tag: str) -> None: + """Tests our ability to spot spacing errors implicit jinja2 templates.""" + rule = JinjaRule() + # implicit jinja2 are working only inside playbooks and tasks + lintable = Lintable(name="playbook.yml", kind="playbook") + reformatted, details, returned_tag = rule.check_whitespace( + text, + key="when", + lintable=lintable, + ) + assert tag == returned_tag, details + assert expected == reformatted + + @pytest.mark.parametrize( + ("lintable", "matches"), + (pytest.param("examples/playbooks/vars/rule_jinja_vars.yml", 0, id="0"),), + ) + def test_jinja_file(lintable: str, matches: int) -> None: + """Tests our ability to process var filesspot spacing errors.""" + collection = RulesCollection() + collection.register(JinjaRule()) + errs = Runner(lintable, rules=collection).run() + assert len(errs) == matches + for err in errs: + assert isinstance(err, JinjaRule) + assert errs[0].tag == "jinja[invalid]" + assert errs[0].rule.id == "jinja" + + def test_jinja_invalid() -> None: + """Tests our ability to spot spacing errors inside jinja2 templates.""" + collection = RulesCollection() + collection.register(JinjaRule()) + success = "examples/playbooks/rule-jinja-fail.yml" + errs = Runner(success, rules=collection).run() + assert len(errs) == 2 + assert errs[0].tag == "jinja[spacing]" + assert errs[0].rule.id == "jinja" + assert errs[0].lineno == 9 + assert errs[1].tag == "jinja[invalid]" + assert errs[1].rule.id == "jinja" + assert errs[1].lineno == 9 + + def test_jinja_valid() -> None: + """Tests our ability to parse jinja, even when variables may not be defined.""" + collection = RulesCollection() + collection.register(JinjaRule()) + success = "examples/playbooks/rule-jinja-pass.yml" + errs = Runner(success, rules=collection).run() + assert len(errs) == 0 + + +def _get_error_line(task: dict[str, Any], path: list[str | int]) -> int: + """Return error line number.""" + line = task[LINE_NUMBER_KEY] + ctx = task + for _ in path: + ctx = ctx[_] + if LINE_NUMBER_KEY in ctx: + line = ctx[LINE_NUMBER_KEY] + if not isinstance(line, int): + msg = "Line number is not an integer" + raise RuntimeError(msg) + return line diff --git a/src/ansiblelint/rules/key_order.md b/src/ansiblelint/rules/key_order.md new file mode 100644 index 0000000..378d8a5 --- /dev/null +++ b/src/ansiblelint/rules/key_order.md @@ -0,0 +1,63 @@ +# key-order + +This rule recommends reordering key names in ansible content to make +code easier to maintain and less prone to errors. 
+
+Here are some examples of common ordering checks done for tasks and handlers:
+
+- `name` must always be the first key for plays, tasks and handlers
+- on tasks, the `block`, `rescue` and `always` keys must be the last keys,
+  as this avoids accidental mis-indentation errors between the last task
+  and the parent level.
+
+## Problematic code
+
+```yaml
+---
+- hosts: localhost
+  name: This is a playbook # <-- name key should be the first one
+  tasks:
+    - name: A block
+      block:
+        - name: Display a message
+          debug:
+            msg: "Hello world!"
+      when: true # <-- when key should be before block
+```
+
+## Correct code
+
+```yaml
+---
+- name: This is a playbook
+  hosts: localhost
+  tasks:
+    - name: A block
+      when: true
+      block:
+        - name: Display a message
+          debug:
+            msg: "Hello world!"
+```
+
+## Reasoning
+
+Deciding on the optimal order of keys for ansible tasks or plays is no easy
+task, as there is a huge number of combinations to consider. This is also why
+we started with a minimal sorting rule (`name` comes first) and aim to add
+more fields gradually, only once there is evidence that one approach is
+clearly better than another.
+
+### Why can I no longer put `when` after a `block`?
+
+In real life, `block/rescue/always` sections tend to grow with the number of
+tasks they host, often exceeding a single screen. This pushes the `when` key
+further away from the rest of the task properties. A `when` that belongs to
+the last task inside the block can easily be confused with one at the block
+level, or the reverse. When tasks are moved from one location to another,
+there is also a real risk of moving the block-level `when` along with them.
+
+By putting the `when` before the `block`, we avoid that kind of risk. The
+same risk applies to any simple property at the task level, which is why we
+concluded that the block keys must be the last ones.
+
+Another common practice was to put `tags` as the last property. Still, for
+the same reasons, we decided that it should not be placed after the block
+keys either.
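As a rough, standalone sketch of the comparator idea behind this ordering (the rule's actual implementation follows in `key_order.py`, whose `SORTER_TASKS` tuple this mirrors):

```python
# Minimal sketch only; key_order.py (next in this diff) contains the real rule.
# Preferred order mirrors SORTER_TASKS: `name` first, block/rescue/always last,
# and every other key in between.
from __future__ import annotations

ORDER = ("name", None, "block", "rescue", "always")  # None = any other key


def sort_index(key: str) -> int:
    """Return the position of a task key in the preferred order."""
    return ORDER.index(key) if key in ORDER else ORDER.index(None)


def suggested_order(keys: list[str]) -> list[str]:
    """Sort task keys into the order that key-order would suggest."""
    return sorted(keys, key=sort_index)


print(suggested_order(["when", "block", "name"]))  # ['name', 'when', 'block']
```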
diff --git a/src/ansiblelint/rules/key_order.py b/src/ansiblelint/rules/key_order.py new file mode 100644 index 0000000..897da64 --- /dev/null +++ b/src/ansiblelint/rules/key_order.py @@ -0,0 +1,151 @@ +"""All tasks should be have name come first.""" +from __future__ import annotations + +import functools +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +SORTER_TASKS = ( + "name", + # "__module__", + # "action", + # "args", + None, # <-- None include all modules that not using action and * + # "when", + # "notify", + # "tags", + "block", + "rescue", + "always", +) + + +def get_property_sort_index(name: str) -> int: + """Return the index of the property in the sorter.""" + a_index = -1 + for i, v in enumerate(SORTER_TASKS): + if v == name: + return i + if v is None: + a_index = i + return a_index + + +def task_property_sorter(property1: str, property2: str) -> int: + """Sort task properties based on SORTER.""" + v_1 = get_property_sort_index(property1) + v_2 = get_property_sort_index(property2) + return (v_1 > v_2) - (v_1 < v_2) + + +class KeyOrderRule(AnsibleLintRule): + """Ensure specific order of keys in mappings.""" + + id = "key-order" + shortdesc = __doc__ + severity = "LOW" + tags = ["formatting"] + version_added = "v6.6.2" + needs_raw_task = True + _ids = { + "key-order[task]": "You can improve the task key order", + } + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + result = [] + raw_task = task["__raw_task__"] + keys = [key for key in raw_task if not key.startswith("_")] + sorted_keys = sorted(keys, key=functools.cmp_to_key(task_property_sorter)) + if keys != sorted_keys: + result.append( + self.create_matcherror( + f"You can improve the task key order to: {', '.join(sorted_keys)}", + filename=file, + tag="key-order[task]", + ), + ) + return result + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failures"), + ( + pytest.param("examples/playbooks/rule-key-order-pass.yml", 0, id="pass"), + pytest.param("examples/playbooks/rule-key-order-fail.yml", 6, id="fail"), + ), + ) + def test_key_order_rule( + default_rules_collection: RulesCollection, + test_file: str, + failures: int, + ) -> None: + """Test rule matches.""" + results = Runner(test_file, rules=default_rules_collection).run() + assert len(results) == failures + for result in results: + assert result.rule.id == "key-order" + + @pytest.mark.parametrize( + ("properties", "expected"), + ( + pytest.param([], []), + pytest.param(["block", "name"], ["name", "block"]), + pytest.param( + ["block", "name", "action", "..."], + ["name", "action", "...", "block"], + ), + ), + ) + def test_key_order_property_sorter( + properties: list[str], + expected: list[str], + ) -> None: + """Test the task property sorter.""" + result = sorted(properties, key=functools.cmp_to_key(task_property_sorter)) + assert expected == result + + @pytest.mark.parametrize( + ("key", "order"), + ( + pytest.param("name", 0), + pytest.param("action", 1), + pytest.param("foobar", SORTER_TASKS.index(None)), + pytest.param("block", 
len(SORTER_TASKS) - 3), + pytest.param("rescue", len(SORTER_TASKS) - 2), + pytest.param("always", len(SORTER_TASKS) - 1), + ), + ) + def test_key_order_property_sort_index(key: str, order: int) -> None: + """Test sorting index.""" + assert get_property_sort_index(key) == order + + @pytest.mark.parametrize( + ("prop1", "prop2", "result"), + ( + pytest.param("name", "block", -1), + pytest.param("block", "name", 1), + pytest.param("block", "block", 0), + ), + ) + def test_key_order_property_sortfunc(prop1: str, prop2: str, result: int) -> None: + """Test sorting function.""" + assert task_property_sorter(prop1, prop2) == result diff --git a/src/ansiblelint/rules/latest.md b/src/ansiblelint/rules/latest.md new file mode 100644 index 0000000..1b20432 --- /dev/null +++ b/src/ansiblelint/rules/latest.md @@ -0,0 +1,43 @@ +# latest + +The `latest` rule checks that module arguments like those used for source +control checkout do not have arguments that might generate different results +based on context. + +This more generic rule replaced two older rules named `git-latest` and +`hg-latest`. + +We are aware that there are genuine cases where getting the tip of the main +branch is not accidental. For these cases, just add a comment such as +`# noqa: latest` to the same line to prevent it from triggering. + +## Possible errors messages: + +- `latest[git]` +- `latest[hg]` + +## Problematic code + +```yaml +--- +- name: Example for `latest` rule + hosts: localhost + tasks: + - name: Risky use of git module + ansible.builtin.git: + repo: "https://github.com/ansible/ansible-lint" + version: HEAD # <-- HEAD value is triggering the rule +``` + +## Correct code + +```yaml +--- +- name: Example for `latest` rule + hosts: localhost + tasks: + - name: Safe use of git module + ansible.builtin.git: + repo: "https://github.com/ansible/ansible-lint" + version: abcd1234... 
# <-- that is safe +``` diff --git a/src/ansiblelint/rules/latest.py b/src/ansiblelint/rules/latest.py new file mode 100644 index 0000000..0838feb --- /dev/null +++ b/src/ansiblelint/rules/latest.py @@ -0,0 +1,46 @@ +"""Implementation of latest rule.""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class LatestRule(AnsibleLintRule): + """Result of the command may vary on subsequent runs.""" + + id = "latest" + description = ( + "All version control checkouts must point to " + "an explicit commit or tag, not just ``latest``" + ) + severity = "MEDIUM" + tags = ["idempotency"] + version_added = "v6.5.2" + _ids = { + "latest[git]": "Use a commit hash or tag instead of 'latest' for git", + "latest[hg]": "Use a commit hash or tag instead of 'latest' for hg", + } + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str | MatchError: + """Check if module args are safe.""" + if ( + task["action"]["__ansible_module__"] == "git" + and task["action"].get("version", "HEAD") == "HEAD" + ): + return self.create_matcherror(tag="latest[git]", filename=file) + if ( + task["action"]["__ansible_module__"] == "hg" + and task["action"].get("revision", "default") == "default" + ): + return self.create_matcherror(tag="latest[hg]", filename=file) + return False diff --git a/src/ansiblelint/rules/literal_compare.md b/src/ansiblelint/rules/literal_compare.md new file mode 100644 index 0000000..5e25394 --- /dev/null +++ b/src/ansiblelint/rules/literal_compare.md @@ -0,0 +1,32 @@ +# literal-compare + +This rule checks for literal comparison with the `when` clause. +Literal comparison, like `when: var == True`, is unnecessarily complex. +Use `when: var` to keep your playbooks simple. + +Similarly, a check like `when: var != True` or `when: var == False` +should be replaced with `when: not var`. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Print environment variable to stdout + ansible.builtin.command: echo $MY_ENV_VAR + when: ansible_os_family == True # <- Adds complexity to your playbook. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Print environment variable to stdout + ansible.builtin.command: echo $MY_ENV_VAR + when: ansible_os_family # <- Keeps your playbook simple. 
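+    # Illustrative addition (not part of the upstream example): with a
+    # hypothetical boolean variable, `when: my_feature_enabled == False`
+    # simplifies to the negated form below.
+    - name: Print environment variable to stdout only when the feature is off
+      ansible.builtin.command: echo $MY_ENV_VAR
+      when: not my_feature_enabled # <- Replaces a literal comparison with False.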
+``` diff --git a/src/ansiblelint/rules/literal_compare.py b/src/ansiblelint/rules/literal_compare.py new file mode 100644 index 0000000..1129d1d --- /dev/null +++ b/src/ansiblelint/rules/literal_compare.py @@ -0,0 +1,86 @@ +"""Implementation of the literal-compare rule.""" +# Copyright (c) 2016, Will Thames and contributors +# Copyright (c) 2018-2021, Ansible Project + +from __future__ import annotations + +import re +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.yaml_utils import nested_items_path + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class ComparisonToLiteralBoolRule(AnsibleLintRule): + """Don't compare to literal True/False.""" + + id = "literal-compare" + description = ( + "Use ``when: var`` rather than ``when: var == True`` " + "(or conversely ``when: not var``)" + ) + severity = "HIGH" + tags = ["idiom"] + version_added = "v4.0.0" + + literal_bool_compare = re.compile("[=!]= ?(True|true|False|false)") + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + for k, v, _ in nested_items_path(task): + if k == "when": + if isinstance(v, str): + if self.literal_bool_compare.search(v): + return True + elif isinstance(v, bool): + pass + else: + for item in v: + if isinstance(item, str) and self.literal_bool_compare.search( + item, + ): + return True + + return False + + +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failures"), + ( + pytest.param( + "examples/playbooks/rule_literal_compare_fail.yml", + 3, + id="fail", + ), + pytest.param( + "examples/playbooks/rule_literal_compare_pass.yml", + 0, + id="pass", + ), + ), + ) + def test_literal_compare( + default_rules_collection: RulesCollection, + test_file: str, + failures: int, + ) -> None: + """Test rule matches.""" + # Enable checking of loop variable prefixes in roles + results = Runner(test_file, rules=default_rules_collection).run() + for result in results: + assert result.rule.id == "literal-compare" + assert len(results) == failures diff --git a/src/ansiblelint/rules/loop_var_prefix.md b/src/ansiblelint/rules/loop_var_prefix.md new file mode 100644 index 0000000..33adbd7 --- /dev/null +++ b/src/ansiblelint/rules/loop_var_prefix.md @@ -0,0 +1,78 @@ +# loop-var-prefix + +This rule avoids conflicts with nested looping tasks by configuring a variable +prefix with `loop_var`. Ansible sets `item` as the loop variable. You can use +`loop_var` to specify a prefix for loop variables and ensure they are unique to +each task. + +This rule can produce the following messages: + +- `loop-var-prefix[missing]` - Replace any unsafe implicit `item` loop variable + by adding `loop_var: <loop_var_prefix>...`. +- `loop-var-prefix[wrong]` - Ensure loop variables start with + `<loop_var_prefix>`. + +This rule originates from the [Naming parameters section of Ansible Best +Practices guide][cop314]. + +## Settings + +You can change the behavior of this rule by overriding its default regular +expression used to check loop variable naming. Keep in mind that the `{role}` +part is replaced with the inferred role name when applicable. + +```yaml +# .ansible-lint +loop_var_prefix: "^(__|{role}_)" +``` + +This is an opt-in rule. 
You must enable it in your Ansible-lint configuration as +follows: + +```yaml +enable_list: + - loop-var-prefix +``` + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Does not set a prefix for loop variables. + ansible.builtin.debug: + var: item + loop: + - foo + - bar # <- These items do not have a unique prefix. + - name: Sets a prefix that is not unique. + ansible.builtin.debug: + var: zz_item + loop: + - foo + - bar + loop_control: + loop_var: zz_item # <- This prefix is not unique. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Sets a unique prefix for loop variables. + ansible.builtin.debug: + var: zz_item + loop: + - foo + - bar + loop_control: + loop_var: my_prefix # <- Specifies a unique prefix for loop variables. +``` + +[cop314]: + https://redhat-cop.github.io/automation-good-practices/#_naming_parameters diff --git a/src/ansiblelint/rules/loop_var_prefix.py b/src/ansiblelint/rules/loop_var_prefix.py new file mode 100644 index 0000000..8f1bb56 --- /dev/null +++ b/src/ansiblelint/rules/loop_var_prefix.py @@ -0,0 +1,113 @@ +"""Optional Ansible-lint rule to enforce use of prefix on role loop vars.""" +from __future__ import annotations + +import re +import sys +from typing import TYPE_CHECKING + +from ansiblelint.config import LOOP_VAR_PREFIX, options +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.text import toidentifier + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class RoleLoopVarPrefix(AnsibleLintRule): + """Role loop_var should use configured prefix.""" + + id = "loop-var-prefix" + link = ( + "https://docs.ansible.com/ansible/latest/playbook_guide/" + "playbooks_loops.html#defining-inner-and-outer-variable-names-with-loop-var" + ) + description = """\ +Looping inside roles has the risk of clashing with loops from user-playbooks.\ +""" + + tags = ["idiom"] + prefix = re.compile("") + severity = "MEDIUM" + _ids = { + "loop-var-prefix[wrong]": "Loop variable name does not match regex.", + "loop-var-prefix[missing]": "Replace unsafe implicit `item` loop variable.", + } + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + """Return matches for a task.""" + if not file or not file.role or not options.loop_var_prefix: + return [] + + self.prefix = re.compile( + options.loop_var_prefix.format(role=toidentifier(file.role)), + ) + has_loop = "loop" in task.raw_task + for key in task.raw_task: + if key.startswith("with_"): + has_loop = True + + if has_loop: + loop_control = task.raw_task.get("loop_control", {}) + loop_var = loop_control.get("loop_var", "") + + if loop_var: + if not self.prefix.match(loop_var): + return [ + self.create_matcherror( + message=f"Loop variable name does not match /{options.loop_var_prefix}/ regex, where role={toidentifier(file.role)}.", + filename=file, + tag="loop-var-prefix[wrong]", + ), + ] + else: + return [ + self.create_matcherror( + message=f"Replace unsafe implicit `item` loop variable by adding a `loop_var` that is matching /{options.loop_var_prefix}/ regex.", + filename=file, + tag="loop-var-prefix[missing]", + ), + ] + + return [] + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import 
Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failures"), + ( + pytest.param( + "examples/playbooks/roles/loop_var_prefix/tasks/pass.yml", + 0, + id="pass", + ), + pytest.param( + "examples/playbooks/roles/loop_var_prefix/tasks/fail.yml", + 6, + id="fail", + ), + ), + ) + def test_loop_var_prefix( + default_rules_collection: RulesCollection, + test_file: str, + failures: int, + ) -> None: + """Test rule matches.""" + # Enable checking of loop variable prefixes in roles + options.loop_var_prefix = LOOP_VAR_PREFIX + results = Runner(test_file, rules=default_rules_collection).run() + for result in results: + assert result.rule.id == RoleLoopVarPrefix().id + assert len(results) == failures diff --git a/src/ansiblelint/rules/meta_incorrect.md b/src/ansiblelint/rules/meta_incorrect.md new file mode 100644 index 0000000..b1e8793 --- /dev/null +++ b/src/ansiblelint/rules/meta_incorrect.md @@ -0,0 +1,32 @@ +# meta-incorrect + +This rule checks role metadata for fields with undefined or default values. +Always set appropriate values for the following metadata fields in the `meta/main.yml` file: + +- `author` +- `description` +- `company` +- `license` + +## Problematic Code + +```yaml +--- +# Metadata fields for the role contain default values. +galaxy_info: + author: your name + description: your role description + company: your company (optional) + license: license (GPL-2.0-or-later, MIT, etc) +``` + +## Correct Code + +```yaml +--- +galaxy_info: + author: Leroy Jenkins + description: This role will set you free. + company: Red Hat + license: Apache +``` diff --git a/src/ansiblelint/rules/meta_incorrect.py b/src/ansiblelint/rules/meta_incorrect.py new file mode 100644 index 0000000..4252254 --- /dev/null +++ b/src/ansiblelint/rules/meta_incorrect.py @@ -0,0 +1,77 @@ +"""Implementation of meta-incorrect rule.""" +# Copyright (c) 2018, Ansible Project +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.constants import LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + + +class MetaChangeFromDefaultRule(AnsibleLintRule): + """meta/main.yml default values should be changed.""" + + id = "meta-incorrect" + field_defaults = [ + ("author", "your name"), + ("description", "your description"), + ("company", "your company (optional)"), + ("license", "license (GPLv2, CC-BY, etc)"), + ("license", "license (GPL-2.0-or-later, MIT, etc)"), + ] + values = ", ".join(sorted({f[0] for f in field_defaults})) + description = ( + f"You should set appropriate values in meta/main.yml for these fields: {values}" + ) + severity = "HIGH" + tags = ["metadata"] + version_added = "v4.0.0" + + def matchyaml(self, file: Lintable) -> list[MatchError]: + if file.kind != "meta" or not file.data: + return [] + + galaxy_info = file.data.get("galaxy_info", None) + if not galaxy_info: + return [] + + results = [] + for field, default in self.field_defaults: + value = galaxy_info.get(field, None) + if value and value == default: + results.append( + self.create_matcherror( + filename=file, + lineno=file.data[LINE_NUMBER_KEY], + message=f"Should change default metadata: {field}", + ), + ) + + return results + + +if "pytest" in sys.modules: + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + def 
test_default_galaxy_info( + default_rules_collection: RulesCollection, + ) -> None: + """Test for meta-incorrect.""" + results = Runner( + "examples/roles/meta_incorrect_fail", + rules=default_rules_collection, + ).run() + for result in results: + assert result.rule.id == "meta-incorrect" + assert len(results) == 4 + + assert "Should change default metadata: author" in str(results) + assert "Should change default metadata: description" in str(results) + assert "Should change default metadata: company" in str(results) + assert "Should change default metadata: license" in str(results) diff --git a/src/ansiblelint/rules/meta_no_tags.md b/src/ansiblelint/rules/meta_no_tags.md new file mode 100644 index 0000000..9518549 --- /dev/null +++ b/src/ansiblelint/rules/meta_no_tags.md @@ -0,0 +1,22 @@ +# meta-no-tags + +This rule checks role metadata for tags with special characters. +Always use lowercase numbers and letters for tags in the `meta/main.yml` file. + +## Problematic Code + +```yaml +--- +# Metadata tags contain upper-case letters and special characters. +galaxy_info: + galaxy_tags: [MyTag#1, MyTag&^-] +``` + +## Correct Code + +```yaml +--- +# Metadata tags contain only lowercase letters and numbers. +galaxy_info: + galaxy_tags: [mytag1, mytag2] +``` diff --git a/src/ansiblelint/rules/meta_no_tags.py b/src/ansiblelint/rules/meta_no_tags.py new file mode 100644 index 0000000..c27a30e --- /dev/null +++ b/src/ansiblelint/rules/meta_no_tags.py @@ -0,0 +1,159 @@ +"""Implementation of meta-no-tags rule.""" +from __future__ import annotations + +import re +import sys +from pathlib import Path +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +# Copyright (c) 2018, Ansible Project + + +if TYPE_CHECKING: + from typing import Any + + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.testing import RunFromText + + +class MetaTagValidRule(AnsibleLintRule): + """Tags must contain lowercase letters and digits only.""" + + id = "meta-no-tags" + description = ( + "Tags must contain lowercase letters and digits only, " + "and ``galaxy_tags`` is expected to be a list" + ) + severity = "HIGH" + tags = ["metadata"] + version_added = "v4.0.0" + + TAG_REGEXP = re.compile("^[a-z0-9]+$") + + def matchyaml(self, file: Lintable) -> list[MatchError]: + """Find violations inside meta files.""" + if file.kind != "meta" or not file.data: + return [] + + galaxy_info = file.data.get("galaxy_info", None) + if not galaxy_info: + return [] + + tags = [] + results = [] + + if "galaxy_tags" in galaxy_info: + if isinstance(galaxy_info["galaxy_tags"], list): + tags += galaxy_info["galaxy_tags"] + else: + results.append( + self.create_matcherror( + "Expected 'galaxy_tags' to be a list", + filename=file, + ), + ) + + if "categories" in galaxy_info: + results.append( + self.create_matcherror( + "Use 'galaxy_tags' rather than 'categories'", + filename=file, + ), + ) + if isinstance(galaxy_info["categories"], list): + tags += galaxy_info["categories"] + else: + results.append( + self.create_matcherror( + "Expected 'categories' to be a list", + filename=file, + ), + ) + + for tag in tags: + msg = self.shortdesc + if not isinstance(tag, str): + results.append( + self.create_matcherror( + f"Tags must be strings: '{tag}'", + filename=file, + ), + ) + continue + if not re.match(self.TAG_REGEXP, tag): + results.append( + self.create_matcherror( + message=f"{msg}, invalid: '{tag}'", + filename=file, + ), + ) + + return results + + +# testing code 
to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + @pytest.mark.parametrize( + "rule_runner", + (MetaTagValidRule,), + indirect=["rule_runner"], + ) + def test_valid_tag_rule(rule_runner: RunFromText) -> None: + """Test rule matches.""" + results = rule_runner.run( + Path("examples/roles/meta_no_tags_valid/meta/main.yml"), + ) + assert "Use 'galaxy_tags' rather than 'categories'" in str(results), results + assert "Expected 'categories' to be a list" in str(results) + assert "invalid: 'my s q l'" in str(results) + assert "invalid: 'MYTAG'" in str(results) + + @pytest.mark.parametrize( + "rule_runner", + (MetaTagValidRule,), + indirect=["rule_runner"], + ) + def test_meta_not_tags(rule_runner: Any) -> None: + """Test rule matches.""" + results = rule_runner.run( + "examples/roles/meta_no_tags_galaxy_info/meta/main.yml", + ) + assert results == [] + + @pytest.mark.parametrize( + "rule_runner", + (MetaTagValidRule,), + indirect=["rule_runner"], + ) + def test_no_galaxy_tags_list(rule_runner: Any) -> None: + """Test rule matches.""" + results = rule_runner.run("examples/roles/meta_tags_no_list/meta/main.yml") + assert "Expected 'galaxy_tags' to be a list" in str(results) + + @pytest.mark.parametrize( + "rule_runner", + (MetaTagValidRule,), + indirect=["rule_runner"], + ) + def test_galaxy_categories_as_list(rule_runner: Any) -> None: + """Test rule matches.""" + results = rule_runner.run( + "examples/roles/meta_categories_as_list/meta/main.yml", + ) + assert "Use 'galaxy_tags' rather than 'categories'" in str(results), results + assert "Expected 'categories' to be a list" not in str(results) + + @pytest.mark.parametrize( + "rule_runner", + (MetaTagValidRule,), + indirect=["rule_runner"], + ) + def test_tags_not_a_string(rule_runner: Any) -> None: + """Test rule matches.""" + results = rule_runner.run("examples/roles/meta_tags_not_a_string/meta/main.yml") + assert "Tags must be strings" in str(results) diff --git a/src/ansiblelint/rules/meta_runtime.md b/src/ansiblelint/rules/meta_runtime.md new file mode 100644 index 0000000..6ed6f17 --- /dev/null +++ b/src/ansiblelint/rules/meta_runtime.md @@ -0,0 +1,46 @@ +# meta-runtime + +This rule checks the meta/runtime.yml `requires_ansible` key against the list of currently supported versions of ansible-core. + +This rule can produce messages such: + +- `requires_ansible` key must be set to a supported version. + +Currently supported versions of ansible-core are: + +- `2.9.10` +- `2.11.x` +- `2.12.x` +- `2.13.x` +- `2.14.x` +- `2.15.x` +- `2.16.x` (in development) + +This rule can produce messages such as: + +- `meta-runtime[unsupported-version]` - `requires_ansible` key must contain a supported version, shown in the list above. +- `meta-runtime[invalid-version]` - `requires_ansible` key must be a valid version identifier. 
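As a rough sketch of how these two checks behave (the real logic lives in `meta_runtime.py`, shown later in this diff, which uses the same supported-version prefixes and `packaging` specifier parsing):

```python
# Rough sketch only; meta_runtime.py (later in this diff) holds the real rule.
from __future__ import annotations

from packaging.specifiers import SpecifierSet

SUPPORTED_ANSIBLE = ("2.9.10", "2.11.", "2.12.", "2.13.", "2.14.", "2.15.", "2.16.")


def check_requires_ansible(value: str) -> list[str]:
    """Return the meta-runtime tags a requires_ansible value would trigger."""
    tags = []
    if not any(version in value for version in SUPPORTED_ANSIBLE):
        tags.append("meta-runtime[unsupported-version]")
    try:
        SpecifierSet(value)  # a bare "2.9" is not a valid version specifier
    except ValueError:
        tags.append("meta-runtime[invalid-version]")
    return tags


print(check_requires_ansible(">=2.9"))     # ['meta-runtime[unsupported-version]']
print(check_requires_ansible("2.9"))       # both tags
print(check_requires_ansible(">=2.9.10"))  # []
```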
+ + +## Problematic code + +```yaml +# runtime.yml +--- +requires_ansible: ">=2.9" +``` + + +```yaml +# runtime.yml +--- +requires_ansible: "2.9" +``` + +## Correct code + +```yaml +# runtime.yml +--- +requires_ansible: ">=2.9.10" +``` diff --git a/src/ansiblelint/rules/meta_runtime.py b/src/ansiblelint/rules/meta_runtime.py new file mode 100644 index 0000000..fed7121 --- /dev/null +++ b/src/ansiblelint/rules/meta_runtime.py @@ -0,0 +1,126 @@ +"""Implementation of meta-runtime rule.""" +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from packaging.specifiers import SpecifierSet + +from ansiblelint.rules import AnsibleLintRule + +# Copyright (c) 2018, Ansible Project + + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + + +class CheckRequiresAnsibleVersion(AnsibleLintRule): + """Required ansible version in meta/runtime.yml must be a supported version.""" + + id = "meta-runtime" + description = ( + "The ``requires_ansible`` key in runtime.yml must specify " + "a supported platform version of ansible-core and be a valid version." + ) + severity = "VERY_HIGH" + tags = ["metadata"] + version_added = "v6.11.0 (last update)" + + # Refer to https://access.redhat.com/support/policy/updates/ansible-automation-platform + # Also add devel to this list + supported_ansible = ["2.9.10", "2.11.", "2.12.", "2.13.", "2.14.", "2.15.", "2.16."] + _ids = { + "meta-runtime[unsupported-version]": "requires_ansible key must be set to a supported version.", + "meta-runtime[invalid-version]": "'requires_ansible' is not a valid requirement specification", + } + + def matchyaml(self, file: Lintable) -> list[MatchError]: + """Find violations inside meta files. + + :param file: Input lintable file that is a match for `meta-runtime` + :returns: List of errors matched to the input file + """ + results = [] + + if file.kind != "meta-runtime": + return [] + + version_required = file.data.get("requires_ansible", None) + + if version_required: + if not any( + version in version_required for version in self.supported_ansible + ): + results.append( + self.create_matcherror( + message="requires_ansible key must be set to a supported version.", + tag="meta-runtime[unsupported-version]", + filename=file, + ), + ) + + try: + SpecifierSet(version_required) + except ValueError: + results.append( + self.create_matcherror( + message="'requires_ansible' is not a valid requirement specification", + tag="meta-runtime[invalid-version]", + filename=file, + ), + ) + + return results + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failures", "tags"), + ( + pytest.param( + "examples/meta_runtime_version_checks/pass/meta/runtime.yml", + 0, + "meta-runtime[unsupported-version]", + id="pass", + ), + pytest.param( + "examples/meta_runtime_version_checks/fail_0/meta/runtime.yml", + 1, + "meta-runtime[unsupported-version]", + id="fail0", + ), + pytest.param( + "examples/meta_runtime_version_checks/fail_1/meta/runtime.yml", + 1, + "meta-runtime[unsupported-version]", + id="fail1", + ), + pytest.param( + "examples/meta_runtime_version_checks/fail_2/meta/runtime.yml", + 1, + "meta-runtime[invalid-version]", + id="fail2", + ), + ), + ) + def test_meta_supported_version( + 
default_rules_collection: RulesCollection, + test_file: str, + failures: int, + tags: str, + ) -> None: + """Test rule matches.""" + default_rules_collection.register(CheckRequiresAnsibleVersion()) + results = Runner(test_file, rules=default_rules_collection).run() + for result in results: + assert result.rule.id == CheckRequiresAnsibleVersion().id + assert result.tag == tags + assert len(results) == failures diff --git a/src/ansiblelint/rules/meta_video_links.md b/src/ansiblelint/rules/meta_video_links.md new file mode 100644 index 0000000..c3f051b --- /dev/null +++ b/src/ansiblelint/rules/meta_video_links.md @@ -0,0 +1,36 @@ +# meta-video-links + +This rule checks formatting for video links in metadata. Always use dictionaries +for items in the `meta/main.yml` file. + +Items in the `video_links` section must be in a dictionary and use the following +keys: + +- `url` +- `title` + +The value of the `url` key must be a shared link from YouTube, Vimeo, or Google +Drive. + +## Problematic Code + +```yaml +--- +galaxy_info: + video_links: + - https://www.youtube.com/watch?v=aWmRepTSFKs&feature=youtu.be # <- Does not use the url key. + - my_bad_key: https://www.youtube.com/watch?v=aWmRepTSFKs&feature=youtu.be # <- Uses an unsupported key. + title: Incorrect key. + - url: www.acme.com/vid # <- Uses an unsupported url format. + title: Incorrect url format. +``` + +## Correct Code + +```yaml +--- +galaxy_info: + video_links: + - url: https://www.youtube.com/watch?v=aWmRepTSFKs&feature=youtu.be # <- Uses a supported shared link with the url key. + title: Correctly formatted video link. +``` diff --git a/src/ansiblelint/rules/meta_video_links.py b/src/ansiblelint/rules/meta_video_links.py new file mode 100644 index 0000000..5d4941a --- /dev/null +++ b/src/ansiblelint/rules/meta_video_links.py @@ -0,0 +1,122 @@ +"""Implementation of meta-video-links rule.""" +# Copyright (c) 2018, Ansible Project +from __future__ import annotations + +import re +import sys +from typing import TYPE_CHECKING + +from ansiblelint.constants import FILENAME_KEY, LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from collections.abc import Sequence + + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + + +class MetaVideoLinksRule(AnsibleLintRule): + """meta/main.yml video_links should be formatted correctly.""" + + id = "meta-video-links" + description = ( + "Items in ``video_links`` in meta/main.yml should be " + "dictionaries, and contain only keys ``url`` and ``title``, " + "and have a shared link from a supported provider" + ) + severity = "LOW" + tags = ["metadata"] + version_added = "v4.0.0" + + VIDEO_REGEXP = { + "google": re.compile(r"https://drive\.google\.com.*file/d/([0-9A-Za-z-_]+)/.*"), + "vimeo": re.compile(r"https://vimeo\.com/([0-9]+)"), + "youtube": re.compile(r"https://youtu\.be/([0-9A-Za-z-_]+)"), + } + + def matchyaml(self, file: Lintable) -> list[MatchError]: + if file.kind != "meta" or not file.data: + return [] + + galaxy_info = file.data.get("galaxy_info", None) + if not galaxy_info: + return [] + + video_links = galaxy_info.get("video_links", None) + if not video_links: + return [] + + results = [] + + for video in video_links: + if not isinstance(video, dict): + results.append( + self.create_matcherror( + "Expected item in 'video_links' to be a dictionary", + filename=file, + ), + ) + continue + + if set(video) != {"url", "title", FILENAME_KEY, LINE_NUMBER_KEY}: + results.append( + self.create_matcherror( + "Expected item in 
'video_links' to contain " + "only keys 'url' and 'title'", + filename=file, + ), + ) + continue + + for expr in self.VIDEO_REGEXP.values(): + if expr.match(video["url"]): + break + else: + msg = ( + f"URL format '{video['url']}' is not recognized. " + "Expected it be a shared link from Vimeo, YouTube, " + "or Google Drive." + ) + results.append(self.create_matcherror(msg, filename=file)) + + return results + + +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failures"), + ( + pytest.param( + "examples/roles/meta_video_links_fail/meta/main.yml", + ( + "Expected item in 'video_links' to be a dictionary", + "Expected item in 'video_links' to contain only keys 'url' and 'title'", + "URL format 'https://www.youtube.com/watch?v=aWmRepTSFKs&feature=youtu.be' is not recognized. Expected it be a shared link from Vimeo, YouTube, or Google Drive.", + "URL format 'www.acme.com/vid' is not recognized", + ), + id="1", + ), + pytest.param( + "examples/roles/meta_video_links_pass/meta/main.yml", + (), + id="2", + ), + ), + ) + def test_video_links( + default_rules_collection: RulesCollection, + test_file: str, + failures: Sequence[str], + ) -> None: + """Test rule matches.""" + results = Runner(test_file, rules=default_rules_collection).run() + assert len(results) == len(failures) + for index, result in enumerate(results): + assert result.tag == "meta-video-links" + assert failures[index] in result.message diff --git a/src/ansiblelint/rules/name.md b/src/ansiblelint/rules/name.md new file mode 100644 index 0000000..9df4213 --- /dev/null +++ b/src/ansiblelint/rules/name.md @@ -0,0 +1,61 @@ +# name + +This rule identifies several problems related to the naming of tasks and plays. +This is important because these names are the primary way to **identify** and +**document** executed operations on the console, logs or web interface. + +This rule can produce messages as: + +- `name[casing]` - All names should start with an uppercase letter for languages + that support it. +- `name[missing]` - All tasks should be named. +- `name[play]` - All plays should be named. +- `name[prefix]` - Prefix task names in sub-tasks files. (opt-in) +- `name[template]` - Jinja templates should only be at the end of 'name'. This + helps with the identification of tasks inside the source code when they fail. + The use of templating inside `name` keys is discouraged as there are multiple + cases where the rendering of the name template is not possible. + +If you want to ignore some of the messages above, you can add any of them to the +`skip_list`. + +## name[prefix] + +This rule applies only to included task files that are not named `main.yml`. It +suggests adding the stem of the file as a prefix to the task name. + +For example, if you have a task named `Restart server` inside a file named +`tasks/deploy.yml`, this rule suggests renaming it to `deploy | Restart server`, +so it would be easier to identify where it comes from. + +For the moment, this sub-rule is just an **opt-in**, so you need to add it to +your `enable_list` to activate it. + +!!! note + + This rule was designed by [Red Hat Community of Practice](https://redhat-cop.github.io/automation-good-practices/#_prefix_task_names_in_sub_tasks_files_of_roles). 
The reasoning behind it being + that in a complex roles or playbooks with multiple (sub-)tasks file, it becomes + difficult to understand which task belongs to which file. Adding a prefix, in + combination with the role’s name automatically added by Ansible, makes it a + lot easier to follow and troubleshoot a role play. + +## Problematic code + +```yaml +--- +- hosts: localhost # <-- playbook name[play] + tasks: + - name: create placefolder file # <-- name[casing] due lack of capital letter + ansible.builtin.command: touch /tmp/.placeholder +``` + +## Correct code + +```yaml +--- +- name: Play for creating placeholder + hosts: localhost + tasks: + - name: Create placeholder file + ansible.builtin.command: touch /tmp/.placeholder +``` diff --git a/src/ansiblelint/rules/name.py b/src/ansiblelint/rules/name.py new file mode 100644 index 0000000..41ce5cb --- /dev/null +++ b/src/ansiblelint/rules/name.py @@ -0,0 +1,260 @@ +"""Implementation of NameRule.""" +from __future__ import annotations + +import re +import sys +from copy import deepcopy +from typing import TYPE_CHECKING, Any + +from ansiblelint.constants import LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule, TransformMixin + +if TYPE_CHECKING: + from ruamel.yaml.comments import CommentedMap, CommentedSeq + + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class NameRule(AnsibleLintRule, TransformMixin): + """Rule for checking task and play names.""" + + id = "name" + description = ( + "All tasks and plays should have a distinct name for readability " + "and for ``--start-at-task`` to work" + ) + severity = "MEDIUM" + tags = ["idiom"] + version_added = "v6.9.1 (last update)" + _re_templated_inside = re.compile(r".*\{\{.*\}\}.*\w.*$") + _ids = { + "name[play]": "All plays should be named.", + "name[missing]": "All tasks should be named.", + "name[prefix]": "Task name should start with a prefix.", + "name[casing]": "All names should start with an uppercase letter.", + "name[template]": "Jinja templates should only be at the end of 'name'", + } + + def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]: + """Return matches found for a specific play (entry in playbook).""" + results = [] + if file.kind != "playbook": + return [] + if "name" not in data: + return [ + self.create_matcherror( + message="All plays should be named.", + lineno=data[LINE_NUMBER_KEY], + tag="name[play]", + filename=file, + ), + ] + results.extend( + self._check_name( + data["name"], + lintable=file, + lineno=data[LINE_NUMBER_KEY], + ), + ) + return results + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + results = [] + name = task.get("name") + if not name: + results.append( + self.create_matcherror( + message="All tasks should be named.", + lineno=task[LINE_NUMBER_KEY], + tag="name[missing]", + filename=file, + ), + ) + else: + results.extend( + self._prefix_check( + name, + lintable=file, + lineno=task[LINE_NUMBER_KEY], + ), + ) + return results + + def _prefix_check( + self, + name: str, + lintable: Lintable | None, + lineno: int, + ) -> list[MatchError]: + results: list[MatchError] = [] + effective_name = name + if lintable is None: + return [] + + if not results: + results.extend( + self._check_name( + effective_name, + lintable=lintable, + lineno=lineno, + ), + ) + return results + + def _check_name( + self, + name: str, + lintable: Lintable | None, + lineno: int, + ) -> list[MatchError]: + # 
This rules applies only to languages that do have uppercase and + # lowercase letter, so we ignore anything else. On Unicode isupper() + # is not necessarily the opposite of islower() + results = [] + # stage one check prefix + effective_name = name + if self._collection and lintable: + prefix = self._collection.options.task_name_prefix.format( + stem=lintable.path.stem, + ) + if lintable.kind == "tasks" and lintable.path.stem != "main": + if not name.startswith(prefix): + # For the moment in order to raise errors this rule needs to be + # enabled manually. Still, we do allow use of prefixes even without + # having to enable the rule. + if "name[prefix]" in self._collection.options.enable_list: + results.append( + self.create_matcherror( + message=f"Task name should start with '{prefix}'.", + lineno=lineno, + tag="name[prefix]", + filename=lintable, + ), + ) + return results + else: + effective_name = name[len(prefix) :] + + if ( + effective_name[0].isalpha() + and effective_name[0].islower() + and not effective_name[0].isupper() + ): + results.append( + self.create_matcherror( + message="All names should start with an uppercase letter.", + lineno=lineno, + tag="name[casing]", + filename=lintable, + ), + ) + if self._re_templated_inside.match(name): + results.append( + self.create_matcherror( + message="Jinja templates should only be at the end of 'name'", + lineno=lineno, + tag="name[template]", + filename=lintable, + ), + ) + return results + + def transform( + self, + match: MatchError, + lintable: Lintable, + data: CommentedMap | CommentedSeq | str, + ) -> None: + if match.tag == "name[casing]": + target_task = self.seek(match.yaml_path, data) + # Not using capitalize(), since that rewrites the rest of the name to lower case + target_task[ + "name" + ] = f"{target_task['name'][:1].upper()}{target_task['name'][1:]}" + match.fixed = True + + +if "pytest" in sys.modules: + from ansiblelint.config import options + from ansiblelint.file_utils import Lintable # noqa: F811 + from ansiblelint.rules import RulesCollection + from ansiblelint.runner import Runner + + def test_file_positive() -> None: + """Positive test for name[missing].""" + collection = RulesCollection() + collection.register(NameRule()) + success = "examples/playbooks/rule-name-missing-pass.yml" + good_runner = Runner(success, rules=collection) + assert [] == good_runner.run() + + def test_file_negative() -> None: + """Negative test for name[missing].""" + collection = RulesCollection() + collection.register(NameRule()) + failure = "examples/playbooks/rule-name-missing-fail.yml" + bad_runner = Runner(failure, rules=collection) + errs = bad_runner.run() + assert len(errs) == 5 + + def test_name_prefix_negative() -> None: + """Negative test for name[missing].""" + custom_options = deepcopy(options) + custom_options.enable_list = ["name[prefix]"] + collection = RulesCollection(options=custom_options) + collection.register(NameRule()) + failure = Lintable( + "examples/playbooks/tasks/rule-name-prefix-fail.yml", + kind="tasks", + ) + bad_runner = Runner(failure, rules=collection) + results = bad_runner.run() + assert len(results) == 3 + # , "\n".join(results) + assert results[0].tag == "name[casing]" + assert results[1].tag == "name[prefix]" + assert results[2].tag == "name[prefix]" + + def test_rule_name_lowercase() -> None: + """Negative test for a task that starts with lowercase.""" + collection = RulesCollection() + collection.register(NameRule()) + failure = "examples/playbooks/rule-name-casing.yml" + bad_runner = 
Runner(failure, rules=collection) + errs = bad_runner.run() + assert len(errs) == 1 + assert errs[0].tag == "name[casing]" + assert errs[0].rule.id == "name" + + def test_name_play() -> None: + """Positive test for name[play].""" + collection = RulesCollection() + collection.register(NameRule()) + success = "examples/playbooks/rule-name-play-fail.yml" + errs = Runner(success, rules=collection).run() + assert len(errs) == 1 + assert errs[0].tag == "name[play]" + assert errs[0].rule.id == "name" + + def test_name_template() -> None: + """Negative test for name[templated].""" + collection = RulesCollection() + collection.register(NameRule()) + failure = "examples/playbooks/rule-name-templated-fail.yml" + bad_runner = Runner(failure, rules=collection) + errs = bad_runner.run() + assert len(errs) == 1 + assert errs[0].tag == "name[template]" + + def test_when_no_lintable() -> None: + """Test when lintable is None.""" + name_rule = NameRule() + # pylint: disable=protected-access + result = name_rule._prefix_check("Foo", None, 1) # noqa: SLF001 + assert len(result) == 0 diff --git a/src/ansiblelint/rules/no_changed_when.md b/src/ansiblelint/rules/no_changed_when.md new file mode 100644 index 0000000..95c1d46 --- /dev/null +++ b/src/ansiblelint/rules/no_changed_when.md @@ -0,0 +1,49 @@ +# no-changed-when + +This rule checks that tasks return changes to results or conditions. Unless +tasks only read information, you should ensure that they return changes in the +following ways: + +- Register results or conditions and use the `changed_when` clause. +- Use the `creates` or `removes` argument. + +You should always use the `changed_when` clause on tasks that do not naturally +detect if a change has occurred or not. Some of the most common examples are +[shell] and [command] modules, which run arbitrary commands. + +One very common workaround is to use a boolean value like `changed_when: false` +if the task never changes anything or `changed_when: true` if it always changes +something, but you can also use any expressions, including ones that use the +registered result of a task, like in our example below. + +This rule also applies to handlers, not only to tasks because they are also +tasks. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Does not handle any output or return codes + ansible.builtin.command: cat {{ my_file | quote }} # <- Does not handle the command output. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Handle shell output with return code + ansible.builtin.command: cat {{ my_file | quote }} + register: my_output # <- Registers the command output. + changed_when: my_output.rc != 0 # <- Uses the return code to define when the task has changed. 
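+    # Illustrative addition (not part of the upstream example): a purely
+    # read-only command can simply declare that it never reports a change.
+    - name: Read the file without reporting a change
+      ansible.builtin.command: cat {{ my_file | quote }}
+      changed_when: false # <- The task only reads information.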
+``` + +[shell]: + https://docs.ansible.com/ansible/latest/collections/ansible/builtin/shell_module.html +[command]: + https://docs.ansible.com/ansible/latest/collections/ansible/builtin/command_module.html diff --git a/src/ansiblelint/rules/no_changed_when.py b/src/ansiblelint/rules/no_changed_when.py new file mode 100644 index 0000000..28ba427 --- /dev/null +++ b/src/ansiblelint/rules/no_changed_when.py @@ -0,0 +1,106 @@ +"""Implementation of the no-changed-when rule.""" +# Copyright (c) 2016 Will Thames <will@thames.id.au> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class CommandHasChangesCheckRule(AnsibleLintRule): + """Commands should not change things if nothing needs doing.""" + + id = "no-changed-when" + severity = "HIGH" + tags = ["command-shell", "idempotency"] + version_added = "historic" + + _commands = [ + "ansible.builtin.command", + "ansible.builtin.shell", + "ansible.builtin.raw", + "ansible.legacy.command", + "ansible.legacy.shell", + "ansible.legacy.raw", + "command", + "shell", + "raw", + ] + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + result = [] + # tasks in a block are "meta" type + if ( + task["__ansible_action_type__"] in ["task", "meta"] + and task["action"]["__ansible_module__"] in self._commands + and ( + "changed_when" not in task.raw_task + and "creates" not in task["action"] + and "removes" not in task["action"] + ) + ): + result.append(self.create_matcherror(filename=file)) + return result + + +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("file", "expected"), + ( + pytest.param( + "examples/playbooks/rule-no-changed-when-pass.yml", + 0, + id="pass", + ), + pytest.param( + "examples/playbooks/rule-no-changed-when-fail.yml", + 3, + id="fail", + ), + ), + ) + def test_rule_no_changed_when( + default_rules_collection: RulesCollection, + file: str, + expected: int, + ) -> None: + """Validate no-changed-when rule.""" + results = Runner(file, rules=default_rules_collection).run() + + for result in results: + assert result.rule.id 
== CommandHasChangesCheckRule.id, result + assert len(results) == expected diff --git a/src/ansiblelint/rules/no_free_form.md b/src/ansiblelint/rules/no_free_form.md new file mode 100644 index 0000000..0ffc0ac --- /dev/null +++ b/src/ansiblelint/rules/no_free_form.md @@ -0,0 +1,58 @@ +# no-free-form + +This rule identifies any use of +[free-form](https://docs.ansible.com/ansible/2.7/user_guide/playbooks_intro.html#action-shorthand) +module calling syntax and asks for switching to the full syntax. + +**Free-form** syntax, also known as **inline** or **shorthand**, can produce +subtle bugs. It can also prevent editors and IDEs from providing feedback, +autocomplete and validation for the edited line. + +!!! note + + As long you just pass a YAML string that contains a `=` character inside as the + parameter to the action module name, we consider this as using free-form syntax. + Be sure you pass a dictionary to the module, so the free-form parsing is never + triggered. + +As `raw` module only accepts free-form, we trigger `no-free-form[raw]` only if +we detect the presence of `executable=` inside raw calls. We advise the explicit +use of `args:` for configuring the executable to be run. + +This rule can produce messages as: + +- `no-free-form` - Free-form syntax is discouraged. +- `no-free-form[raw-non-string]` - Passing a non-string value to `raw` module is + neither documented nor supported. + +## Problematic code + +```yaml +--- +- name: Example with discouraged free-form syntax + hosts: localhost + tasks: + - name: Create a placefolder file + ansible.builtin.command: chdir=/tmp touch foo # <-- don't use free-form + - name: Use raw to echo + ansible.builtin.raw: executable=/bin/bash echo foo # <-- don't use executable= + changed_when: false +``` + +## Correct code + +```yaml +--- +- name: Example that avoids free-form syntax + hosts: localhost + tasks: + - name: Create a placefolder file + ansible.builtin.command: + cmd: touch foo # <-- ansible will not touch it + chdir: /tmp + - name: Use raw to echo + ansible.builtin.raw: echo foo + args: + executable: /bin/bash # <-- explicit is better + changed_when: false +``` diff --git a/src/ansiblelint/rules/no_free_form.py b/src/ansiblelint/rules/no_free_form.py new file mode 100644 index 0000000..e89333b --- /dev/null +++ b/src/ansiblelint/rules/no_free_form.py @@ -0,0 +1,116 @@ +"""Implementation of NoFreeFormRule.""" +from __future__ import annotations + +import re +import sys +from typing import TYPE_CHECKING + +from ansiblelint.constants import INCLUSION_ACTION_NAMES, LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class NoFreeFormRule(AnsibleLintRule): + """Rule for detecting discouraged free-form syntax for action modules.""" + + id = "no-free-form" + description = "Avoid free-form inside files as it can produce subtle bugs." 
+ severity = "MEDIUM"
+ tags = ["syntax", "risk"]
+ version_added = "v6.8.0"
+ needs_raw_task = True
+ cmd_shell_re = re.compile(
+ r"(chdir|creates|executable|removes|stdin|stdin_add_newline|warn)=",
+ )
+ _ids = {
+ "no-free-form[raw]": "Avoid embedding `executable=` inside raw calls, use explicit args dictionary instead.",
+ "no-free-form[raw-non-string]": "Passing a non-string value to `raw` module is neither documented nor supported.",
+ }
+
+ def matchtask(
+ self,
+ task: Task,
+ file: Lintable | None = None,
+ ) -> list[MatchError]:
+ results: list[MatchError] = []
+ action = task["action"]["__ansible_module_original__"]
+
+ if action in INCLUSION_ACTION_NAMES:
+ return results
+
+ action_value = task["__raw_task__"].get(action, None)
+ if task["action"].get("__ansible_module__", None) == "raw":
+ if isinstance(action_value, str):
+ if "executable=" in action_value:
+ results.append(
+ self.create_matcherror(
+ message="Avoid embedding `executable=` inside raw calls, use explicit args dictionary instead.",
+ lineno=task[LINE_NUMBER_KEY],
+ filename=file,
+ tag=f"{self.id}[raw]",
+ ),
+ )
+ else:
+ results.append(
+ self.create_matcherror(
+ message="Passing a non-string value to `raw` module is neither documented nor supported.",
+ lineno=task[LINE_NUMBER_KEY],
+ filename=file,
+ tag=f"{self.id}[raw-non-string]",
+ ),
+ )
+ elif isinstance(action_value, str) and "=" in action_value:
+ fail = False
+ if task["action"].get("__ansible_module__") in (
+ "ansible.builtin.command",
+ "ansible.builtin.shell",
+ "ansible.windows.win_command",
+ "ansible.windows.win_shell",
+ "command",
+ "shell",
+ "win_command",
+ "win_shell",
+ ):
+ if self.cmd_shell_re.match(action_value):
+ fail = True
+ else:
+ fail = True
+ if fail:
+ results.append(
+ self.create_matcherror(
+ message=f"Avoid using free-form when calling module actions. ({action})",
+ lineno=task[LINE_NUMBER_KEY],
+ filename=file,
+ ),
+ )
+ return results
+
+
+if "pytest" in sys.modules:
+ import pytest
+
+ from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports
+ from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports
+
+ @pytest.mark.parametrize(
+ ("file", "expected"),
+ (
+ pytest.param("examples/playbooks/rule-no-free-form-pass.yml", 0, id="pass"),
+ pytest.param("examples/playbooks/rule-no-free-form-fail.yml", 3, id="fail"),
+ ),
+ )
+ def test_rule_no_free_form(
+ default_rules_collection: RulesCollection,
+ file: str,
+ expected: int,
+ ) -> None:
+ """Validate that rule works as intended."""
+ results = Runner(file, rules=default_rules_collection).run()
+
+ for result in results:
+ assert result.rule.id == NoFreeFormRule.id, result
+ assert len(results) == expected
diff --git a/src/ansiblelint/rules/no_handler.md b/src/ansiblelint/rules/no_handler.md
new file mode 100644
index 0000000..4deccaa
--- /dev/null
+++ b/src/ansiblelint/rules/no_handler.md
@@ -0,0 +1,55 @@
+# no-handler
+
+This rule checks for the correct handling of changes to results or conditions.
+
+If a task has a `when: result.changed` condition, it effectively acts as a
+[handler](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers).
+The recommended approach is to use `notify` and move tasks to `handlers`.
+If necessary, you can silence the rule by adding a `# noqa: no-handler` comment at the end of the line.
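+
+As a quick illustration, based on the matcher in `no_handler.py` (a sketch, not an
+exhaustive list of spellings), each of the following conditions is treated as
+change-triggered and is flagged when used on a task (assuming `result` was
+registered by an earlier task):
+
+```yaml
+---
+- name: Conditions that trigger no-handler
+ hosts: localhost
+ tasks:
+ - name: Dot notation
+ ansible.builtin.debug:
+ msg: changed
+ when: result.changed
+ - name: Jinja test
+ ansible.builtin.debug:
+ msg: changed
+ when: result is changed
+ - name: Bracket notation
+ ansible.builtin.debug:
+ msg: changed
+ when: result['changed']
+```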
+ +## Problematic Code + +```yaml +--- +- name: Example of no-handler rule + hosts: localhost + tasks: + - name: Register result of a task + ansible.builtin.copy: + dest: "/tmp/placeholder" + content: "Ansible made this!" + mode: 0600 + register: result # <-- Registers the result of the task. + - name: Second command to run + ansible.builtin.debug: + msg: The placeholder file was modified! + when: result.changed # <-- Triggers the no-handler rule. +``` + +```yaml +--- +# Optionally silences the rule. +when: result.changed # noqa: no-handler +``` + +## Correct Code + +The following code includes the same functionality as the problematic code without recording a `result` variable. + +```yaml +--- +- name: Example of no-handler rule + hosts: localhost + tasks: + - name: Register result of a task + ansible.builtin.copy: + dest: "/tmp/placeholder" + content: "Ansible made this!" + mode: 0600 + notify: + - Second command to run # <-- Handler runs only when the file changes. + handlers: + - name: Second command to run + ansible.builtin.debug: + msg: The placeholder file was modified! +``` diff --git a/src/ansiblelint/rules/no_handler.py b/src/ansiblelint/rules/no_handler.py new file mode 100644 index 0000000..380fd61 --- /dev/null +++ b/src/ansiblelint/rules/no_handler.py @@ -0,0 +1,108 @@ +# Copyright (c) 2016 Will Thames <will@thames.id.au> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +"""UseHandlerRatherThanWhenChangedRule used with ansible-lint.""" +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +def _changed_in_when(item: str) -> bool: + if not isinstance(item, str): + return False + item_list = item.split() + + if {"and", "or", "not"} & set(item_list): + return False + return any( + changed in item + for changed in [ + ".changed", + "|changed", + '["changed"]', + "['changed']", + "is changed", + ] + ) + + +class UseHandlerRatherThanWhenChangedRule(AnsibleLintRule): + """Tasks that run when changed should likely be handlers.""" + + id = "no-handler" + description = ( + "If a task has a ``when: result.changed`` setting, it is effectively " + "acting as a handler. You could use ``notify`` and move that task to " + "``handlers``." 
+ )
+ link = "https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers"
+ severity = "MEDIUM"
+ tags = ["idiom"]
+ version_added = "historic"
+
+ def matchtask(
+ self,
+ task: Task,
+ file: Lintable | None = None,
+ ) -> bool | str:
+ if task["__ansible_action_type__"] != "task":
+ return False
+
+ when = task.get("when")
+
+ if isinstance(when, list):
+ if len(when) > 1:
+ return False
+ return _changed_in_when(when[0])
+ if isinstance(when, str):
+ return _changed_in_when(when)
+ return False
+
+
+if "pytest" in sys.modules:
+ import pytest
+
+ from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports
+ from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports
+
+ @pytest.mark.parametrize(
+ ("test_file", "failures"),
+ (
+ pytest.param("examples/playbooks/no_handler_fail.yml", 5, id="fail"),
+ pytest.param("examples/playbooks/no_handler_pass.yml", 0, id="pass"),
+ ),
+ )
+ def test_no_handler(
+ default_rules_collection: RulesCollection,
+ test_file: str,
+ failures: int,
+ ) -> None:
+ """Test rule matches."""
+ results = Runner(test_file, rules=default_rules_collection).run()
+ assert len(results) == failures
+ for result in results:
+ assert result.tag == "no-handler"
diff --git a/src/ansiblelint/rules/no_jinja_when.md b/src/ansiblelint/rules/no_jinja_when.md
new file mode 100644
index 0000000..702e807
--- /dev/null
+++ b/src/ansiblelint/rules/no_jinja_when.md
@@ -0,0 +1,32 @@
+# no-jinja-when
+
+This rule checks conditional statements for Jinja expressions in curly brackets `{{ }}`.
+Ansible processes conditional statements that use the `when`, `failed_when`, and `changed_when` clauses as Jinja expressions.
+
+A general rule in Ansible is to always use `{{ }}`, except with `when` keys.
+Using `{{ }}` in conditionals creates a nested expression, which is an Ansible
+anti-pattern and does not produce expected results.
+
+## Problematic Code
+
+```yaml
+---
+- name: Example playbook
+ hosts: localhost
+ tasks:
+ - name: Shut down Debian systems
+ ansible.builtin.command: /sbin/shutdown -t now
+ when: "{{ ansible_facts['os_family'] == 'Debian' }}" # <- Nests a Jinja expression in a conditional statement.
+```
+
+## Correct Code
+
+```yaml
+---
+- name: Example playbook
+ hosts: localhost
+ tasks:
+ - name: Shut down Debian systems
+ ansible.builtin.command: /sbin/shutdown -t now
+ when: ansible_facts['os_family'] == "Debian" # <- Uses facts in a conditional statement.
+```
diff --git a/src/ansiblelint/rules/no_jinja_when.py b/src/ansiblelint/rules/no_jinja_when.py
new file mode 100644
index 0000000..807081d
--- /dev/null
+++ b/src/ansiblelint/rules/no_jinja_when.py
@@ -0,0 +1,90 @@
+"""Implementation of no-jinja-when rule."""
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, Any
+
+from ansiblelint.constants import LINE_NUMBER_KEY
+from ansiblelint.rules import AnsibleLintRule
+
+if TYPE_CHECKING:
+ from ansiblelint.errors import MatchError
+ from ansiblelint.file_utils import Lintable
+ from ansiblelint.utils import Task
+
+
+class NoFormattingInWhenRule(AnsibleLintRule):
+ """No Jinja2 in when."""
+
+ id = "no-jinja-when"
+ description = (
+ "``when`` is a raw Jinja2 expression, remove redundant {{ }} from variable(s)."
+ ) + severity = "HIGH" + tags = ["deprecations"] + version_added = "historic" + + @staticmethod + def _is_valid(when: str) -> bool: + if isinstance(when, list): + for item in when: + if ( + isinstance(item, str) + and item.find("{{") != -1 + and item.find("}}") != -1 + ): + return False + return True + if not isinstance(when, str): + return True + return when.find("{{") == -1 and when.find("}}") == -1 + + def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]: + errors: list[MatchError] = [] + if isinstance(data, dict): + if "roles" not in data or data["roles"] is None: + return errors + for role in data["roles"]: + if ( + isinstance(role, dict) + and "when" in role + and not self._is_valid(role["when"]) + ): + errors.append( + self.create_matcherror( + details=str({"when": role}), + filename=file, + lineno=role[LINE_NUMBER_KEY], + ), + ) + return errors + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + return "when" in task.raw_task and not self._is_valid(task.raw_task["when"]) + + +if "pytest" in sys.modules: + # Tests for no-jinja-when rule. + from ansiblelint.rules import RulesCollection + from ansiblelint.runner import Runner + + def test_jinja_file_positive() -> None: + """Positive test for no-jinja-when.""" + collection = RulesCollection() + collection.register(NoFormattingInWhenRule()) + success = "examples/playbooks/rule-no-jinja-when-pass.yml" + good_runner = Runner(success, rules=collection) + assert [] == good_runner.run() + + def test_jinja_file_negative() -> None: + """Negative test for no-jinja-when.""" + collection = RulesCollection() + collection.register(NoFormattingInWhenRule()) + failure = "examples/playbooks/rule-no-jinja-when-fail.yml" + bad_runner = Runner(failure, rules=collection) + errs = bad_runner.run() + assert len(errs) == 3 diff --git a/src/ansiblelint/rules/no_log_password.md b/src/ansiblelint/rules/no_log_password.md new file mode 100644 index 0000000..579dd11 --- /dev/null +++ b/src/ansiblelint/rules/no_log_password.md @@ -0,0 +1,45 @@ +# no-log-password + +This rule ensures playbooks do not write passwords to logs when using loops. +Always set the `no_log: true` attribute to protect sensitive data. + +While most Ansible modules mask sensitive data, using secrets inside a loop can result in those secrets being logged. +Explicitly adding `no_log: true` prevents accidentally exposing secrets. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Log user passwords + ansible.builtin.user: + name: john_doe + comment: John Doe + uid: 1040 + group: admin + password: "{{ item }}" + with_items: + - wow + no_log: false # <- Sets the no_log attribute to false. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Do not log user passwords + ansible.builtin.user: + name: john_doe + comment: John Doe + uid: 1040 + group: admin + password: "{{ item }}" + with_items: + - wow + no_log: true # <- Sets the no_log attribute to a non-false value. +``` diff --git a/src/ansiblelint/rules/no_log_password.py b/src/ansiblelint/rules/no_log_password.py new file mode 100644 index 0000000..7cc7439 --- /dev/null +++ b/src/ansiblelint/rules/no_log_password.py @@ -0,0 +1,306 @@ +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""NoLogPasswordsRule used with ansible-lint.""" +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.utils import Task, convert_to_boolean + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + + +class NoLogPasswordsRule(AnsibleLintRule): + """Password should not be logged.""" + + id = "no-log-password" + description = ( + "When passing password argument you should have no_log configured " + "to a non False value to avoid accidental leaking of secrets." + ) + severity = "LOW" + tags = ["opt-in", "security", "experimental"] + version_added = "v5.0.9" + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + if task["action"]["__ansible_module_original__"] == "ansible.builtin.user" and ( + task["action"].get("password_lock") and not task["action"].get("password") + ): + has_password = False + else: + for param in task["action"]: + if "password" in param: + has_password = True + break + else: + has_password = False + + has_loop = [key for key in task if key.startswith("with_") or key == "loop"] + # No no_log and no_log: False behave the same way + # and should return a failure (return True), so we + # need to invert the boolean + no_log = task.get("no_log", False) + + if ( + isinstance(no_log, str) + and no_log.startswith("{{") + and no_log.endswith("}}") + ): + # we cannot really evaluate jinja expressions + return False + + return bool( + has_password and not convert_to_boolean(no_log) and len(has_loop) > 0, + ) + + +if "pytest" in sys.modules: + import pytest + + if TYPE_CHECKING: + from ansiblelint.testing import RunFromText # pylint: disable=ungrouped-imports + + NO_LOG_UNUSED = """ +- name: Test + hosts: all + tasks: + - name: Succeed when no_log is not used but no loop present + ansible.builtin.user: + name: john_doe + password: "wow" + state: absent +""" + + NO_LOG_FALSE = """ +- hosts: all + tasks: + - name: Use of jinja for no_log is valid + user: + name: john_doe + user_password: "{{ item }}" + state: absent + no_log: "{{ False }}" + - name: Fail when no_log is set to False + user: + name: john_doe + user_password: "{{ item }}" + state: absent + with_items: + - wow + - now + no_log: False + - name: Fail when no_log is set to False + ansible.builtin.user: + name: john_doe + user_password: "{{ item }}" + state: absent + with_items: + - wow + - now + no_log: False +""" + + NO_LOG_NO = """ +- hosts: all + tasks: + - name: Fail when no_log is set to no + user: + name: john_doe + password: "{{ item }}" + state: absent + no_log: no + loop: + - wow + - now +""" + + PASSWORD_WITH_LOCK = """ +- hosts: all + tasks: + - name: Fail when password is set and password_lock is true + user: + name: "{{ item }}" + password: "wow" + password_lock: true + with_random_choice: + - ansible + - lint +""" + + NO_LOG_YES = """ +- hosts: all + tasks: + - name: Succeed when no_log is set to yes + with_list: + - name: user + password: wow + - password: now + name: ansible + user: + name: "{{ item.name }}" + password: "{{ item.password }}" + state: 
absent + no_log: yes +""" + + NO_LOG_TRUE = """ +- hosts: all + tasks: + - name: Succeed when no_log is set to True + user: + name: john_doe + user_password: "{{ item }}" + state: absent + no_log: True + loop: + - wow + - now +""" + + PASSWORD_LOCK_YES = """ +- hosts: all + tasks: + - name: Succeed when only password locking account + user: + name: "{{ item }}" + password_lock: yes + # user_password: "this is a comment, not a password" + with_list: + - ansible + - lint +""" + + PASSWORD_LOCK_YES_BUT_NO_PASSWORD = """ +- hosts: all + tasks: + - name: Succeed when only password locking account + ansible.builtin.user: + name: "{{ item }}" + password_lock: yes + # user_password: "this is a comment, not a password" + with_list: + - ansible + - lint +""" + + PASSWORD_LOCK_FALSE = """ +- hosts: all + tasks: + - name: Succeed when password_lock is false and password is not used + user: + name: lint + password_lock: False +""" + + @pytest.mark.parametrize( + "rule_runner", + (NoLogPasswordsRule,), + indirect=["rule_runner"], + ) + def test_no_log_unused(rule_runner: RunFromText) -> None: + """The task does not use no_log but also no loop.""" + results = rule_runner.run_playbook(NO_LOG_UNUSED) + assert len(results) == 0 + + @pytest.mark.parametrize( + "rule_runner", + (NoLogPasswordsRule,), + indirect=["rule_runner"], + ) + def test_no_log_false(rule_runner: RunFromText) -> None: + """The task sets no_log to false.""" + results = rule_runner.run_playbook(NO_LOG_FALSE) + assert len(results) == 2 + for result in results: + assert result.rule.id == "no-log-password" + + @pytest.mark.parametrize( + "rule_runner", + (NoLogPasswordsRule,), + indirect=["rule_runner"], + ) + def test_no_log_no(rule_runner: RunFromText) -> None: + """The task sets no_log to no.""" + results = rule_runner.run_playbook(NO_LOG_NO) + assert len(results) == 1 + assert results[0].rule.id == "no-log-password" + + @pytest.mark.parametrize( + "rule_runner", + (NoLogPasswordsRule,), + indirect=["rule_runner"], + ) + def test_password_with_lock(rule_runner: RunFromText) -> None: + """The task sets a password but also lock the user.""" + results = rule_runner.run_playbook(PASSWORD_WITH_LOCK) + assert len(results) == 1 + assert results[0].rule.id == "no-log-password" + + @pytest.mark.parametrize( + "rule_runner", + (NoLogPasswordsRule,), + indirect=["rule_runner"], + ) + def test_no_log_yes(rule_runner: RunFromText) -> None: + """The task sets no_log to yes.""" + results = rule_runner.run_playbook(NO_LOG_YES) + assert len(results) == 0 + + @pytest.mark.parametrize( + "rule_runner", + (NoLogPasswordsRule,), + indirect=["rule_runner"], + ) + def test_no_log_true(rule_runner: RunFromText) -> None: + """The task sets no_log to true.""" + results = rule_runner.run_playbook(NO_LOG_TRUE) + assert len(results) == 0 + + @pytest.mark.parametrize( + "rule_runner", + (NoLogPasswordsRule,), + indirect=["rule_runner"], + ) + def test_no_log_password_lock_yes(rule_runner: RunFromText) -> None: + """The task only locks the user.""" + results = rule_runner.run_playbook(PASSWORD_LOCK_YES) + assert len(results) == 0 + + @pytest.mark.parametrize( + "rule_runner", + (NoLogPasswordsRule,), + indirect=["rule_runner"], + ) + def test_no_log_password_lock_yes_but_no_password(rule_runner: RunFromText) -> None: + """The task only locks the user.""" + results = rule_runner.run_playbook(PASSWORD_LOCK_YES_BUT_NO_PASSWORD) + assert len(results) == 0 + + @pytest.mark.parametrize( + "rule_runner", + (NoLogPasswordsRule,), + indirect=["rule_runner"], + ) + def 
test_password_lock_false(rule_runner: RunFromText) -> None:
+ """The task does not actually lock the user."""
+ results = rule_runner.run_playbook(PASSWORD_LOCK_FALSE)
+ assert len(results) == 0
diff --git a/src/ansiblelint/rules/no_prompting.md b/src/ansiblelint/rules/no_prompting.md
new file mode 100644
index 0000000..7e525c8
--- /dev/null
+++ b/src/ansiblelint/rules/no_prompting.md
@@ -0,0 +1,35 @@
+# no-prompting
+
+This rule checks for `vars_prompt` or the `ansible.builtin.pause` module in playbooks.
+You should enable this rule to ensure that playbooks can run unattended and in CI/CD pipelines.
+
+This is an opt-in rule.
+You must enable it in your Ansible-lint configuration as follows:
+
+```yaml
+enable_list:
+ - no-prompting
+```
+
+## Problematic Code
+
+```yaml
+---
+- name: Example playbook
+ hosts: all
+ vars_prompt: # <- Prompts the user to input credentials.
+ - name: username
+ prompt: What is your username?
+ private: false
+
+ - name: password
+ prompt: What is your password?
+ tasks:
+ - name: Pause for 5 minutes
+ ansible.builtin.pause:
+ minutes: 5 # <- Pauses playbook execution for a set period of time.
+```
+
+## Correct Code
+
+Correct code for this rule is to omit `vars_prompt` and the `ansible.builtin.pause` module from your playbook.
diff --git a/src/ansiblelint/rules/no_prompting.py b/src/ansiblelint/rules/no_prompting.py
new file mode 100644
index 0000000..6622771
--- /dev/null
+++ b/src/ansiblelint/rules/no_prompting.py
@@ -0,0 +1,76 @@
+"""Implementation of no-prompting rule."""
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING, Any
+
+from ansiblelint.constants import LINE_NUMBER_KEY
+from ansiblelint.rules import AnsibleLintRule
+
+if TYPE_CHECKING:
+ from ansiblelint.errors import MatchError
+ from ansiblelint.file_utils import Lintable
+ from ansiblelint.utils import Task
+
+
+class NoPromptingRule(AnsibleLintRule):
+ """Disallow prompting."""
+
+ id = "no-prompting"
+ description = (
+ "Disallow the use of vars_prompt or ansible.builtin.pause to better "
+ "accommodate unattended playbook runs and use in CI pipelines."
+ )
+ tags = ["opt-in"]
+ severity = "VERY_LOW"
+ version_added = "v6.0.3"
+
+ def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
+ """Return matches found for a specific playbook."""
+ # If the Play uses the 'vars_prompt' section to set variables
+
+ if file.kind != "playbook": # pragma: no cover
+ return []
+
+ vars_prompt = data.get("vars_prompt", None)
+ if not vars_prompt:
+ return []
+ return [
+ self.create_matcherror(
+ message="Play uses vars_prompt",
+ lineno=vars_prompt[0][LINE_NUMBER_KEY],
+ filename=file,
+ ),
+ ]
+
+ def matchtask(
+ self,
+ task: Task,
+ file: Lintable | None = None,
+ ) -> bool | str:
+ """Return matches for ansible.builtin.pause tasks."""
+ # We do not want to trigger this rule if pause has either seconds or
+ # minutes defined, as that does not make it blocking.
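+ # For example, a pause with `seconds: 10` or `minutes: 1` is a timed delay
+ # and is not flagged, while a bare `ansible.builtin.pause:` (which prompts
+ # for user input) is.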
+ return task["action"]["__ansible_module_original__"] in [ + "pause", + "ansible.builtin.pause", + ] and not ( + task["action"].get("minutes", None) or task["action"].get("seconds", None) + ) + + +if "pytest" in sys.modules: + from ansiblelint.config import options + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + def test_no_prompting_fail() -> None: + """Negative test for no-prompting.""" + # For testing we want to manually enable opt-in rules + options.enable_list = ["no-prompting"] + rules = RulesCollection(options=options) + rules.register(NoPromptingRule()) + results = Runner("examples/playbooks/rule-no-prompting.yml", rules=rules).run() + assert len(results) == 2 + for result in results: + assert result.rule.id == "no-prompting" diff --git a/src/ansiblelint/rules/no_relative_paths.md b/src/ansiblelint/rules/no_relative_paths.md new file mode 100644 index 0000000..568a145 --- /dev/null +++ b/src/ansiblelint/rules/no_relative_paths.md @@ -0,0 +1,94 @@ +# no-relative-paths + +This rule checks for relative paths in the `ansible.builtin.copy` and +`ansible.builtin.template` modules. + +Relative paths in a task most often direct Ansible to remote files and +directories on managed nodes. In the `ansible.builtin.copy` and +`ansible.builtin.template` modules, the `src` argument refers to local files and +directories on the control node. + +The recommended locations to store files are as follows: + +- Use the `files/` folder in the playbook or role directory for the `copy` + module. +- Use the `templates/` folder in the playbook or role directory for the + `template` module. + +These folders allow you to omit the path or use a sub-folder when specifying +files with the `src` argument. + +!!! note + + If resources are outside your Ansible playbook or role directory you should use an absolute path with the `src` argument. + +!!! warning + + Do not store resources at the same directory level as your Ansible playbook or tasks files. + Doing this can result in disorganized projects and cause user confusion when distinguishing between resources of the same type, such as YAML. + +See +[task paths](https://docs.ansible.com/ansible/latest/playbook_guide/playbook_pathing.html#task-paths) +in the Ansible documentation for more information. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Template a file to /etc/file.conf + ansible.builtin.template: + src: ../my_templates/foo.j2 # <- Uses a relative path in the src argument. + dest: /etc/file.conf + owner: bin + group: wheel + mode: "0644" +``` + +```yaml +- name: Example playbook + hosts: all + vars: + source_path: ../../my_templates/foo.j2 # <- Sets a variable to a relative path. + tasks: + - name: Copy a file to /etc/file.conf + ansible.builtin.copy: + src: "{{ source_path }}" # <- Uses the variable in the src argument. + dest: /etc/foo.conf + owner: foo + group: foo + mode: "0644" +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Template a file to /etc/file.conf + ansible.builtin.template: + src: foo.j2 # <- Uses a path from inside templates/ directory. + dest: /etc/file.conf + owner: bin + group: wheel + mode: "0644" +``` + +```yaml +- name: Example playbook + hosts: all + vars: + source_path: foo.j2 # <- Uses a path from inside files/ directory. 
+ tasks: + - name: Copy a file to /etc/file.conf + ansible.builtin.copy: + src: "{{ source_path }}" # <- Uses the variable in the src argument. + dest: /etc/foo.conf + owner: foo + group: foo + mode: "0644" +``` diff --git a/src/ansiblelint/rules/no_relative_paths.py b/src/ansiblelint/rules/no_relative_paths.py new file mode 100644 index 0000000..470b1b8 --- /dev/null +++ b/src/ansiblelint/rules/no_relative_paths.py @@ -0,0 +1,75 @@ +"""Implementation of no-relative-paths rule.""" +# Copyright (c) 2016, Tsukinowa Inc. <info@tsukinowa.jp> +# Copyright (c) 2018, Ansible Project + +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class RoleRelativePath(AnsibleLintRule): + """The src argument should not use a relative path.""" + + id = "no-relative-paths" + description = "The ``copy`` and ``template`` modules should not use relative path for ``src``." + severity = "HIGH" + tags = ["idiom"] + version_added = "v4.0.0" + + _module_to_path_folder = { + "copy": "files", + "win_copy": "files", + "template": "templates", + "win_template": "win_templates", + } + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + module = task["action"]["__ansible_module__"] + if module not in self._module_to_path_folder: + return False + + if "src" not in task["action"]: + return False + + path_to_check = f"../{self._module_to_path_folder[module]}" + if path_to_check in task["action"]["src"]: + return True + + return False + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failures"), + ( + pytest.param("examples/playbooks/no_relative_paths_fail.yml", 2, id="fail"), + pytest.param("examples/playbooks/no_relative_paths_pass.yml", 0, id="pass"), + ), + ) + def test_no_relative_paths( + default_rules_collection: RulesCollection, + test_file: str, + failures: int, + ) -> None: + """Test rule matches.""" + results = Runner(test_file, rules=default_rules_collection).run() + assert len(results) == failures + for result in results: + assert result.tag == "no-relative-paths" diff --git a/src/ansiblelint/rules/no_same_owner.md b/src/ansiblelint/rules/no_same_owner.md new file mode 100644 index 0000000..350a3d4 --- /dev/null +++ b/src/ansiblelint/rules/no_same_owner.md @@ -0,0 +1,55 @@ +# no-same-owner + +This rule checks that the owner and group do not transfer across hosts. + +In many cases the owner and group on remote hosts do not match the owner and group assigned to source files. +Preserving the owner and group during transfer can result in errors with permissions or leaking sensitive information. + +When you synchronize files, you should avoid transferring the owner and group by setting `owner: false` and `group: false` arguments. +When you unpack archives with the `ansible.builtin.unarchive` module you should set the `--no-same-owner` option. + +This is an opt-in rule. 
+You must enable it in your Ansible-lint configuration as follows:
+
+```yaml
+enable_list:
+ - no-same-owner
+```
+
+## Problematic Code
+
+```yaml
+---
+- name: Example playbook
+ hosts: all
+ tasks:
+ - name: Synchronize conf file
+ ansible.posix.synchronize:
+ src: /path/conf.yaml
+ dest: /path/conf.yaml # <- Transfers the owner and group for the file.
+ - name: Extract tarball to path
+ ansible.builtin.unarchive:
+ src: "{{ file }}.tar.gz"
+ dest: /my/path/ # <- Transfers the owner and group for the file.
+```
+
+## Correct Code
+
+```yaml
+---
+- name: Example playbook
+ hosts: all
+ tasks:
+ - name: Synchronize conf file
+ ansible.posix.synchronize:
+ src: /path/conf.yaml
+ dest: /path/conf.yaml
+ owner: false
+ group: false # <- Does not transfer the owner and group for the file.
+ - name: Extract tarball to path
+ ansible.builtin.unarchive:
+ src: "{{ file }}.tar.gz"
+ dest: /my/path/
+ extra_opts:
+ - --no-same-owner # <- Does not transfer the owner and group for the file.
+```
diff --git a/src/ansiblelint/rules/no_same_owner.py b/src/ansiblelint/rules/no_same_owner.py
new file mode 100644
index 0000000..021900e
--- /dev/null
+++ b/src/ansiblelint/rules/no_same_owner.py
@@ -0,0 +1,114 @@
+"""Optional rule for avoiding keeping owner/group when transferring files."""
+from __future__ import annotations
+
+import re
+import sys
+from typing import TYPE_CHECKING, Any
+
+from ansible.utils.sentinel import Sentinel
+
+from ansiblelint.rules import AnsibleLintRule
+
+if TYPE_CHECKING:
+ from ansiblelint.file_utils import Lintable
+ from ansiblelint.utils import Task
+
+
+class NoSameOwnerRule(AnsibleLintRule):
+ """Do not preserve the owner and group when transferring files across hosts."""
+
+ id = "no-same-owner"
+ description = """
+Optional rule that highlights the danger that a user/group present on the remote
+machines may not exist on the Ansible controller, or vice versa. Owner and group
+should not be preserved when transferring files between them.
+""" + severity = "LOW" + tags = ["opt-in"] + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + """Return matches for a task.""" + action = task.get("action") + if not isinstance(action, dict): # pragma: no cover + return False + + module = action["__ansible_module__"] + + if module in ["synchronize", "ansible.posix.synchronize"]: + return self.handle_synchronize(task, action) + + if module in ["unarchive", "ansible.builtin.unarchive"]: + return self.handle_unarchive(task, action) + + return False + + @staticmethod + def handle_synchronize(task: Any, action: dict[str, Any]) -> bool: + """Process a synchronize task.""" + if task.get("delegate_to") != Sentinel: + return False + + archive = action.get("archive", True) + if action.get("owner", archive) or action.get("group", archive): + return True + return False + + @staticmethod + def handle_unarchive(task: Any, action: dict[str, Any]) -> bool: + """Process unarchive task.""" + delegate_to = task.get("delegate_to") + if ( + delegate_to == "localhost" + or delegate_to != "localhost" + and not action.get("remote_src") + ): + src = action.get("src") + if not isinstance(src, str): + return False + + if src.endswith("zip") and "-X" in action.get("extra_opts", []): + return True + if re.search( + r".*\.tar(\.(gz|bz2|xz))?$", + src, + ) and "--no-same-owner" not in action.get("extra_opts", []): + return True + return False + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failures"), + ( + pytest.param( + "examples/roles/role_for_no_same_owner/tasks/fail.yml", + 12, + id="fail", + ), + pytest.param( + "examples/roles/role_for_no_same_owner/tasks/pass.yml", + 0, + id="pass", + ), + ), + ) + def test_no_same_owner_rule( + default_rules_collection: RulesCollection, + test_file: str, + failures: int, + ) -> None: + """Test rule matches.""" + results = Runner(test_file, rules=default_rules_collection).run() + assert len(results) == failures + for result in results: + assert result.message == NoSameOwnerRule().shortdesc diff --git a/src/ansiblelint/rules/no_tabs.md b/src/ansiblelint/rules/no_tabs.md new file mode 100644 index 0000000..7895122 --- /dev/null +++ b/src/ansiblelint/rules/no_tabs.md @@ -0,0 +1,38 @@ +# no-tabs + +This rule checks for the tab character. The `\t` tab character can result in +unexpected display or formatting issues. You should always use spaces instead of +tabs. + +!!! note + + This rule does not trigger alerts for tab characters in the ``ansible.builtin.lineinfile`` module. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Do not trigger the rule + ansible.builtin.lineinfile: + path: some.txt + regexp: '^\t$' + line: 'string with \t inside' + - name: Trigger the rule with a debug message + ansible.builtin.debug: + msg: "Using the \t character can cause formatting issues." # <- Includes the tab character. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Do not trigger the no-tabs rule + ansible.builtin.debug: + msg: "Using space characters avoids formatting issues." 
+``` diff --git a/src/ansiblelint/rules/no_tabs.py b/src/ansiblelint/rules/no_tabs.py new file mode 100644 index 0000000..c53f1bb --- /dev/null +++ b/src/ansiblelint/rules/no_tabs.py @@ -0,0 +1,67 @@ +"""Implementation of no-tabs rule.""" +# Copyright (c) 2016, Will Thames and contributors +# Copyright (c) 2018, Ansible Project +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.yaml_utils import nested_items_path + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class NoTabsRule(AnsibleLintRule): + """Most files should not contain tabs.""" + + id = "no-tabs" + description = "Tabs can cause unexpected display issues, use spaces" + severity = "LOW" + tags = ["formatting"] + version_added = "v4.0.0" + allow_list = [ + ("lineinfile", "insertafter"), + ("lineinfile", "insertbefore"), + ("lineinfile", "regexp"), + ("lineinfile", "line"), + ("ansible.builtin.lineinfile", "insertafter"), + ("ansible.builtin.lineinfile", "insertbefore"), + ("ansible.builtin.lineinfile", "regexp"), + ("ansible.builtin.lineinfile", "line"), + ("ansible.legacy.lineinfile", "insertafter"), + ("ansible.legacy.lineinfile", "insertbefore"), + ("ansible.legacy.lineinfile", "regexp"), + ("ansible.legacy.lineinfile", "line"), + ] + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + action = task["action"]["__ansible_module__"] + for k, v, _ in nested_items_path(task): + if isinstance(k, str) and "\t" in k: + return True + if isinstance(v, str) and "\t" in v and (action, k) not in self.allow_list: + return True + return False + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + def test_no_tabs_rule(default_rules_collection: RulesCollection) -> None: + """Test rule matches.""" + results = Runner( + "examples/playbooks/rule-no-tabs.yml", + rules=default_rules_collection, + ).run() + assert results[0].lineno == 10 + assert results[0].message == NoTabsRule().shortdesc + assert len(results) == 2 diff --git a/src/ansiblelint/rules/only_builtins.md b/src/ansiblelint/rules/only_builtins.md new file mode 100644 index 0000000..750e194 --- /dev/null +++ b/src/ansiblelint/rules/only_builtins.md @@ -0,0 +1,36 @@ +# only-builtins + +This rule checks that playbooks use actions from the `ansible.builtin` collection only. + +This is an opt-in rule. +You must enable it in your Ansible-lint configuration as follows: + +```yaml +enable_list: + - only-builtins +``` + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: all + tasks: + - name: Deploy a Helm chart for Prometheus + kubernetes.core.helm: # <- Uses a non-builtin collection. + name: test + chart_ref: stable/prometheus + release_namespace: monitoring + create_namespace: true +``` + +## Correct Code + +```yaml +- name: Example playbook + hosts: localhost + tasks: + - name: Run a shell command + ansible.builtin.shell: echo This playbook uses actions from the builtin collection only. 
+``` diff --git a/src/ansiblelint/rules/only_builtins.py b/src/ansiblelint/rules/only_builtins.py new file mode 100644 index 0000000..78ad93a --- /dev/null +++ b/src/ansiblelint/rules/only_builtins.py @@ -0,0 +1,106 @@ +"""Rule definition for usage of builtin actions only.""" +from __future__ import annotations + +import os +import sys +from typing import TYPE_CHECKING + +from ansiblelint.config import options +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.rules.fqcn import builtins +from ansiblelint.skip_utils import is_nested_task + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class OnlyBuiltinsRule(AnsibleLintRule): + """Use only builtin actions.""" + + id = "only-builtins" + severity = "MEDIUM" + description = "Check whether the playbook uses anything but ``ansible.builtin``" + tags = ["opt-in", "experimental"] + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + module = task["action"]["__ansible_module_original__"] + + allowed_collections = [ + "ansible.builtin", + "ansible.legacy", + *options.only_builtins_allow_collections, + ] + allowed_modules = builtins + options.only_builtins_allow_modules + + is_allowed = ( + any(module.startswith(f"{prefix}.") for prefix in allowed_collections) + or module in allowed_modules + ) + + return not is_allowed and not is_nested_task(task) + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + # pylint: disable=ungrouped-imports + import pytest + + from ansiblelint.constants import RC + from ansiblelint.testing import RunFromText, run_ansible_lint + + SUCCESS_PLAY = """ +- hosts: localhost + tasks: + - name: A block + block: + - name: Shell (fqcn) + ansible.builtin.shell: echo This rule should not get matched by the only-builtins rule + - name: Command with legacy FQCN + ansible.legacy.command: echo This rule should not get matched by the only-builtins rule + """ + + def test_only_builtins_fail() -> None: + """Test rule matches.""" + env = os.environ.copy() + env["NO_COLOR"] = "1" + result = run_ansible_lint( + "--strict", + "--warn-list=", + "--enable-list", + "only-builtins", + "examples/playbooks/rule-only-builtins.yml", + env=env, + ) + assert result.returncode == RC.VIOLATIONS_FOUND + assert "Failed" in result.stderr + assert "warning(s)" in result.stderr + assert "only-builtins: Use only builtin actions" in result.stdout + + def test_only_builtins_allow() -> None: + """Test rule doesn't match.""" + conf_path = "examples/playbooks/.ansible-lint-only-builtins-allow" + result = run_ansible_lint( + f"--config-file={conf_path}", + "--strict", + "--warn-list=", + "--enable-list", + "only-builtins", + "examples/playbooks/rule-only-builtins.yml", + ) + assert "only-builtins" not in result.stdout + assert result.returncode == RC.SUCCESS + + @pytest.mark.parametrize( + "rule_runner", + (OnlyBuiltinsRule,), + indirect=["rule_runner"], + ) + def test_only_builtin_pass(rule_runner: RunFromText) -> None: + """Test rule does not match.""" + results = rule_runner.run_playbook(SUCCESS_PLAY) + assert len(results) == 0, results diff --git a/src/ansiblelint/rules/package_latest.md b/src/ansiblelint/rules/package_latest.md new file mode 100644 index 0000000..c7e0d82 --- /dev/null +++ b/src/ansiblelint/rules/package_latest.md @@ -0,0 +1,71 @@ +# package-latest + +This rule checks that package managers install software in a controlled, safe manner. 
+ +Package manager modules, such as `ansible.builtin.yum`, include a `state` parameter that configures how Ansible installs software. +In production environments, you should set `state` to `present` and specify a target version to ensure that packages are installed to a planned and tested version. + +Setting `state` to `latest` not only installs software, it performs an update and installs additional packages. +This can result in performance degradation or loss of service. +If you do want to update packages to the latest version, you should also set the `update_only` parameter to `true` to avoid installing additional packages. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Install Ansible + ansible.builtin.yum: + name: ansible + state: latest # <- Installs the latest package. + + - name: Install Ansible-lint + ansible.builtin.pip: + name: ansible-lint + args: + state: latest # <- Installs the latest package. + + - name: Install some-package + ansible.builtin.package: + name: some-package + state: latest # <- Installs the latest package. + + - name: Install Ansible with update_only to false + ansible.builtin.yum: + name: sudo + state: latest + update_only: false # <- Updates and installs packages. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Install Ansible + ansible.builtin.yum: + name: ansible-2.12.7.0 + state: present # <- Pins the version to install with yum. + + - name: Install Ansible-lint + ansible.builtin.pip: + name: ansible-lint + args: + state: present + version: 5.4.0 # <- Pins the version to install with pip. + + - name: Install some-package + ansible.builtin.package: + name: some-package + state: present # <- Ensures the package is installed. + + - name: Update Ansible with update_only to true + ansible.builtin.yum: + name: sudo + state: latest + update_only: true # <- Updates but does not install additional packages. +``` diff --git a/src/ansiblelint/rules/package_latest.py b/src/ansiblelint/rules/package_latest.py new file mode 100644 index 0000000..a00a540 --- /dev/null +++ b/src/ansiblelint/rules/package_latest.py @@ -0,0 +1,83 @@ +"""Implementations of the package-latest rule.""" +# Copyright (c) 2016 Will Thames <will@thames.id.au> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
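+
+# Summary of the check below: flag tasks that set `state: latest` on any of the
+# known package manager modules unless the task pins a `version` or enables
+# `update_only`.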
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from ansiblelint.rules import AnsibleLintRule
+
+if TYPE_CHECKING:
+ from ansiblelint.file_utils import Lintable
+ from ansiblelint.utils import Task
+
+
+class PackageIsNotLatestRule(AnsibleLintRule):
+ """Package installs should not use latest."""
+
+ id = "package-latest"
+ description = (
+ "Package installs should use ``state=present`` with or without a version"
+ )
+ severity = "VERY_LOW"
+ tags = ["idempotency"]
+ version_added = "historic"
+
+ _package_managers = [
+ "apk",
+ "apt",
+ "bower",
+ "bundler",
+ "dnf",
+ "easy_install",
+ "gem",
+ "homebrew",
+ "jenkins_plugin",
+ "npm",
+ "openbsd_package",
+ "openbsd_pkg",
+ "package",
+ "pacman",
+ "pear",
+ "pip",
+ "pkg5",
+ "pkgutil",
+ "portage",
+ "slackpkg",
+ "sorcery",
+ "swdepot",
+ "win_chocolatey",
+ "yarn",
+ "yum",
+ "zypper",
+ ]
+
+ def matchtask(
+ self,
+ task: Task,
+ file: Lintable | None = None,
+ ) -> bool | str:
+ return (
+ task["action"]["__ansible_module__"] in self._package_managers
+ and not task["action"].get("version")
+ and not task["action"].get("update_only")
+ and task["action"].get("state") == "latest"
+ )
diff --git a/src/ansiblelint/rules/partial_become.md b/src/ansiblelint/rules/partial_become.md
new file mode 100644
index 0000000..01f9dae
--- /dev/null
+++ b/src/ansiblelint/rules/partial_become.md
@@ -0,0 +1,42 @@
+# partial-become
+
+This rule checks that privilege escalation is activated when changing users.
+
+To perform an action as a different user with the `become_user` directive, you
+must set `become: true`.
+
+!!! warning
+
+ While Ansible inherits `become` and `become_user` from upper levels, such as
+ the play level or the command line, this rule does not look at those values. It
+ requires you to be explicit and always define both in the same place, mainly to
+ prevent accidents when tasks are moved from one location to another.
+
+## Problematic Code
+
+```yaml
+---
+- name: Example playbook
+ hosts: localhost
+ tasks:
+ - name: Start the httpd service as the apache user
+ ansible.builtin.service:
+ name: httpd
+ state: started
+ become_user: apache # <- Does not change the user because "become: true" is not set.
+```
+
+## Correct Code
+
+```yaml
+- name: Example playbook
+ hosts: localhost
+ tasks:
+ - name: Start the httpd service as the apache user
+ ansible.builtin.service:
+ name: httpd
+ state: started
+ become: true # <- Activates privilege escalation.
+ become_user: apache # <- Changes the user with the desired privileges.
+```
diff --git a/src/ansiblelint/rules/partial_become.py b/src/ansiblelint/rules/partial_become.py
new file mode 100644
index 0000000..d14c06f
--- /dev/null
+++ b/src/ansiblelint/rules/partial_become.py
@@ -0,0 +1,138 @@
+"""Implementation of partial-become rule."""
+# Copyright (c) 2016 Will Thames <will@thames.id.au>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +from __future__ import annotations + +import sys +from functools import reduce +from typing import TYPE_CHECKING, Any + +from ansiblelint.constants import LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + + +def _get_subtasks(data: dict[str, Any]) -> list[Any]: + result: list[Any] = [] + block_names = [ + "tasks", + "pre_tasks", + "post_tasks", + "handlers", + "block", + "always", + "rescue", + ] + for name in block_names: + if data and name in data: + result += data[name] or [] + return result + + +def _nested_search(term: str, data: dict[str, Any]) -> Any: + if data and term in data: + return True + return reduce( + (lambda x, y: x or _nested_search(term, y)), + _get_subtasks(data), + False, + ) + + +def _become_user_without_become(becomeuserabove: bool, data: dict[str, Any]) -> Any: + if "become" in data: + # If become is in lineage of tree then correct + return False + if "become_user" in data and _nested_search("become", data): + # If 'become_user' on tree and become somewhere below + # we must check for a case of a second 'become_user' without a + # 'become' in its lineage + subtasks = _get_subtasks(data) + return reduce( + (lambda x, y: x or _become_user_without_become(False, y)), + subtasks, + False, + ) + if _nested_search("become_user", data): + # Keep searching down if 'become_user' exists in the tree below current task + subtasks = _get_subtasks(data) + return len(subtasks) == 0 or reduce( + ( + lambda x, y: x + or _become_user_without_become( + becomeuserabove or "become_user" in data, + y, + ) + ), + subtasks, + False, + ) + # If at bottom of tree, flag up if 'become_user' existed in the lineage of the tree and + # 'become' was not. 
This is an error if any lineage has a 'become_user' but no become + return becomeuserabove + + +class BecomeUserWithoutBecomeRule(AnsibleLintRule): + """become_user requires become to work as expected.""" + + id = "partial-become" + description = "``become_user`` without ``become`` will not actually change user" + severity = "VERY_HIGH" + tags = ["unpredictability"] + version_added = "historic" + + def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]: + if file.kind == "playbook": + result = _become_user_without_become(False, data) + if result: + return [ + self.create_matcherror( + message=self.shortdesc, + filename=file, + lineno=data[LINE_NUMBER_KEY], + ), + ] + return [] + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + def test_partial_become_positive() -> None: + """Positive test for partial-become.""" + collection = RulesCollection() + collection.register(BecomeUserWithoutBecomeRule()) + success = "examples/playbooks/rule-partial-become-without-become-pass.yml" + good_runner = Runner(success, rules=collection) + assert [] == good_runner.run() + + def test_partial_become_negative() -> None: + """Negative test for partial-become.""" + collection = RulesCollection() + collection.register(BecomeUserWithoutBecomeRule()) + failure = "examples/playbooks/rule-partial-become-without-become-fail.yml" + bad_runner = Runner(failure, rules=collection) + errs = bad_runner.run() + assert len(errs) == 3 diff --git a/src/ansiblelint/rules/playbook_extension.md b/src/ansiblelint/rules/playbook_extension.md new file mode 100644 index 0000000..dd0e475 --- /dev/null +++ b/src/ansiblelint/rules/playbook_extension.md @@ -0,0 +1,14 @@ +# playbook-extension + +This rule checks the file extension for playbooks is either `.yml` or `.yaml`. +Ansible playbooks are expressed in YAML format with minimal syntax. + +The [YAML syntax](https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html#yaml-syntax) reference provides additional detail. + +## Problematic Code + +This rule is triggered if Ansible playbooks do not have a file extension or use an unsupported file extension such as `playbook.json` or `playbook.xml`. + +## Correct Code + +Save Ansible playbooks as valid YAML with the `.yml` or `.yaml` file extension. diff --git a/src/ansiblelint/rules/playbook_extension.py b/src/ansiblelint/rules/playbook_extension.py new file mode 100644 index 0000000..b4ca41c --- /dev/null +++ b/src/ansiblelint/rules/playbook_extension.py @@ -0,0 +1,55 @@ +"""Implementation of playbook-extension rule.""" +# Copyright (c) 2016, Tsukinowa Inc. 
<info@tsukinowa.jp>
+# Copyright (c) 2018, Ansible Project
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING
+
+from ansiblelint.file_utils import Lintable
+from ansiblelint.rules import AnsibleLintRule
+from ansiblelint.runner import Runner
+
+if TYPE_CHECKING:
+ from ansiblelint.errors import MatchError
+
+
+class PlaybookExtensionRule(AnsibleLintRule):
+ """Use ".yml" or ".yaml" playbook extension."""
+
+ id = "playbook-extension"
+ description = 'Playbooks should have the ".yml" or ".yaml" extension'
+ severity = "MEDIUM"
+ tags = ["formatting"]
+ done: list[str] = []
+ version_added = "v4.0.0"
+
+ def matchyaml(self, file: Lintable) -> list[MatchError]:
+ result: list[MatchError] = []
+ if file.kind != "playbook":
+ return result
+ path = str(file.path)
+ ext = file.path.suffix
+ if ext not in [".yml", ".yaml"] and path not in self.done:
+ self.done.append(path)
+ result.append(self.create_matcherror(filename=file))
+ return result
+
+
+if "pytest" in sys.modules:
+ import pytest
+
+ from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports
+
+ @pytest.mark.parametrize(
+ ("file", "expected"),
+ (pytest.param("examples/playbooks/play-without-extension", 1, id="fail"),),
+ )
+ def test_playbook_extension(file: str, expected: int) -> None:
+ """Playbooks without a .yml or .yaml extension should be reported."""
+ rules = RulesCollection()
+ rules.register(PlaybookExtensionRule())
+ results = Runner(Lintable(file, kind="playbook"), rules=rules).run()
+ assert len(results) == expected
+ for result in results:
+ assert result.tag == "playbook-extension"
diff --git a/src/ansiblelint/rules/risky_file_permissions.md b/src/ansiblelint/rules/risky_file_permissions.md
new file mode 100644
index 0000000..2a62a6d
--- /dev/null
+++ b/src/ansiblelint/rules/risky_file_permissions.md
@@ -0,0 +1,60 @@
+# risky-file-permissions
+
+This rule is triggered by various modules that could end up creating new files
+on disk with permissions that might be too open, or unpredictable. Please read
+the documentation of each module carefully to understand the implications of
+using different argument values, as these make the difference between using the
+module safely or not. The fix depends on each module and also your particular
+situation.
+
+Some modules have a `create` argument that defaults to `true`. For those you
+either need to set `create: false` or provide some permissions like `mode: 0600`
+to make the behavior predictable and not dependent on the current system
+settings.
+
+Modules that are checked:
+
+- [`ansible.builtin.assemble`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/assemble_module.html)
+- [`ansible.builtin.copy`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/copy_module.html)
+- [`ansible.builtin.file`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/file_module.html)
+- [`ansible.builtin.get_url`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/get_url_module.html)
+- [`ansible.builtin.replace`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/replace_module.html)
+- [`ansible.builtin.template`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/template_module.html)
+- [`community.general.archive`](https://docs.ansible.com/ansible/latest/collections/community/general/archive_module.html)
+- [`community.general.ini_file`](https://docs.ansible.com/ansible/latest/collections/community/general/ini_file_module.html)
+
+!!!
warning + + This rule does not take [module_defaults](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_module_defaults.html) configuration into account. + There are currently no plans to implement this feature because changing task location can also change task behavior. + +## Problematic code + +```yaml +--- +- name: Unsafe example of using ini_file + community.general.ini_file: + path: foo + create: true +``` + +## Correct code + +```yaml +--- +- name: Safe example of using ini_file (1st solution) + community.general.ini_file: + path: foo + create: false # prevents creating a file with potentially insecure permissions + +- name: Safe example of using ini_file (2nd solution) + community.general.ini_file: + path: foo + mode: 0600 # explicitly sets the desired permissions, to make the results predictable + +- name: Safe example of using copy (3rd solution) + ansible.builtin.copy: + src: foo + dest: bar + mode: preserve # copy has a special mode that sets the same permissions as the source file +``` diff --git a/src/ansiblelint/rules/risky_file_permissions.py b/src/ansiblelint/rules/risky_file_permissions.py new file mode 100644 index 0000000..f4494eb --- /dev/null +++ b/src/ansiblelint/rules/risky_file_permissions.py @@ -0,0 +1,168 @@ +# Copyright (c) 2020 Sorin Sbarnea <sorin.sbarnea@gmail.com> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+"""MissingFilePermissionsRule used with ansible-lint.""" +from __future__ import annotations + +import sys +from pathlib import Path +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +# Despite documentation mentioning 'preserve' only these modules support it: +_modules_with_preserve = ( + "copy", + "template", +) + +_MODULES: set[str] = { + "archive", + "community.general.archive", + "assemble", + "ansible.builtin.assemble", + "copy", # supports preserve + "ansible.builtin.copy", + "file", + "ansible.builtin.file", + "get_url", + "ansible.builtin.get_url", + "replace", # implicit preserve behavior but mode: preserve is invalid + "ansible.builtin.replace", + "template", # supports preserve + "ansible.builtin.template", + # 'unarchive', # disabled because .tar.gz files can have permissions inside +} + +_MODULES_WITH_CREATE: dict[str, bool] = { + "blockinfile": False, + "ansible.builtin.blockinfile": False, + "htpasswd": True, + "community.general.htpasswd": True, + "ini_file": True, + "community.general.ini_file": True, + "lineinfile": False, + "ansible.builtin.lineinfile": False, +} + + +class MissingFilePermissionsRule(AnsibleLintRule): + """File permissions unset or incorrect.""" + + id = "risky-file-permissions" + description = ( + "Missing or unsupported mode parameter can cause unexpected file " + "permissions based " + "on version of Ansible being used. Be explicit, like `mode: 0644` to " + "avoid hitting this rule. Special `preserve` value is accepted " + f"only by {', '.join([f'`{x}`' for x in _modules_with_preserve])} modules." + ) + link = "https://github.com/ansible/ansible/issues/71200" + severity = "VERY_HIGH" + tags = ["unpredictability"] + version_added = "v4.3.0" + + _modules = _MODULES + _modules_with_create = _MODULES_WITH_CREATE + + # pylint: disable=too-many-return-statements + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + module = task["action"]["__ansible_module__"] + mode = task["action"].get("mode", None) + + if not isinstance(task.args, dict): + # We are unable to check args when using jinja templating + return False + + if module not in self._modules and module not in self._modules_with_create: + return False + + if mode == "preserve" and module not in _modules_with_preserve: + return True + + if module in self._modules_with_create: + create = task["action"].get("create", self._modules_with_create[module]) + return create and mode is None + + # A file that doesn't exist cannot have a mode + if task["action"].get("state", None) == "absent": + return False + + # A symlink always has mode 0777 + if task["action"].get("state", None) == "link": + return False + + # Recurse on a directory does not allow for an uniform mode + if task["action"].get("recurse", None): + return False + + # The file module does not create anything when state==file (default) + if module == "file" and task["action"].get("state", "file") == "file": + return False + + # replace module is the only one that has a valid default preserve + # behavior, but we want to trigger rule if user used incorrect + # documentation and put 'preserve', which is not supported. 
+ if module == "replace" and mode is None: + return False + + return mode is None + + +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.testing import RunFromText # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("file", "expected"), + ( + pytest.param( + "examples/playbooks/rule-risky-file-permissions-pass.yml", + 0, + id="pass", + ), + pytest.param( + "examples/playbooks/rule-risky-file-permissions-fail.yml", + 11, + id="fails", + ), + ), + ) + def test_risky_file_permissions( + file: str, + expected: int, + default_rules_collection: RulesCollection, + ) -> None: + """The ini_file module does not accept preserve mode.""" + runner = RunFromText(default_rules_collection) + results = runner.run(Path(file)) + assert len(results) == expected + for result in results: + assert result.tag == "risky-file-permissions" diff --git a/src/ansiblelint/rules/risky_octal.md b/src/ansiblelint/rules/risky_octal.md new file mode 100644 index 0000000..a2f22eb --- /dev/null +++ b/src/ansiblelint/rules/risky_octal.md @@ -0,0 +1,49 @@ +# risky-octal + +This rule checks that octal file permissions are strings that contain a leading +zero or are written in +[symbolic modes](https://www.gnu.org/software/findutils/manual/html_node/find_html/Symbolic-Modes.html), +such as `u+rwx` or `u=rw,g=r,o=r`. + +Using integers or octal values in YAML can result in unexpected behavior. For +example, the YAML loader interprets `0644` as the decimal number `420` but +putting `644` there will produce very different results. + +Modules that are checked: + +- [`ansible.builtin.assemble`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/assemble_module.html) +- [`ansible.builtin.copy`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/copy_module.html) +- [`ansible.builtin.file`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/file_module.html) +- [`ansible.builtin.replace`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/replace_module.html) +- [`ansible.builtin.template`](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/template_module.html) + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Unsafe example of declaring Numeric file permissions + ansible.builtin.file: + path: /etc/foo.conf + owner: foo + group: foo + mode: 644 +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Safe example of declaring Numeric file permissions (1st solution) + ansible.builtin.file: + path: /etc/foo.conf + owner: foo + group: foo + mode: "0644" # <- quoting and the leading zero will prevent surprises + # "0o644" is also a valid alternative. 
+``` diff --git a/src/ansiblelint/rules/risky_octal.py b/src/ansiblelint/rules/risky_octal.py new file mode 100644 index 0000000..e3651ea --- /dev/null +++ b/src/ansiblelint/rules/risky_octal.py @@ -0,0 +1,196 @@ +"""Implementation of risky-octal rule.""" +# Copyright (c) 2013-2014 Will Thames <will@thames.id.au> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule, RulesCollection +from ansiblelint.runner import Runner + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class OctalPermissionsRule(AnsibleLintRule): + """Octal file permissions must contain leading zero or be a string.""" + + id = "risky-octal" + description = ( + "Numeric file permissions without leading zero can behave " + "in unexpected ways." + ) + link = "https://docs.ansible.com/ansible/latest/collections/ansible/builtin/file_module.html" + severity = "VERY_HIGH" + tags = ["formatting"] + version_added = "historic" + + _modules = [ + "assemble", + "copy", + "file", + "ini_file", + "lineinfile", + "replace", + "synchronize", + "template", + "unarchive", + ] + + @staticmethod + def is_invalid_permission(mode: int) -> bool: + """Check if permissions are valid. + + Sensible file permission modes don't have write bit set when read bit + is not set and don't have execute bit set when user execute bit is + not set. + + Also, user permissions are more generous than group permissions and + user and group permissions are more generous than world permissions. 
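        For example, 0o644 and 0o755 satisfy all of these constraints, while
        0o606 (others more permissive than the group) or 0o473 (group more
        permissive than the user) are reported as invalid.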
+ """ + other_write_without_read = ( + mode % 8 and mode % 8 < 4 and not (mode % 8 == 1 and (mode >> 6) % 2 == 1) + ) + group_write_without_read = ( + (mode >> 3) % 8 + and (mode >> 3) % 8 < 4 + and not ((mode >> 3) % 8 == 1 and (mode >> 6) % 2 == 1) + ) + user_write_without_read = ( + (mode >> 6) % 8 and (mode >> 6) % 8 < 4 and (mode >> 6) % 8 != 1 + ) + other_more_generous_than_group = mode % 8 > (mode >> 3) % 8 + other_more_generous_than_user = mode % 8 > (mode >> 6) % 8 + group_more_generous_than_user = (mode >> 3) % 8 > (mode >> 6) % 8 + + return bool( + other_write_without_read + or group_write_without_read + or user_write_without_read + or other_more_generous_than_group + or other_more_generous_than_user + or group_more_generous_than_user, + ) + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + if task["action"]["__ansible_module__"] in self._modules: + mode = task["action"].get("mode", None) + + if isinstance(mode, str): + return False + + if isinstance(mode, int) and self.is_invalid_permission(mode): + return f'`mode: {mode}` should have a string value with leading zero `mode: "0{mode:o}"` or use symbolic mode.' + return False + + +if "pytest" in sys.modules: + import pytest + + VALID_MODES = [ + 0o777, + 0o775, + 0o770, + 0o755, + 0o750, + 0o711, + 0o710, + 0o700, + 0o666, + 0o664, + 0o660, + 0o644, + 0o640, + 0o600, + 0o555, + 0o551, + 0o550, + 0o511, + 0o510, + 0o500, + 0o444, + 0o440, + 0o400, + ] + + INVALID_MODES = [ + 777, + 775, + 770, + 755, + 750, + 711, + 710, + 700, + 666, + 664, + 660, + 644, + 640, + 622, + 620, + 600, + 555, + 551, + 550, # 511 == 0o777, 510 == 0o776, 500 == 0o764 + 444, + 440, + 400, + ] + + @pytest.mark.parametrize( + ("file", "failures"), + ( + pytest.param("examples/playbooks/rule-risky-octal-pass.yml", 0, id="pass"), + pytest.param("examples/playbooks/rule-risky-octal-fail.yml", 4, id="fail"), + ), + ) + def test_octal(file: str, failures: int) -> None: + """Test that octal permissions are valid.""" + collection = RulesCollection() + collection.register(OctalPermissionsRule()) + results = Runner(file, rules=collection).run() + + assert len(results) == failures + for result in results: + assert result.rule.id == "risky-octal" + + def test_octal_valid_modes() -> None: + """Test that octal modes are valid.""" + rule = OctalPermissionsRule() + for mode in VALID_MODES: + assert not rule.is_invalid_permission( + mode, + ), f"0o{mode:o} should be a valid mode" + + def test_octal_invalid_modes() -> None: + """Test that octal modes are invalid.""" + rule = OctalPermissionsRule() + for mode in INVALID_MODES: + assert rule.is_invalid_permission( + mode, + ), f"{mode:d} should be an invalid mode" diff --git a/src/ansiblelint/rules/risky_shell_pipe.md b/src/ansiblelint/rules/risky_shell_pipe.md new file mode 100644 index 0000000..302d0d9 --- /dev/null +++ b/src/ansiblelint/rules/risky_shell_pipe.md @@ -0,0 +1,39 @@ +# risky-shell-pipe + +This rule checks for the bash `pipefail` option with the Ansible `shell` module. + +You should always set `pipefail` when piping output from one command to another. +The return status of a pipeline is the exit status of the command. The +`pipefail` option ensures that tasks fail as expected if the first command +fails. + +As this requirement does apply to PowerShell, for shell commands that have +`pwsh` inside `executable` attribute, this rule will not trigger. 
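Under the hood, the check reduces to two regular expressions: one that finds a real pipe (`|`, but not the `||` operator) and one that looks for a `set ... -o pipefail` statement. Below is a simplified, standalone sketch of that idea; the regular expressions are the ones used by `risky_shell_pipe.py` further down in this patch, while the helper name is only illustrative. The real rule additionally strips Jinja2 expressions and skips tasks that use `ignore_errors` or a `pwsh` executable.

```python
import re

# Regexes borrowed from ShellWithoutPipefail below; the helper name is illustrative.
PIPEFAIL_RE = re.compile(r"^\s*set.*[+-][A-Za-z]*o\s*pipefail", re.M)
PIPE_RE = re.compile(r"(?<!\|)\|(?!\|)")  # a single "|", not the "||" operator


def misses_pipefail(cmd: str) -> bool:
    """Return True when cmd pipes output but never sets pipefail."""
    return bool(PIPE_RE.search(cmd) and not PIPEFAIL_RE.search(cmd))


assert misses_pipefail("false | cat")
assert not misses_pipefail("set -o pipefail && false | cat")
```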
+ +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + tasks: + - name: Pipeline without pipefail + ansible.builtin.shell: false | cat +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + become: false + tasks: + - name: Pipeline with pipefail + ansible.builtin.shell: set -o pipefail && false | cat + + - name: Pipeline with pipefail, multi-line + ansible.builtin.shell: | + set -o pipefail # <-- adding this will prevent surprises + false | cat +``` diff --git a/src/ansiblelint/rules/risky_shell_pipe.py b/src/ansiblelint/rules/risky_shell_pipe.py new file mode 100644 index 0000000..58a6f5f --- /dev/null +++ b/src/ansiblelint/rules/risky_shell_pipe.py @@ -0,0 +1,93 @@ +"""Implementation of risky-shell-pipe rule.""" +from __future__ import annotations + +import re +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.utils import convert_to_boolean, get_cmd_args + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class ShellWithoutPipefail(AnsibleLintRule): + """Shells that use pipes should set the pipefail option.""" + + id = "risky-shell-pipe" + description = ( + "Without the pipefail option set, a shell command that " + "implements a pipeline can fail and still return 0. If " + "any part of the pipeline other than the terminal command " + "fails, the whole pipeline will still return 0, which may " + "be considered a success by Ansible. " + "Pipefail is available in the bash shell." + ) + severity = "MEDIUM" + tags = ["command-shell"] + version_added = "v4.1.0" + + _pipefail_re = re.compile(r"^\s*set.*[+-][A-Za-z]*o\s*pipefail", re.M) + _pipe_re = re.compile(r"(?<!\|)\|(?!\|)") + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str: + if task["__ansible_action_type__"] != "task": + return False + + if task["action"]["__ansible_module__"] != "shell": + return False + + if task.get("ignore_errors"): + return False + + jinja_stripped_cmd = self.unjinja(get_cmd_args(task)) + + # https://github.com/ansible/ansible-lint/issues/3161 + if "pwsh" in task["action"].get("executable", ""): + return False + + return bool( + self._pipe_re.search(jinja_stripped_cmd) + and not self._pipefail_re.search(jinja_stripped_cmd) + and not convert_to_boolean(task["action"].get("ignore_errors", False)), + ) + + +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("file", "expected"), + ( + pytest.param( + "examples/playbooks/rule-risky-shell-pipe-pass.yml", + 0, + id="pass", + ), + pytest.param( + "examples/playbooks/rule-risky-shell-pipe-fail.yml", + 3, + id="fail", + ), + ), + ) + def test_risky_shell_pipe( + default_rules_collection: RulesCollection, + file: str, + expected: int, + ) -> None: + """Validate that rule works as intended.""" + results = Runner(file, rules=default_rules_collection).run() + + for result in results: + assert result.rule.id == ShellWithoutPipefail.id, result + assert len(results) == expected diff --git a/src/ansiblelint/rules/role_name.md b/src/ansiblelint/rules/role_name.md new file mode 100644 index 0000000..28aa8b8 --- /dev/null +++ b/src/ansiblelint/rules/role_name.md @@ -0,0 +1,36 @@ +# role-name + +This rule checks role names to ensure they conform with requirements. 
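The concrete requirements are spelled out in the paragraphs below. As a quick illustration, the pattern enforced by the implementation (`ROLE_NAME_REGEX` in `role_name.py`, later in this patch) can be exercised directly; note that the rule also strips an `ansible-role-` prefix before matching. The sketch uses the example names from this document.

```python
import re

# Pattern copied from ROLE_NAME_REGEX in role_name.py; the names come from the
# examples in this document.
ROLE_NAME_REGEX = re.compile(r"^[a-z][a-z0-9_]*$")

for name in ("myrole1", "myrole_3", "1myrole", "myRole_3"):
    print(f"{name}: {'ok' if ROLE_NAME_REGEX.match(name) else 'does not match'}")
```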
+ +Role names must contain only lowercase alphanumeric characters and the underscore `_` character. +Role names must also start with an alphabetic character. + +For more information see the [roles directory](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections_structure.html#roles-directory) topic in Ansible documentation. + +`role-name[path]` message tells you to avoid using paths when importing roles. +You should only rely on Ansible's ability to find the role and refer to them +using fully qualified names. + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + roles: + - 1myrole # <- Does not start with an alphabetic character. + - myrole2[*^ # <- Contains invalid special characters. + - myRole_3 # <- Contains uppercase alphabetic characters. +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + roles: + - myrole1 # <- Starts with an alphabetic character. + - myrole2 # <- Contains only alphanumeric characters. + - myrole_3 # <- Contains only lowercase alphabetic characters. +``` diff --git a/src/ansiblelint/rules/role_name.py b/src/ansiblelint/rules/role_name.py new file mode 100644 index 0000000..499c086 --- /dev/null +++ b/src/ansiblelint/rules/role_name.py @@ -0,0 +1,170 @@ +"""Implementation of role-name rule.""" +# Copyright (c) 2020 Gael Chamoulaud <gchamoul@redhat.com> +# Copyright (c) 2020 Sorin Sbarnea <ssbarnea@redhat.com> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +from __future__ import annotations + +import re +import sys +from functools import cache +from typing import TYPE_CHECKING + +from ansiblelint.constants import ROLE_IMPORT_ACTION_NAMES +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.utils import parse_yaml_from_file + +if TYPE_CHECKING: + from pathlib import Path + + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +ROLE_NAME_REGEX = re.compile(r"^[a-z][a-z0-9_]*$") + + +def _remove_prefix(text: str, prefix: str) -> str: + return re.sub(rf"^{re.escape(prefix)}", "", text) + + +@cache +def _match_role_name_regex(role_name: str) -> bool: + return ROLE_NAME_REGEX.match(role_name) is not None + + +class RoleNames(AnsibleLintRule): + """Role name {0} does not match ``^[a-z][a-z0-9_]*$`` pattern.""" + + id = "role-name" + description = ( + "Role names are now limited to contain only lowercase alphanumeric " + "characters, plus underline and start with an alpha character." 
+ ) + link = "https://docs.ansible.com/ansible/devel/dev_guide/developing_collections_structure.html#roles-directory" + severity = "HIGH" + tags = ["deprecations", "metadata"] + version_added = "v6.8.5" + _ids = { + "role-name[path]": "Avoid using paths when importing roles.", + } + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + results = [] + if task["action"]["__ansible_module__"] in ROLE_IMPORT_ACTION_NAMES: + name = task["action"].get("name", "") + if "/" in name: + results.append( + self.create_matcherror( + f"Avoid using paths when importing roles. ({name})", + filename=file, + lineno=task["action"].get("__line__", task["__line__"]), + tag=f"{self.id}[path]", + ), + ) + return results + + def matchdir(self, lintable: Lintable) -> list[MatchError]: + return self.matchyaml(lintable) + + def matchyaml(self, file: Lintable) -> list[MatchError]: + result: list[MatchError] = [] + + if file.kind not in ("meta", "role", "playbook"): + return result + + if file.kind == "playbook": + for play in file.data: + if "roles" in play: + line = play["__line__"] + for role in play["roles"]: + if isinstance(role, dict): + line = role["__line__"] + role_name = role["role"] + elif isinstance(role, str): + role_name = role + if "/" in role_name: + result.append( + self.create_matcherror( + f"Avoid using paths when importing roles. ({role_name})", + filename=file, + lineno=line, + tag=f"{self.id}[path]", + ), + ) + return result + + if file.kind == "role": + role_name = self._infer_role_name( + meta=file.path / "meta" / "main.yml", + default=file.path.name, + ) + else: + role_name = self._infer_role_name( + meta=file.path, + default=file.path.resolve().parents[1].name, + ) + + role_name = _remove_prefix(role_name, "ansible-role-") + if role_name and not _match_role_name_regex(role_name): + result.append( + self.create_matcherror( + filename=file, + message=self.shortdesc.format(role_name), + ), + ) + return result + + @staticmethod + def _infer_role_name(meta: Path, default: str) -> str: + if meta.is_file(): + meta_data = parse_yaml_from_file(str(meta)) + if meta_data: + try: + return str(meta_data["galaxy_info"]["role_name"]) + except KeyError: + pass + return default + + +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failure"), + (pytest.param("examples/playbooks/rule-role-name-path.yml", 3, id="fail"),), + ) + def test_role_name_path( + default_rules_collection: RulesCollection, + test_file: str, + failure: int, + ) -> None: + """Test rule matches.""" + results = Runner(test_file, rules=default_rules_collection).run() + for result in results: + assert result.tag == "role-name[path]" + assert len(results) == failure diff --git a/src/ansiblelint/rules/run_once.md b/src/ansiblelint/rules/run_once.md new file mode 100644 index 0000000..024648b --- /dev/null +++ b/src/ansiblelint/rules/run_once.md @@ -0,0 +1,65 @@ +# run-once + +This rule warns against the use of `run_once` when the `strategy` is set to +`free`. + +This rule can produce the following messages: + +- `run-once[play]`: Play uses `strategy: free`. +- `run-once[task]`: Using `run_once` may behave differently if the `strategy` is + set to `free`. 
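To see which of these tags a given playbook triggers, the rule can be exercised the same way the tests at the bottom of `run_once.py` (later in this patch) do. A minimal sketch, assuming a checkout of this repository so that the example playbook used by those tests is available:

```python
# Minimal sketch modeled on the pytest helpers in run_once.py; the playbook
# path is one of the example fixtures referenced by those tests.
from ansiblelint.rules import RulesCollection
from ansiblelint.rules.run_once import RunOnce
from ansiblelint.runner import Runner

collection = RulesCollection()
collection.register(RunOnce())
for match in Runner("examples/playbooks/run-once-fail.yml", rules=collection).run():
    print(match.tag, match.message)
```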
+ +For more information see the following topics in Ansible documentation: + +- [free strategy](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/free_strategy.html#free-strategy) +- [selecting a strategy](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_strategies.html#selecting-a-strategy) +- [run_once(playbook keyword) more info](https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html) + +!!! warning + + The reason for the existence of this rule is for reminding users that `run_once` + is not providing any warranty that the task will run only once. + This rule will always trigger regardless of the value configured inside the 'strategy' field. That is because the effective value used at runtime can be different than the value inside the file. For example, ansible command line arguments can alter it. + +It is perfectly fine to add `# noqa: run-once[task]` to mark the warning as +acknowledged and ignored. + +## Problematic Code + +```yaml +--- +- name: "Example with run_once" + hosts: all + strategy: free # <-- avoid use of strategy as free + gather_facts: false + tasks: + - name: Task with run_once + ansible.builtin.debug: + msg: "Test" + run_once: true # <-- avoid use of strategy as free at play level when using run_once at task level +``` + +## Correct Code + +```yaml +- name: "Example without run_once" + hosts: all + gather_facts: false + tasks: + - name: Task without run_once + ansible.builtin.debug: + msg: "Test" +``` + +```yaml +- name: "Example of using run_once with strategy other than free" + hosts: all + strategy: linear + # strategy: free # noqa: run-once[play] (if using strategy: free can skip it this way) + gather_facts: false + tasks: # <-- use noqa to disable rule violations for specific tasks + - name: Task with run_once # noqa: run-once[task] + ansible.builtin.debug: + msg: "Test" + run_once: true +``` diff --git a/src/ansiblelint/rules/run_once.py b/src/ansiblelint/rules/run_once.py new file mode 100644 index 0000000..78968b6 --- /dev/null +++ b/src/ansiblelint/rules/run_once.py @@ -0,0 +1,96 @@ +"""Optional Ansible-lint rule to warn use of run_once with strategy free.""" +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING, Any + +from ansiblelint.constants import LINE_NUMBER_KEY +from ansiblelint.rules import AnsibleLintRule + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + from ansiblelint.utils import Task + + +class RunOnce(AnsibleLintRule): + """Run once should use strategy other than free.""" + + id = "run-once" + link = "https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html" + description = "When using run_once, we should avoid using strategy as free." 
+ + tags = ["idiom"] + severity = "MEDIUM" + _ids = { + "run-once[task]": "Using run_once may behave differently if strategy is set to free.", + "run-once[play]": "Play uses strategy: free", + } + + def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]: + """Return matches found for a specific playbook.""" + # If the Play uses the 'strategy' and it's value is set to free + + if not file or file.kind != "playbook" or not data: + return [] + + strategy = data.get("strategy", None) + run_once = data.get("run_once", False) + if (not strategy and not run_once) or strategy != "free": + return [] + return [ + self.create_matcherror( + message="Play uses strategy: free", + filename=file, + tag=f"{self.id}[play]", + # pylint: disable=protected-access + lineno=strategy._line_number, # noqa: SLF001 + ), + ] + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + """Return matches for a task.""" + if not file or file.kind != "playbook": + return [] + + run_once = task.get("run_once", False) + if not run_once: + return [] + return [ + self.create_matcherror( + message="Using run_once may behave differently if strategy is set to free.", + filename=file, + tag=f"{self.id}[task]", + lineno=task[LINE_NUMBER_KEY], + ), + ] + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failure"), + ( + pytest.param("examples/playbooks/run-once-pass.yml", 0, id="pass"), + pytest.param("examples/playbooks/run-once-fail.yml", 2, id="fail"), + ), + ) + def test_run_once( + default_rules_collection: RulesCollection, + test_file: str, + failure: int, + ) -> None: + """Test rule matches.""" + results = Runner(test_file, rules=default_rules_collection).run() + for result in results: + assert result.rule.id == RunOnce().id + assert len(results) == failure diff --git a/src/ansiblelint/rules/sanity.md b/src/ansiblelint/rules/sanity.md new file mode 100644 index 0000000..5b4f3a4 --- /dev/null +++ b/src/ansiblelint/rules/sanity.md @@ -0,0 +1,54 @@ +# sanity + +This rule checks the `tests/sanity/ignore-x.x.txt` file for disallowed ignores. +This rule is extremely opinionated and enforced by Partner Engineering. The +currently allowed ruleset is subject to change, but is starting at a minimal +number of allowed ignores for maximum test enforcement. Any commented-out ignore +entries are not evaluated. + +This rule can produce messages like: + +- `sanity[cannot-ignore]` - Ignore file contains {test} at line {line_num}, + which is not a permitted ignore. +- `sanity[bad-ignore]` - Ignore file entry at {line_num} is formatted + incorrectly. Please review. 
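Conceptually, each non-comment line of the ignore file is split into a file path and a test name, and the test name is compared against the allow lists that follow. A simplified sketch of that logic is shown below; the allow list here is only a small subset, and the full implementation in `sanity.py` (later in this patch) also tracks line numbers and the extra entries permitted for Ansible 2.9.

```python
from __future__ import annotations

# Simplified sketch of how a single ignore entry is classified; ALLOWED is a
# deliberately tiny subset of the real allow list documented below.
ALLOWED = {"validate-modules:missing-gplv3-license", "import-2.7!skip"}


def classify(entry: str) -> str | None:
    """Return a violation tag for a bad entry, or None when it is acceptable."""
    if not entry or entry.startswith("#"):
        return None
    entry = entry.split("#")[0]
    try:
        _path, test = entry.split()
    except ValueError:
        return "sanity[bad-ignore]"
    return None if test in ALLOWED else "sanity[cannot-ignore]"


print(classify("plugins/module_utils/foo.py import-2.7!skip"))  # None
print(classify("plugins/module_utils/foo.py import-3.6!skip"))  # sanity[cannot-ignore]
```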
+ +Currently allowed ignores for all Ansible versions are: + +- `validate-modules:missing-gplv3-license` +- `action-plugin-docs` +- `import-2.6` +- `import-2.6!skip` +- `import-2.7` +- `import-2.7!skip` +- `import-3.5` +- `import-3.5!skip` +- `compile-2.6` +- `compile-2.6!skip` +- `compile-2.7` +- `compile-2.7!skip` +- `compile-3.5` +- `compile-3.5!skip` + +Additionally allowed ignores for Ansible 2.9 are: +- `validate-modules:deprecation-mismatch` +- `validate-modules:invalid-documentation` + +## Problematic code + +``` +# tests/sanity/ignore-x.x.txt +plugins/module_utils/ansible_example_module.py import-3.6!skip +``` + +``` +# tests/sanity/ignore-x.x.txt +plugins/module_utils/ansible_example_module.oops-3.6!skip +``` + +## Correct code + +``` +# tests/sanity/ignore-x.x.txt +plugins/module_utils/ansible_example_module.py import-2.7!skip +``` diff --git a/src/ansiblelint/rules/sanity.py b/src/ansiblelint/rules/sanity.py new file mode 100644 index 0000000..09fe7cc --- /dev/null +++ b/src/ansiblelint/rules/sanity.py @@ -0,0 +1,148 @@ +"""Implementation of sanity rule.""" +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +from ansiblelint.rules import AnsibleLintRule + +# Copyright (c) 2018, Ansible Project + + +if TYPE_CHECKING: + from ansiblelint.errors import MatchError + from ansiblelint.file_utils import Lintable + + +class CheckSanityIgnoreFiles(AnsibleLintRule): + """Ignore entries in sanity ignore files must match an allow list.""" + + id = "sanity" + description = ( + "Identifies non-allowed entries in the `tests/sanity/ignore*.txt files." + ) + severity = "MEDIUM" + tags = ["idiom"] + version_added = "v6.14.0" + + # Partner Engineering defines this list. Please contact PE for changes. + + allowed_ignores_v2_9 = [ + "validate-modules:deprecation-mismatch", # Note: 2.9 expects a deprecated key in the METADATA. It was removed in later versions. + "validate-modules:invalid-documentation", # Note: The removed_at_date key in the deprecated section is invalid for 2.9. + ] + + allowed_ignores_all = [ + "validate-modules:missing-gplv3-license", + "action-plugin-docs", # Added for Networking Collections + "import-2.6", + "import-2.6!skip", + "import-2.7", + "import-2.7!skip", + "import-3.5", + "import-3.5!skip", + "compile-2.6", + "compile-2.6!skip", + "compile-2.7", + "compile-2.7!skip", + "compile-3.5", + "compile-3.5!skip", + ] + _ids = { + "sanity[cannot-ignore]": "Ignore file contains ... at line ..., which is not a permitted ignore.", + "sanity[bad-ignore]": "Ignore file entry at ... is formatted incorrectly. Please review.", + } + + def matchyaml(self, file: Lintable) -> list[MatchError]: + """Evaluate sanity ignore lists for disallowed ignores. 
+ + :param file: Input lintable file that is a match for `sanity-ignore-file` + :returns: List of errors matched to the input file + """ + results: list[MatchError] = [] + test = "" + + if file.kind != "sanity-ignore-file": + return [] + + with file.path.open(encoding="utf-8") as ignore_file: + entries = ignore_file.read().splitlines() + + ignores = self.allowed_ignores_all + + # If there is a ignore-2.9.txt file, add the v2_9 list of allowed ignores + if "ignore-2.9.txt" in str(file.abspath): + ignores = self.allowed_ignores_all + self.allowed_ignores_v2_9 + + for line_num, entry in enumerate(entries, 1): + if entry and entry[0] != "#": + try: + if "#" in entry: + entry, _ = entry.split("#") + (_, test) = entry.split() + if test not in ignores: + results.append( + self.create_matcherror( + message=f"Ignore file contains {test} at line {line_num}, which is not a permitted ignore.", + tag="sanity[cannot-ignore]", + lineno=line_num, + filename=file, + ), + ) + + except ValueError: + results.append( + self.create_matcherror( + message=f"Ignore file entry at {line_num} is formatted incorrectly. Please review.", + tag="sanity[bad-ignore]", + lineno=line_num, + filename=file, + ), + ) + + return results + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports + from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports + + @pytest.mark.parametrize( + ("test_file", "failures", "tags"), + ( + pytest.param( + "examples/sanity_ignores/tests/sanity/ignore-2.9.txt", + 0, + "sanity[cannot-ignore]", + id="pass", + ), + pytest.param( + "examples/sanity_ignores/tests/sanity/ignore-2.15.txt", + 1, + "sanity[bad-ignore]", + id="fail0", + ), + pytest.param( + "examples/sanity_ignores/tests/sanity/ignore-2.13.txt", + 1, + "sanity[cannot-ignore]", + id="fail1", + ), + ), + ) + def test_sanity_ignore_files( + default_rules_collection: RulesCollection, + test_file: str, + failures: int, + tags: str, + ) -> None: + """Test rule matches.""" + default_rules_collection.register(CheckSanityIgnoreFiles()) + results = Runner(test_file, rules=default_rules_collection).run() + for result in results: + assert result.rule.id == CheckSanityIgnoreFiles().id + assert result.tag == tags + assert len(results) == failures diff --git a/src/ansiblelint/rules/schema.md b/src/ansiblelint/rules/schema.md new file mode 100644 index 0000000..7c62120 --- /dev/null +++ b/src/ansiblelint/rules/schema.md @@ -0,0 +1,80 @@ +# schema + +The `schema` rule validates Ansible metadata files against JSON schemas. These +schemas ensure the compatibility of Ansible syntax content across versions. + +This `schema` rule is **mandatory**. You cannot use inline `noqa` comments to +ignore it. + +Ansible-lint validates the `schema` rule before processing other rules. This +prevents unexpected syntax from triggering multiple rule violations. + +## Validated schema + +Ansible-lint currently validates several schemas that are maintained in separate +projects and updated independently to ansible-lint. + +> Report bugs related to schema in their respective repository and not in the +> ansible-lint project. 
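Each lintable file is mapped to a `kind`, and any violation is reported with the matching `schema[<kind>]` tag from the lists below. A quick way to check which schema applies to a given file is sketched here; the paths are fixture paths borrowed from the tests in `schema.py` (later in this patch) and assume a checkout of this repository.

```python
from ansiblelint.file_utils import Lintable

# Fixture paths borrowed from the schema.py tests; the expected kinds are
# "galaxy" and "inventory" respectively.
for path in ("examples/collection/galaxy.yml", "examples/inventory/production.yml"):
    print(path, "->", Lintable(path).kind)
```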
+ +Maintained in the [ansible-lint](https://github.com/ansible/ansible-lint) +project: + +- `schema[ansible-lint-config]` validates + [ansible-lint configuration](https://github.com/ansible/ansible-lint/blob/main/src/ansiblelint/schemas/ansible-lint-config.json) +- `schema[role-arg-spec]` validates + [role argument specs](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_reuse_roles.html#specification-format) + which is a little bit different than the module argument spec. +- `schema[execution-environment]` validates + [execution environments](https://docs.ansible.com/automation-controller/latest/html/userguide/execution_environments.html) +- `schema[galaxy]` validates + [collection metadata](https://docs.ansible.com/ansible/latest/dev_guide/collections_galaxy_meta.html). +- `schema[inventory]` validates + [inventory files](https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html) + that match `inventory/*.yml`. +- `schema[meta-runtime]` validates + [runtime information](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections_structure.html#meta-directory-and-runtime-yml) + that matches `meta/runtime.yml` +- `schema[meta]` validates metadata for roles that match `meta/main.yml`. See + [role-dependencies](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_reuse_roles.html#role-dependencies) + or + [role/metadata.py](https://github.com/ansible/ansible/blob/devel/lib/ansible/playbook/role/metadata.py#L79)) + for details. +- `schema[playbook]` validates Ansible playbooks. +- `schema[requirements]` validates Ansible + [requirements](https://docs.ansible.com/ansible/latest/galaxy/user_guide.html#install-multiple-collections-with-a-requirements-file) + files that match `requirements.yml`. +- `schema[tasks]` validates Ansible task files that match `tasks/**/*.yml`. +- `schema[vars]` validates Ansible + [variables](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html) + that match `vars/*.yml` and `defaults/*.yml`. + +Maintained in the +[ansible-navigator](https://github.com/ansible/ansible-navigator) project: + +- `schema[ansible-navigator]` validates + [ansible-navigator configuration](https://github.com/ansible/ansible-navigator/blob/main/src/ansible_navigator/data/ansible-navigator.json) + +## schema[meta] + +For `meta/main.yml` files, Ansible-lint requires a `galaxy_info.standalone` +property that clarifies if a role is an old standalone one or a new one, +collection based: + +```yaml +galaxy_info: + standalone: true # <-- this is a standalone role (not part of a collection) +``` + +Ansible-lint requires the `standalone` key to avoid confusion and provide more +specific error messages. For example, the `meta` schema will require some +properties only for standalone roles or prevent the use of some properties that +are not supported by collections. + +You cannot use an empty `meta/main.yml` file or use only comments in the +`meta/main.yml` file. + +## schema[moves] + +These errors usually look like "foo was moved to bar in 2.10" and indicate +module moves between Ansible versions. 
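The `schema.py` implementation that follows delegates the actual validation to `validate_file_schema()`, which returns a list of error strings (empty when the file is valid). A minimal sketch of calling it directly, using an assumed fixture path from this repository:

```python
from ansiblelint.file_utils import Lintable
from ansiblelint.schemas.main import validate_file_schema

# Same call that ValidateSchemaRule.matchyaml performs; an empty list means
# the file passed the JSON Schema for its detected kind.
errors = validate_file_schema(Lintable("examples/collection/galaxy.yml"))
print(errors or "schema OK")
```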
diff --git a/src/ansiblelint/rules/schema.py b/src/ansiblelint/rules/schema.py new file mode 100644 index 0000000..32ff2eb --- /dev/null +++ b/src/ansiblelint/rules/schema.py @@ -0,0 +1,371 @@ +"""Rule definition for JSON Schema Validations.""" +from __future__ import annotations + +import logging +import sys +from typing import TYPE_CHECKING, Any + +from ansiblelint.errors import MatchError +from ansiblelint.file_utils import Lintable +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.schemas.__main__ import JSON_SCHEMAS +from ansiblelint.schemas.main import validate_file_schema +from ansiblelint.text import has_jinja + +if TYPE_CHECKING: + from ansiblelint.utils import Task + + +_logger = logging.getLogger(__name__) + + +DESCRIPTION_MD = """ Returned errors will not include exact line numbers, but they will mention +the schema name being used as a tag, like ``schema[playbook]``, +``schema[tasks]``. + +This rule is not skippable and stops further processing of the file. + +If incorrect schema was picked, you might want to either: + +* move the file to standard location, so its file is detected correctly. +* use ``kinds:`` option in linter config to help it pick correct file type. +""" + +pre_checks = { + "task": { + "with_flattened": { + "msg": "with_flattened was moved to with_community.general.flattened in 2.10", + "tag": "moves", + }, + "with_filetree": { + "msg": "with_filetree was moved to with_community.general.filetree in 2.10", + "tag": "moves", + }, + "with_cartesian": { + "msg": "with_cartesian was moved to with_community.general.flattened in 2.10", + "tag": "moves", + }, + }, +} + + +class ValidateSchemaRule(AnsibleLintRule): + """Perform JSON Schema Validation for known lintable kinds.""" + + description = DESCRIPTION_MD + + id = "schema" + severity = "VERY_HIGH" + tags = ["core"] + version_added = "v6.1.0" + _ids = { + "schema[ansible-lint-config]": "", + "schema[ansible-navigator-config]": "", + "schema[changelog]": "", + "schema[execution-environment]": "", + "schema[galaxy]": "", + "schema[inventory]": "", + "schema[meta]": "", + "schema[meta-runtime]": "", + "schema[molecule]": "", + "schema[playbook]": "", + "schema[requirements]": "", + "schema[role-arg-spec]": "", + "schema[rulebook]": "", + "schema[tasks]": "", + "schema[vars]": "", + } + _field_checks: dict[str, list[str]] = {} + + @property + def field_checks(self) -> dict[str, list[str]]: + """Lazy property for returning field checks.""" + if not self._collection: + msg = "Rule was not registered to a RuleCollection." 
+ raise RuntimeError(msg) + if not self._field_checks: + self._field_checks = { + "become_method": sorted( + self._collection.app.runtime.plugins.become.keys(), + ), + } + return self._field_checks + + def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]: + """Return matches found for a specific playbook.""" + results: list[MatchError] = [] + if not data or file.kind not in ("tasks", "handlers", "playbook"): + return results + # check at play level + results.extend(self._get_field_matches(file=file, data=data)) + return results + + def _get_field_matches( + self, + file: Lintable, + data: dict[str, Any], + ) -> list[MatchError]: + """Retrieve all matches related to fields for the given data block.""" + results = [] + for key, values in self.field_checks.items(): + if key in data: + plugin_value = data[key] + if not has_jinja(plugin_value) and plugin_value not in values: + msg = f"'{key}' must be one of the currently available values: {', '.join(values)}" + results.append( + MatchError( + message=msg, + lineno=data.get("__line__", 1), + lintable=file, + rule=ValidateSchemaRule(), + details=ValidateSchemaRule.description, + tag=f"schema[{file.kind}]", + ), + ) + return results + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> bool | str | MatchError | list[MatchError]: + results = [] + if not file: + file = Lintable("", kind="tasks") + results.extend(self._get_field_matches(file=file, data=task.raw_task)) + for key in pre_checks["task"]: + if key in task.raw_task: + msg = pre_checks["task"][key]["msg"] + tag = pre_checks["task"][key]["tag"] + results.append( + MatchError( + message=msg, + lintable=file, + rule=ValidateSchemaRule(), + details=ValidateSchemaRule.description, + tag=f"schema[{tag}]", + ), + ) + return results + + def matchyaml(self, file: Lintable) -> list[MatchError]: + """Return JSON validation errors found as a list of MatchError(s).""" + result: list[MatchError] = [] + if file.kind not in JSON_SCHEMAS: + return result + + errors = validate_file_schema(file) + if errors: + if errors[0].startswith("Failed to load YAML file"): + _logger.debug( + "Ignored failure to load %s for schema validation, as !vault may cause it.", + file, + ) + return [] + + result.append( + MatchError( + message=errors[0], + lintable=file, + rule=ValidateSchemaRule(), + details=ValidateSchemaRule.description, + tag=f"schema[{file.kind}]", + ), + ) + + if not result: + result = super().matchyaml(file) + return result + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + # pylint: disable=ungrouped-imports + from ansiblelint.config import options + from ansiblelint.rules import RulesCollection + from ansiblelint.runner import Runner + + @pytest.mark.parametrize( + ("file", "expected_kind", "expected"), + ( + pytest.param( + "examples/collection/galaxy.yml", + "galaxy", + ["'GPL' is not one of"], + id="galaxy", + ), + pytest.param( + "examples/roles/invalid_requirements_schema/meta/requirements.yml", + "requirements", + ["{'foo': 'bar'} is not valid under any of the given schemas"], + id="requirements", + ), + pytest.param( + "examples/roles/invalid_meta_schema/meta/main.yml", + "meta", + ["False is not of type 'string'"], + id="meta", + ), + pytest.param( + "examples/playbooks/vars/invalid_vars_schema.yml", + "vars", + ["'123' does not match any of the regexes"], + id="vars", + ), + pytest.param( + "examples/execution-environment.yml", + "execution-environment", + [], + 
id="execution-environment", + ), + pytest.param( + "examples/ee_broken/execution-environment.yml", + "execution-environment", + ["{'foo': 'bar'} is not valid under any of the given schemas"], + id="execution-environment-broken", + ), + ("examples/meta/runtime.yml", "meta-runtime", []), + pytest.param( + "examples/broken_collection_meta_runtime/meta/runtime.yml", + "meta-runtime", + ["Additional properties are not allowed ('foo' was unexpected)"], + id="meta-runtime-broken", + ), + pytest.param( + "examples/inventory/production.yml", + "inventory", + [], + id="inventory", + ), + pytest.param( + "examples/inventory/broken_dev_inventory.yml", + "inventory", + ["Additional properties are not allowed ('foo' was unexpected)"], + id="inventory-broken", + ), + pytest.param( + ".ansible-lint", + "ansible-lint-config", + [], + id="ansible-lint-config", + ), + pytest.param( + "examples/.config/ansible-lint.yml", + "ansible-lint-config", + [], + id="ansible-lint-config2", + ), + pytest.param( + "examples/broken/.ansible-lint", + "ansible-lint-config", + ["Additional properties are not allowed ('foo' was unexpected)"], + id="ansible-lint-config-broken", + ), + pytest.param( + "examples/ansible-navigator.yml", + "ansible-navigator-config", + [], + id="ansible-navigator-config", + ), + pytest.param( + "examples/broken/ansible-navigator.yml", + "ansible-navigator-config", + ["Additional properties are not allowed ('ansible' was unexpected)"], + id="ansible-navigator-config-broken", + ), + pytest.param( + "examples/roles/hello/meta/argument_specs.yml", + "role-arg-spec", + [], + id="role-arg-spec", + ), + pytest.param( + "examples/roles/broken_argument_specs/meta/argument_specs.yml", + "role-arg-spec", + ["Additional properties are not allowed ('foo' was unexpected)"], + id="role-arg-spec-broken", + ), + pytest.param( + "examples/changelogs/changelog.yaml", + "changelog", + ["Additional properties are not allowed ('foo' was unexpected)"], + id="changelog", + ), + pytest.param( + "examples/rulebooks/rulebook-fail.yml", + "rulebook", + [ + "Additional properties are not allowed ('that_should_not_be_here' was unexpected)", + ], + id="rulebook", + ), + pytest.param( + "examples/rulebooks/rulebook-pass.yml", + "rulebook", + [], + id="rulebook2", + ), + pytest.param( + "examples/playbooks/rule-schema-become-method-pass.yml", + "playbook", + [], + id="playbook", + ), + pytest.param( + "examples/playbooks/rule-schema-become-method-fail.yml", + "playbook", + [ + "'become_method' must be one of the currently available values", + "'become_method' must be one of the currently available values", + ], + id="playbook2", + ), + ), + ) + def test_schema(file: str, expected_kind: str, expected: list[str]) -> None: + """Validate parsing of ansible output.""" + lintable = Lintable(file) + assert lintable.kind == expected_kind + + rules = RulesCollection(options=options) + rules.register(ValidateSchemaRule()) + results = Runner(lintable, rules=rules).run() + + assert len(results) == len(expected), results + for idx, result in enumerate(results): + assert result.filename.endswith(file) + assert expected[idx] in result.message + assert result.tag == f"schema[{expected_kind}]" + + @pytest.mark.parametrize( + ("file", "expected_kind", "expected_tag", "count"), + ( + pytest.param( + "examples/playbooks/rule-syntax-moves.yml", + "playbook", + "schema[moves]", + 3, + id="playbook", + ), + ), + ) + def test_schema_moves( + file: str, + expected_kind: str, + expected_tag: str, + count: int, + ) -> None: + """Validate ability to detect 
schema[moves].""" + lintable = Lintable(file) + assert lintable.kind == expected_kind + + rules = RulesCollection(options=options) + rules.register(ValidateSchemaRule()) + results = Runner(lintable, rules=rules).run() + + assert len(results) == count, results + for result in results: + assert result.filename.endswith(file) + assert result.tag == expected_tag diff --git a/src/ansiblelint/rules/syntax_check.md b/src/ansiblelint/rules/syntax_check.md new file mode 100644 index 0000000..e8197a5 --- /dev/null +++ b/src/ansiblelint/rules/syntax_check.md @@ -0,0 +1,45 @@ +# syntax-check + +Our linter runs `ansible-playbook --syntax-check` on all playbooks, and if any +of these reports a syntax error, this stops any further processing of these +files. + +This error **cannot be disabled** due to being a prerequisite for other steps. +You can exclude these files from linting, but it is better to make sure they can +be loaded by Ansible. This is often achieved by editing the inventory file +and/or `ansible.cfg` so ansible can load required variables. + +If undefined variables cause the failure, you can use the jinja `default()` +filter to provide fallback values, like in the example below. + +This rule is among the few `unskippable` rules that cannot be added to +`skip_list` or `warn_list`. One possible workaround is to add the entire file to +the `exclude_paths`. This is a valid approach for special cases, like testing +fixtures that are invalid on purpose. + +One of the most common sources of errors is a failure to assert the presence of +various variables at the beginning of the playbook. + +This rule can produce messages like below: + +- `syntax-check[empty-playbook]` is raised when a playbook file has no content. + +## Problematic code + +```yaml +--- +- name: + Bad use of variable inside hosts block (wrong assumption of it being + defined) + hosts: "{{ my_hosts }}" + tasks: [] +``` + +## Correct code + +```yaml +--- +- name: Good use of variable inside hosts, without assumptions + hosts: "{{ my_hosts | default([]) }}" + tasks: [] +``` diff --git a/src/ansiblelint/rules/syntax_check.py b/src/ansiblelint/rules/syntax_check.py new file mode 100644 index 0000000..c6a4c5e --- /dev/null +++ b/src/ansiblelint/rules/syntax_check.py @@ -0,0 +1,58 @@ +"""Rule definition for ansible syntax check.""" +from __future__ import annotations + +import re +from dataclasses import dataclass + +from ansiblelint.rules import AnsibleLintRule + + +@dataclass +class KnownError: + """Class that tracks result of linting.""" + + tag: str + regex: re.Pattern[str] + + +OUTPUT_PATTERNS = ( + KnownError( + tag="missing-file", + regex=re.compile( + # do not use <filename> capture group for this because we want to report original file, not the missing target one + r"(?P<title>Unable to retrieve file contents)\n(?P<details>Could not find or access '(?P<value>.*)'[^\n]*)", + re.MULTILINE | re.S | re.DOTALL, + ), + ), + KnownError( + tag="specific", + regex=re.compile( + r"^ERROR! (?P<title>[^\n]*)\n\nThe error appears to be in '(?P<filename>[\w\/\.\-]+)': line (?P<line>\d+), column (?P<column>\d+)", + re.MULTILINE | re.S | re.DOTALL, + ), + ), + KnownError( + tag="empty-playbook", + regex=re.compile( + "Empty playbook, nothing to do", + re.MULTILINE | re.S | re.DOTALL, + ), + ), + KnownError( + tag="malformed", + regex=re.compile( + "^ERROR! 
(?P<title>A malformed block was encountered while loading a block[^\n]*)", + re.MULTILINE | re.S | re.DOTALL, + ), + ), +) + + +class AnsibleSyntaxCheckRule(AnsibleLintRule): + """Ansible syntax check failed.""" + + id = "syntax-check" + severity = "VERY_HIGH" + tags = ["core", "unskippable"] + version_added = "v5.0.0" + _order = 0 diff --git a/src/ansiblelint/rules/var_naming.md b/src/ansiblelint/rules/var_naming.md new file mode 100644 index 0000000..3386a0c --- /dev/null +++ b/src/ansiblelint/rules/var_naming.md @@ -0,0 +1,77 @@ +# var-naming + +This rule checks variable names to ensure they conform with requirements. + +Variable names must contain only lowercase alphanumeric characters and the +underscore `_` character. Variable names must also start with either an +alphabetic or underscore `_` character. + +For more information see the [creating valid variable names][var-names] topic in +Ansible documentation and [Naming things (Good Practices for Ansible)][cop]. + +You should also be fully aware of [special variables][magic-vars], also known as +magic variables, especially as most of them can only be read. While Ansible will +just ignore any attempt to set them, the linter will notify the user, so they +would not be confused about a line that does not effectively do anything. + +Possible errors messages: + +- `var-naming[non-string]`: Variables names must be strings. +- `var-naming[non-ascii]`: Variables names must be ASCII. +- `var-naming[no-keyword]`: Variables names must not be Python keywords. +- `var-naming[no-jinja]`: Variables names must not contain jinja2 templating. +- `var-naming[pattern]`: Variables names should match ... regex. +- `var-naming[no-role-prefix]`: Variables names from within roles should use + `role_name_` as a prefix. +- `var-naming[no-reserved]`: Variables names must not be Ansible reserved names. +- `var-naming[read-only]`: This special variable is read-only. + +!!! note + + When using `include_role` or `import_role` with `vars`, vars should start + with included role name prefix. As this role might not be compliant + with this rule yet, you might need to temporarily disable this rule using + a `# noqa: var-naming[no-role-prefix]` comment. + +## Settings + +This rule behavior can be changed by altering the below settings: + +```yaml +# .ansible-lint +var_naming_pattern: "^[a-z_][a-z0-9_]*$" +``` + +## Problematic Code + +```yaml +--- +- name: Example playbook + hosts: localhost + vars: + CamelCase: true # <- Contains a mix of lowercase and uppercase characters. + ALL_CAPS: bar # <- Contains only uppercase characters. + v@r!able: baz # <- Contains special characters. + hosts: [] # <- hosts is an Ansible reserved name + role_name: boo # <-- invalid as being Ansible special magic variable +``` + +## Correct Code + +```yaml +--- +- name: Example playbook + hosts: localhost + vars: + lowercase: true # <- Contains only lowercase characters. + no_caps: bar # <- Does not contains uppercase characters. + variable: baz # <- Does not contain special characters. + my_hosts: [] # <- Does not use a reserved names. 
+ my_role_name: boo +``` + +[cop]: https://redhat-cop.github.io/automation-good-practices/#_naming_things +[var-names]: + https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#creating-valid-variable-names +[magic-vars]: + https://docs.ansible.com/ansible/latest/reference_appendices/special_variables.html diff --git a/src/ansiblelint/rules/var_naming.py b/src/ansiblelint/rules/var_naming.py new file mode 100644 index 0000000..389530d --- /dev/null +++ b/src/ansiblelint/rules/var_naming.py @@ -0,0 +1,370 @@ +"""Implementation of var-naming rule.""" +from __future__ import annotations + +import keyword +import re +import sys +from typing import TYPE_CHECKING, Any + +from ansible.parsing.yaml.objects import AnsibleUnicode +from ansible.vars.reserved import get_reserved_names + +from ansiblelint.config import options +from ansiblelint.constants import ANNOTATION_KEYS, LINE_NUMBER_KEY, RC +from ansiblelint.errors import MatchError +from ansiblelint.file_utils import Lintable +from ansiblelint.rules import AnsibleLintRule, RulesCollection +from ansiblelint.runner import Runner +from ansiblelint.skip_utils import get_rule_skips_from_line +from ansiblelint.utils import parse_yaml_from_file + +if TYPE_CHECKING: + from ansiblelint.utils import Task + + +class VariableNamingRule(AnsibleLintRule): + """All variables should be named using only lowercase and underscores.""" + + id = "var-naming" + severity = "MEDIUM" + tags = ["idiom"] + version_added = "v5.0.10" + needs_raw_task = True + re_pattern_str = options.var_naming_pattern or "^[a-z_][a-z0-9_]*$" + re_pattern = re.compile(re_pattern_str) + reserved_names = get_reserved_names() + # List of special variables that should be treated as read-only. This list + # does not include connection variables, which we expect users to tune in + # specific cases. + # https://docs.ansible.com/ansible/latest/reference_appendices/special_variables.html + read_only_names = { + "ansible_check_mode", + "ansible_collection_name", + "ansible_config_file", + "ansible_dependent_role_names", + "ansible_diff_mode", + "ansible_forks", + "ansible_index_var", + "ansible_inventory_sources", + "ansible_limit", + "ansible_local", # special fact + "ansible_loop", + "ansible_loop_var", + "ansible_parent_role_names", + "ansible_parent_role_paths", + "ansible_play_batch", + "ansible_play_hosts", + "ansible_play_hosts_all", + "ansible_play_name", + "ansible_play_role_names", + "ansible_playbook_python", + "ansible_role_name", + "ansible_role_names", + "ansible_run_tags", + "ansible_search_path", + "ansible_skip_tags", + "ansible_verbosity", + "ansible_version", + "group_names", + "groups", + "hostvars", + "inventory_dir", + "inventory_file", + "inventory_hostname", + "inventory_hostname_short", + "omit", + "play_hosts", + "playbook_dir", + "role_name", + "role_names", + "role_path", + } + + # These special variables are used by Ansible but we allow users to set + # them as they might need it in certain cases. 
+ allowed_special_names = { + "ansible_facts", + "ansible_become_user", + "ansible_connection", + "ansible_host", + "ansible_python_interpreter", + "ansible_user", + "ansible_remote_tmp", # not included in docs + } + _ids = { + "var-naming[no-reserved]": "Variables names must not be Ansible reserved names.", + "var-naming[no-jinja]": "Variables names must not contain jinja2 templating.", + "var-naming[pattern]": f"Variables names should match {re_pattern_str} regex.", + } + + # pylint: disable=too-many-return-statements + def get_var_naming_matcherror( + self, + ident: str, + *, + prefix: str = "", + ) -> MatchError | None: + """Return a MatchError if the variable name is not valid, otherwise None.""" + if not isinstance(ident, str): # pragma: no cover + return MatchError( + tag="var-naming[non-string]", + message="Variables names must be strings.", + rule=self, + ) + + if ident in ANNOTATION_KEYS or ident in self.allowed_special_names: + return None + + try: + ident.encode("ascii") + except UnicodeEncodeError: + return MatchError( + tag="var-naming[non-ascii]", + message=f"Variables names must be ASCII. ({ident})", + rule=self, + ) + + if keyword.iskeyword(ident): + return MatchError( + tag="var-naming[no-keyword]", + message=f"Variables names must not be Python keywords. ({ident})", + rule=self, + ) + + if ident in self.reserved_names: + return MatchError( + tag="var-naming[no-reserved]", + message=f"Variables names must not be Ansible reserved names. ({ident})", + rule=self, + ) + + if ident in self.read_only_names: + return MatchError( + tag="var-naming[read-only]", + message=f"This special variable is read-only. ({ident})", + rule=self, + ) + + # Reject any use of jinja2 templating inside variable names + if "{{" in ident: + return MatchError( + tag="var-naming[no-jinja]", + message="Variables names must not contain jinja2 templating.", + rule=self, + ) + + if not bool(self.re_pattern.match(ident)): + return MatchError( + tag="var-naming[pattern]", + message=f"Variables names should match {self.re_pattern_str} regex.
({ident})", + rule=self, + ) + + if prefix and not ident.startswith(f"{prefix}_"): + return MatchError( + tag="var-naming[no-role-prefix]", + message="Variables names from within roles should use role_name_ as a prefix.", + rule=self, + ) + return None + + def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]: + """Return matches found for a specific playbook.""" + results: list[MatchError] = [] + raw_results: list[MatchError] = [] + + if not data or file.kind not in ("tasks", "handlers", "playbook", "vars"): + return results + # If the Play uses the 'vars' section to set variables + our_vars = data.get("vars", {}) + for key in our_vars: + match_error = self.get_var_naming_matcherror(key) + if match_error: + match_error.filename = str(file.path) + match_error.lineno = ( + key.ansible_pos[1] + if isinstance(key, AnsibleUnicode) + else our_vars[LINE_NUMBER_KEY] + ) + raw_results.append(match_error) + if raw_results: + lines = file.content.splitlines() + for match in raw_results: + # lineno starts with 1, not zero + skip_list = get_rule_skips_from_line( + line=lines[match.lineno - 1], + lintable=file, + ) + if match.rule.id not in skip_list and match.tag not in skip_list: + results.append(match) + + return results + + def matchtask( + self, + task: Task, + file: Lintable | None = None, + ) -> list[MatchError]: + """Return matches for task based variables.""" + results = [] + prefix = "" + filename = "" if file is None else str(file.path) + if file and file.parent and file.parent.kind == "role": + prefix = file.parent.path.name + ansible_module = task["action"]["__ansible_module__"] + # If the task uses the 'vars' section to set variables + our_vars = task.get("vars", {}) + if ansible_module in ("include_role", "import_role"): + action = task["action"] + if isinstance(action, dict): + role_fqcn = action.get("name", "") + prefix = role_fqcn.split("/" if "/" in role_fqcn else ".")[-1] + else: + prefix = "" + for key in our_vars: + match_error = self.get_var_naming_matcherror(key, prefix=prefix) + if match_error: + match_error.filename = filename + match_error.lineno = our_vars[LINE_NUMBER_KEY] + match_error.message += f" (vars: {key})" + results.append(match_error) + + # If the task uses the 'set_fact' module + if ansible_module == "set_fact": + for key in filter( + lambda x: isinstance(x, str) + and not x.startswith("__") + and x != "cacheable", + task["action"].keys(), + ): + match_error = self.get_var_naming_matcherror(key, prefix=prefix) + if match_error: + match_error.filename = filename + match_error.lineno = task["action"][LINE_NUMBER_KEY] + match_error.message += f" (set_fact: {key})" + results.append(match_error) + + # If the task registers a variable + registered_var = task.get("register", None) + if registered_var: + match_error = self.get_var_naming_matcherror(registered_var, prefix=prefix) + if match_error: + match_error.message += f" (register: {registered_var})" + match_error.filename = filename + match_error.lineno = task[LINE_NUMBER_KEY] + results.append(match_error) + + return results + + def matchyaml(self, file: Lintable) -> list[MatchError]: + """Return matches for variables defined in vars files.""" + results: list[MatchError] = [] + raw_results: list[MatchError] = [] + meta_data: dict[AnsibleUnicode, Any] = {} + filename = "" if file is None else str(file.path) + + if str(file.kind) == "vars" and file.data: + meta_data = parse_yaml_from_file(str(file.path)) + for key in meta_data: + match_error = self.get_var_naming_matcherror(key) + if match_error: 
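+ # Record the file and the key's source line so that noqa skip comments on that line are honored below.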
+ match_error.filename = filename + match_error.lineno = key.ansible_pos[1] + match_error.message += f" (vars: {key})" + raw_results.append(match_error) + if raw_results: + lines = file.content.splitlines() + for match in raw_results: + # lineno starts with 1, not zero + skip_list = get_rule_skips_from_line( + line=lines[match.lineno - 1], + lintable=file, + ) + if match.rule.id not in skip_list and match.tag not in skip_list: + results.append(match) + else: + results.extend(super().matchyaml(file)) + return results + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + from ansiblelint.testing import ( # pylint: disable=ungrouped-imports + run_ansible_lint, + ) + + @pytest.mark.parametrize( + ("file", "expected"), + ( + pytest.param("examples/playbooks/rule-var-naming-fail.yml", 7, id="0"), + pytest.param("examples/Taskfile.yml", 0, id="1"), + ), + ) + def test_invalid_var_name_playbook(file: str, expected: int) -> None: + """Test rule matches.""" + rules = RulesCollection(options=options) + rules.register(VariableNamingRule()) + results = Runner(Lintable(file), rules=rules).run() + assert len(results) == expected + for result in results: + assert result.rule.id == VariableNamingRule.id + # We are not checking line numbers because they can vary between + # different versions of ruamel.yaml (and depending on presence/absence + # of its c-extension) + + def test_invalid_var_name_varsfile( + default_rules_collection: RulesCollection, + ) -> None: + """Test rule matches.""" + results = Runner( + Lintable("examples/playbooks/vars/rule_var_naming_fail.yml"), + rules=default_rules_collection, + ).run() + expected_errors = ( + ("schema[vars]", 1), + ("var-naming[pattern]", 2), + ("var-naming[pattern]", 6), + ("var-naming[no-jinja]", 7), + ("var-naming[no-keyword]", 9), + ("var-naming[non-ascii]", 10), + ("var-naming[no-reserved]", 11), + ("var-naming[read-only]", 12), + ) + assert len(results) == len(expected_errors) + for idx, result in enumerate(results): + assert result.tag == expected_errors[idx][0] + assert result.lineno == expected_errors[idx][1] + + def test_var_naming_with_pattern() -> None: + """Test rule matches.""" + role_path = "examples/roles/var_naming_pattern/tasks/main.yml" + conf_path = "examples/roles/var_naming_pattern/.ansible-lint" + result = run_ansible_lint( + f"--config-file={conf_path}", + role_path, + ) + assert result.returncode == RC.SUCCESS + assert "var-naming" not in result.stdout + + def test_var_naming_with_include_tasks_and_vars() -> None: + """Test with include tasks and vars.""" + role_path = "examples/roles/var_naming_pattern/tasks/include_task_with_vars.yml" + result = run_ansible_lint(role_path) + assert result.returncode == RC.SUCCESS + assert "var-naming" not in result.stdout + + def test_var_naming_with_set_fact_and_cacheable() -> None: + """Test with include tasks and vars.""" + role_path = "examples/roles/var_naming_pattern/tasks/cacheable_set_fact.yml" + result = run_ansible_lint(role_path) + assert result.returncode == RC.SUCCESS + assert "var-naming" not in result.stdout + + def test_var_naming_with_include_role_import_role() -> None: + """Test with include role and import role.""" + role_path = "examples/test_collection/roles/my_role/tasks/main.yml" + result = run_ansible_lint(role_path) + assert result.returncode == RC.SUCCESS + assert "var-naming" not in result.stdout diff --git a/src/ansiblelint/rules/yaml.md b/src/ansiblelint/rules/yaml.md new file mode 100644 index 
0000000..8dc56eb --- /dev/null +++ b/src/ansiblelint/rules/yaml.md @@ -0,0 +1,97 @@ +# yaml + +This rule checks YAML syntax and is an implementation of `yamllint`. + +You can disable YAML syntax violations by adding `yaml` to the `skip_list` in +your Ansible-lint configuration as follows: + +```yaml +skip_list: + - yaml +``` + +For more fine-grained control, disable violations for specific rules using tag +identifiers in the `yaml[yamllint_rule]` format as follows: + +```yaml +skip_list: + - yaml[trailing-spaces] + - yaml[indentation] +``` + +If you want Ansible-lint to report YAML syntax violations as warnings, and not +fatal errors, add tag identifiers to the `warn_list` in your configuration, for +example: + +```yaml +warn_list: + - yaml[document-start] +``` + +!!! warning + + You cannot use `tags: [skip_ansible_lint]` to disable this rule but you can + use [yamllint magic comments](https://yamllint.readthedocs.io/en/stable/disable_with_comments.html#disabling-checks-for-all-or-part-of-the-file) for tuning it. + +See the +[list of yamllint rules](https://yamllint.readthedocs.io/en/stable/rules.html) +for more information. + +Some of the detailed error codes that you might see are: + +- `yaml[brackets]` - _too few spaces inside empty brackets_, or _too many spaces + inside brackets_ +- `yaml[colons]` - _too many spaces before colon_, or _too many spaces after + colon_ +- `yaml[commas]` - _too many spaces before comma_, or _too few spaces after + comma_ +- `yaml[comments-indentation]` - _Comment not indented like content_ +- `yaml[comments]` - _Too few spaces before comment_, or _Missing starting space + in comment_ +- `yaml[document-start]` - _missing document start "---"_ or _found forbidden + document start "---"_ +- `yaml[empty-lines]` - _too many blank lines (...> ...)_ +- `yaml[indentation]` - _Wrong indentation: expected ... but found ..._ +- `yaml[key-duplicates]` - _Duplication of key "..." in mapping_ +- `yaml[new-line-at-end-of-file]` - _No new line character at the end of file_ +- `yaml[octal-values]`: forbidden implicit or explicit [octal](#octals) value +- `yaml[syntax]` - YAML syntax is broken +- `yaml[trailing-spaces]` - Spaces are found at the end of lines +- `yaml[truthy]` - _Truthy value should be one of ..._ + +## Octals + +As [YAML specification] regarding octal values changed at least 3 times in +[1.1], [1.2.0] and [1.2.2] we now require users to always add quotes around +octal values, so the YAML loaders will all load them as strings, providing a +consistent behavior. This is also safer as JSON does not support octal values +either. + +By default, yamllint does not check for octals but our custom default ruleset +for it does check these. If for some reason, you do not want to follow our +defaults, you can create a `.yamllint` file in your project and this will take +precedence over our defaults. + +## Problematic code + +```yaml +# Missing YAML document start. +foo: 0777 # <-- yaml[octal-values] +foo2: 0o777 # <-- yaml[octal-values] +foo2: ... # <-- yaml[key-duplicates] +bar: ... # <-- yaml[comments-indentation] +``` + +## Correct code + +```yaml +--- +foo: "0777" # <-- Explicitly quoting octal is less risky. +foo2: "0o777" # <-- Explicitly quoting octal is less risky. +bar: ... # Correct comment indentation. 
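+# Quoting keeps octal-looking values as strings, so they read the same under YAML 1.1 and 1.2 loaders.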
+``` + +[1.1]: https://yaml.org/spec/1.1/ +[1.2.0]: https://yaml.org/spec/1.2.0/ +[1.2.2]: https://yaml.org/spec/1.2.2/ +[yaml specification]: https://yaml.org/ diff --git a/src/ansiblelint/rules/yaml_rule.py b/src/ansiblelint/rules/yaml_rule.py new file mode 100644 index 0000000..4da4d41 --- /dev/null +++ b/src/ansiblelint/rules/yaml_rule.py @@ -0,0 +1,210 @@ +"""Implementation of yaml linting rule (yamllint integration).""" +from __future__ import annotations + +import logging +import sys +from collections.abc import Iterable +from typing import TYPE_CHECKING + +from yamllint.linter import run as run_yamllint + +from ansiblelint.constants import LINE_NUMBER_KEY, SKIPPED_RULES_KEY +from ansiblelint.file_utils import Lintable +from ansiblelint.rules import AnsibleLintRule +from ansiblelint.yaml_utils import load_yamllint_config + +if TYPE_CHECKING: + from typing import Any + + from ansiblelint.errors import MatchError + +_logger = logging.getLogger(__name__) + + +class YamllintRule(AnsibleLintRule): + """Violations reported by yamllint.""" + + id = "yaml" + severity = "VERY_LOW" + tags = ["formatting", "yaml"] + version_added = "v5.0.0" + config = load_yamllint_config() + has_dynamic_tags = True + link = "https://yamllint.readthedocs.io/en/stable/rules.html" + # ensure this rule runs before most of other common rules + _order = 1 + _ids = { + "yaml[anchors]": "", + "yaml[braces]": "", + "yaml[brackets]": "", + "yaml[colons]": "", + "yaml[commas]": "", + "yaml[comments-indentation]": "", + "yaml[comments]": "", + "yaml[document-end]": "", + "yaml[document-start]": "", + "yaml[empty-lines]": "", + "yaml[empty-values]": "", + "yaml[float-values]": "", + "yaml[hyphens]": "", + "yaml[indentation]": "", + "yaml[key-duplicates]": "", + "yaml[key-ordering]": "", + "yaml[line-length]": "", + "yaml[new-line-at-end-of-file]": "", + "yaml[new-lines]": "", + "yaml[octal-values]": "", + "yaml[quoted-strings]": "", + "yaml[trailing-spaces]": "", + "yaml[truthy]": "", + } + + def matchyaml(self, file: Lintable) -> list[MatchError]: + """Return matches found for a specific YAML text.""" + matches: list[MatchError] = [] + if str(file.base_kind) != "text/yaml": + return matches + + for problem in run_yamllint( + file.content, + YamllintRule.config, + filepath=file.path, + ): + self.severity = "VERY_LOW" + if problem.level == "error": + self.severity = "MEDIUM" + matches.append( + self.create_matcherror( + # yamllint does return lower-case sentences + message=problem.desc.capitalize(), + lineno=problem.line, + details="", + filename=file, + tag=f"yaml[{problem.rule}]", + ), + ) + return matches + + +def _combine_skip_rules(data: Any) -> set[str]: + """Return a consolidated list of skipped rules.""" + result = set(data.get(SKIPPED_RULES_KEY, [])) + tags = data.get("tags", []) + if tags and ( + isinstance(tags, Iterable) + and "skip_ansible_lint" in tags + or tags == "skip_ansible_lint" + ): + result.add("skip_ansible_lint") + return result + + +def _fetch_skips(data: Any, collector: dict[int, set[str]]) -> dict[int, set[str]]: + """Retrieve a dictionary with line: skips by looking recursively in given JSON structure.""" + if hasattr(data, "get") and data.get(LINE_NUMBER_KEY): + rules = _combine_skip_rules(data) + if rules: + collector[data.get(LINE_NUMBER_KEY)].update(rules) + if isinstance(data, Iterable) and not isinstance(data, str): + if isinstance(data, dict): + for _entry, value in data.items(): + _fetch_skips(value, collector) + else: # must be some kind of list + for entry in data: + if ( + entry + and 
hasattr(entry, "get") + and LINE_NUMBER_KEY in entry + and SKIPPED_RULES_KEY in entry + and entry[SKIPPED_RULES_KEY] + ): + collector[entry[LINE_NUMBER_KEY]].update(entry[SKIPPED_RULES_KEY]) + _fetch_skips(entry, collector) + return collector + + +# testing code to be loaded only with pytest or when executed the rule file +if "pytest" in sys.modules: + import pytest + + # pylint: disable=ungrouped-imports + from ansiblelint.config import options + from ansiblelint.rules import RulesCollection + from ansiblelint.runner import Runner + + @pytest.mark.parametrize( + ("file", "expected_kind", "expected"), + ( + pytest.param( + "examples/yamllint/invalid.yml", + "yaml", + [ + 'Missing document start "---"', + 'Duplication of key "foo" in mapping', + "Trailing spaces", + ], + id="invalid", + ), + pytest.param("examples/yamllint/valid.yml", "yaml", [], id="valid"), + pytest.param( + "examples/yamllint/line-length.yml", + "yaml", + ["Line too long (166 > 160 characters)"], + id="line-length", + ), + pytest.param( + "examples/yamllint/multi-document.yaml", + "yaml", + [], + id="multi-document", + ), + pytest.param( + "examples/yamllint/skipped-rule.yml", + "yaml", + [], + id="skipped-rule", + ), + pytest.param( + "examples/playbooks/rule-yaml-fail.yml", + "playbook", + [ + "Truthy value should be one of [false, true]", + "Truthy value should be one of [false, true]", + "Truthy value should be one of [false, true]", + ], + id="rule-yaml-fail", + ), + pytest.param( + "examples/playbooks/rule-yaml-pass.yml", + "playbook", + [], + id="rule-yaml-pass", + ), + ), + ) + @pytest.mark.filterwarnings("ignore::ansible_compat.runtime.AnsibleWarning") + def test_yamllint(file: str, expected_kind: str, expected: list[str]) -> None: + """Validate parsing of ansible output.""" + lintable = Lintable(file) + assert lintable.kind == expected_kind + + rules = RulesCollection(options=options) + rules.register(YamllintRule()) + results = Runner(lintable, rules=rules).run() + + assert len(results) == len(expected), results + for idx, result in enumerate(results): + assert result.filename.endswith(file) + assert expected[idx] in result.message + assert isinstance(result.tag, str) + assert result.tag.startswith("yaml[") + + def test_yamllint_has_help(default_rules_collection: RulesCollection) -> None: + """Asserts that we loaded markdown documentation in help property.""" + for rule in default_rules_collection: + if rule.id == "yaml": + assert rule.help is not None + assert len(rule.help) > 100 + break + else: # pragma: no cover + pytest.fail("No yaml rule found") diff --git a/src/ansiblelint/runner.py b/src/ansiblelint/runner.py new file mode 100644 index 0000000..9d3500d --- /dev/null +++ b/src/ansiblelint/runner.py @@ -0,0 +1,568 @@ +"""Runner implementation.""" +from __future__ import annotations + +import json +import logging +import multiprocessing +import multiprocessing.pool +import os +import re +import subprocess +import tempfile +import warnings +from dataclasses import dataclass +from fnmatch import fnmatch +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from ansible.errors import AnsibleError +from ansible.parsing.splitter import split_args +from ansible.parsing.yaml.constructor import AnsibleMapping +from ansible.plugins.loader import add_all_plugin_dirs +from ansible_compat.runtime import AnsibleWarning + +import ansiblelint.skip_utils +import ansiblelint.utils +from ansiblelint._internal.rules import ( + BaseRule, + LoadingFailureRule, + RuntimeErrorRule, + WarningRule, +) +from 
ansiblelint.app import App, get_app +from ansiblelint.constants import States +from ansiblelint.errors import LintWarning, MatchError, WarnSource +from ansiblelint.file_utils import Lintable, expand_dirs_in_lintables +from ansiblelint.logger import timed_info +from ansiblelint.rules.syntax_check import OUTPUT_PATTERNS, AnsibleSyntaxCheckRule +from ansiblelint.text import strip_ansi_escape +from ansiblelint.utils import ( + PLAYBOOK_DIR, + _include_children, + _roles_children, + _taskshandlers_children, + template, +) + +if TYPE_CHECKING: + from collections.abc import Generator + from typing import Callable + + from ansiblelint.config import Options + from ansiblelint.constants import FileType + from ansiblelint.rules import RulesCollection + +_logger = logging.getLogger(__name__) + + +@dataclass +class LintResult: + """Class that tracks result of linting.""" + + matches: list[MatchError] + files: set[Lintable] + + +class Runner: + """Runner class performs the linting process.""" + + # pylint: disable=too-many-arguments,too-many-instance-attributes + def __init__( + self, + *lintables: Lintable | str | Path, + rules: RulesCollection, + tags: frozenset[Any] = frozenset(), + skip_list: list[str] | None = None, + exclude_paths: list[str] | None = None, + verbosity: int = 0, + checked_files: set[Lintable] | None = None, + project_dir: str | None = None, + ) -> None: + """Initialize a Runner instance.""" + self.rules = rules + self.lintables: set[Lintable] = set() + self.project_dir = os.path.abspath(project_dir) if project_dir else None + + if skip_list is None: + skip_list = [] + if exclude_paths is None: + exclude_paths = [] + + # Assure consistent type and configure given lintables as explicit (so + # excludes paths would not apply on them). + for item in lintables: + if not isinstance(item, Lintable): + item = Lintable(item) + item.explicit = True + self.lintables.add(item) + + # Expand folders (roles) to their components + expand_dirs_in_lintables(self.lintables) + + self.tags = tags + self.skip_list = skip_list + self._update_exclude_paths(exclude_paths) + self.verbosity = verbosity + if checked_files is None: + checked_files = set() + self.checked_files = checked_files + + def _update_exclude_paths(self, exclude_paths: list[str]) -> None: + if exclude_paths: + # These will be (potentially) relative paths + paths = ansiblelint.file_utils.expand_paths_vars(exclude_paths) + # Since ansiblelint.utils.find_children returns absolute paths, + # and the list of files we create in `Runner.run` can contain both + # relative and absolute paths, we need to cover both bases. + self.exclude_paths = paths + [os.path.abspath(p) for p in paths] + else: + self.exclude_paths = [] + + def is_excluded(self, lintable: Lintable) -> bool: + """Verify if a file path should be excluded.""" + # Any will short-circuit as soon as something returns True, but will + # be poor performance for the case where the path under question is + # not excluded. + + # Exclusions should be evaluated only using absolute paths in order + # to work correctly. 
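+ # Each exclude entry below is tried as an absolute-path prefix, as a Path.match() pattern, and as an fnmatch() glob against both the absolute path and the lintable itself.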
+ + # Explicit lintables are never excluded + if lintable.explicit: + return False + + abs_path = str(lintable.abspath) + if self.project_dir and not abs_path.startswith(self.project_dir): + _logger.debug( + "Skipping %s as it is outside of the project directory.", + abs_path, + ) + return True + + return any( + abs_path.startswith(path) + or lintable.path.match(path) + or fnmatch(str(abs_path), path) + or fnmatch(str(lintable), path) + for path in self.exclude_paths + ) + + def run(self) -> list[MatchError]: + """Execute the linting process.""" + matches: list[MatchError] = [] + with warnings.catch_warnings(record=True) as captured_warnings: + warnings.simplefilter("always") + matches = self._run() + for warn in captured_warnings: + # Silence Ansible runtime warnings that are unactionable + # https://github.com/ansible/ansible-lint/issues/3216 + if warn.category is AnsibleWarning and isinstance(warn.source, dict): + msg = warn.source["msg"] + if msg.startswith( + "Falling back to Ansible unique filter as Jinja2 one failed", + ): + continue + # For the moment we are ignoring deprecation warnings as Ansible + # modules outside current content can generate them and user + # might not be able to do anything about them. + if warn.category is DeprecationWarning: + continue + if warn.category is LintWarning: + filename: None | Lintable = None + if isinstance(warn.source, WarnSource): + match = MatchError( + message=warn.source.message or warn.category.__name__, + rule=WarningRule(), + filename=warn.source.filename.filename, + tag=warn.source.tag, + lineno=warn.source.lineno, + ) + else: + filename = warn.source + match = MatchError( + message=warn.message + if isinstance(warn.message, str) + else "?", + rule=WarningRule(), + filename=str(filename), + ) + matches.append(match) + continue + _logger.warning( + "%s:%s %s %s", + warn.filename, + warn.lineno or 1, + warn.category.__name__, + warn.message, + ) + return matches + + def _run(self) -> list[MatchError]: + """Run the linting (inner loop).""" + files: list[Lintable] = [] + matches: list[MatchError] = [] + + # remove exclusions + for lintable in self.lintables.copy(): + if self.is_excluded(lintable): + _logger.debug("Excluded %s", lintable) + self.lintables.remove(lintable) + continue + if isinstance(lintable.data, States) and lintable.exc: + lintable.exc.__class__.__name__.lower() + matches.append( + MatchError( + lintable=lintable, + message=str(lintable.exc), + details=str(lintable.exc.__cause__), + rule=LoadingFailureRule(), + tag=f"load-failure[{lintable.exc.__class__.__name__.lower()}]", + ), + ) + lintable.stop_processing = True + # identify missing files/folders + if not lintable.path.exists(): + matches.append( + MatchError( + lintable=lintable, + message="File or directory not found.", + rule=LoadingFailureRule(), + tag="load-failure[not-found]", + ), + ) + + # -- phase 1 : syntax check in parallel -- + app = get_app(offline=True) + + def worker(lintable: Lintable) -> list[MatchError]: + # pylint: disable=protected-access + return self._get_ansible_syntax_check_matches( + lintable=lintable, + app=app, + ) + + for lintable in self.lintables: + if lintable.kind not in ("playbook", "role") or lintable.stop_processing: + continue + files.append(lintable) + + # avoid resource leak warning, https://github.com/python/cpython/issues/90549 + # pylint: disable=unused-variable + global_resource = multiprocessing.Semaphore() # noqa: F841 + + pool = multiprocessing.pool.ThreadPool(processes=multiprocessing.cpu_count()) + return_list = 
pool.map(worker, files, chunksize=1) + pool.close() + pool.join() + for data in return_list: + matches.extend(data) + + matches = self._filter_excluded_matches(matches) + # -- phase 2 --- + if not matches: + # do our processing only when ansible syntax check passed in order + # to avoid causing runtime exceptions. Our processing is not as + # resilient to be able process garbage. + matches.extend(self._emit_matches(files)) + + # remove duplicates from files list + files = [value for n, value in enumerate(files) if value not in files[:n]] + + for file in self.lintables: + if file in self.checked_files or not file.kind: + continue + _logger.debug( + "Examining %s of type %s", + ansiblelint.file_utils.normpath(file.path), + file.kind, + ) + + matches.extend( + self.rules.run(file, tags=set(self.tags), skip_list=self.skip_list), + ) + + # update list of checked files + self.checked_files.update(self.lintables) + + # remove any matches made inside excluded files + matches = self._filter_excluded_matches(matches) + + return sorted(set(matches)) + + # pylint: disable=too-many-locals + def _get_ansible_syntax_check_matches( + self, + lintable: Lintable, + app: App, + ) -> list[MatchError]: + """Run ansible syntax check and return a list of MatchError(s).""" + default_rule: BaseRule = AnsibleSyntaxCheckRule() + fh = None + results = [] + if lintable.kind not in ("playbook", "role"): + return [] + + with timed_info( + "Executing syntax check on %s %s", + lintable.kind, + lintable.path, + ): + if lintable.kind == "role": + playbook_text = f""" +--- +- name: Temporary playbook for role syntax check + hosts: localhost + tasks: + - ansible.builtin.import_role: + name: {lintable.path.expanduser()!s} +""" + # pylint: disable=consider-using-with + fh = tempfile.NamedTemporaryFile(mode="w", suffix=".yml", prefix="play") + fh.write(playbook_text) + fh.flush() + playbook_path = fh.name + else: + playbook_path = str(lintable.path.expanduser()) + # To avoid noisy warnings we pass localhost as current inventory: + # [WARNING]: No inventory was parsed, only implicit localhost is available + # [WARNING]: provided hosts list is empty, only localhost is available. 
Note that the implicit localhost does not match 'all' + cmd = [ + "ansible-playbook", + "-i", + "localhost,", + "--syntax-check", + playbook_path, + ] + if app.options.extra_vars: + cmd.extend(["--extra-vars", json.dumps(app.options.extra_vars)]) + + # To reduce noisy warnings like + # CryptographyDeprecationWarning: Blowfish has been deprecated + # https://github.com/paramiko/paramiko/issues/2038 + env = app.runtime.environ.copy() + env["PYTHONWARNINGS"] = "ignore" + + run = subprocess.run( + cmd, + stdin=subprocess.PIPE, + capture_output=True, + shell=False, # needed when command is a list # noqa: S603 + text=True, + check=False, + env=env, + ) + + if run.returncode != 0: + message = None + filename = lintable + lineno = 1 + column = None + + stderr = strip_ansi_escape(run.stderr) + stdout = strip_ansi_escape(run.stdout) + if stderr: + details = stderr + if stdout: + details += "\n" + stdout + else: + details = stdout + + for pattern in OUTPUT_PATTERNS: + rule = default_rule + match = re.search(pattern.regex, stderr) + if match: + groups = match.groupdict() + title = groups.get("title", match.group(0)) + details = groups.get("details", "") + lineno = int(groups.get("line", 1)) + + if "filename" in groups: + filename = Lintable(groups["filename"]) + else: + filename = lintable + column = int(groups.get("column", 1)) + results.append( + MatchError( + message=title, + lintable=filename, + lineno=lineno, + column=column, + rule=rule, + details=details, + tag=f"{rule.id}[{pattern.tag}]", + ), + ) + + if not results: + rule = RuntimeErrorRule() + message = ( + f"Unexpected error code {run.returncode} from " + f"execution of: {' '.join(cmd)}" + ) + results.append( + MatchError( + message=message, + lintable=filename, + lineno=lineno, + column=column, + rule=rule, + details=details, + tag="", + ), + ) + + if fh: + fh.close() + return results + + def _filter_excluded_matches(self, matches: list[MatchError]) -> list[MatchError]: + return [ + match + for match in matches + if not self.is_excluded(match.lintable) + and match.tag not in match.lintable.line_skips[match.lineno] + ] + + def _emit_matches(self, files: list[Lintable]) -> Generator[MatchError, None, None]: + visited: set[Lintable] = set() + while visited != self.lintables: + for lintable in self.lintables - visited: + try: + children = self.find_children(lintable) + for child in children: + if self.is_excluded(child): + continue + self.lintables.add(child) + files.append(child) + except MatchError as exc: + if not exc.filename: # pragma: no branch + exc.filename = str(lintable.path) + exc.rule = LoadingFailureRule() + yield exc + except AttributeError: + yield MatchError(lintable=lintable, rule=LoadingFailureRule()) + visited.add(lintable) + + def find_children(self, lintable: Lintable) -> list[Lintable]: + """Traverse children of a single file or folder.""" + if not lintable.path.exists(): + return [] + playbook_dir = str(lintable.path.parent) + ansiblelint.utils.set_collections_basedir(lintable.path.parent) + add_all_plugin_dirs(playbook_dir or ".") + if lintable.kind == "role": + playbook_ds = AnsibleMapping({"roles": [{"role": str(lintable.path)}]}) + elif lintable.kind not in ("playbook", "tasks"): + return [] + else: + try: + playbook_ds = ansiblelint.utils.parse_yaml_from_file(str(lintable.path)) + except AnsibleError as exc: + raise SystemExit(exc) from exc + results = [] + # playbook_ds can be an AnsibleUnicode string, which we consider invalid + if isinstance(playbook_ds, str): + raise MatchError(lintable=lintable, 
rule=LoadingFailureRule()) + for item in ansiblelint.utils.playbook_items(playbook_ds): + # if lintable.kind not in ["playbook"]: + for child in self.play_children( + lintable.path.parent, + item, + lintable.kind, + playbook_dir, + ): + # We avoid processing parametrized children + path_str = str(child.path) + if "$" in path_str or "{{" in path_str: + continue + + # Repair incorrect paths obtained when old syntax was used, like: + # - include: simpletask.yml tags=nginx + valid_tokens = [] + for token in split_args(path_str): + if "=" in token: + break + valid_tokens.append(token) + path = " ".join(valid_tokens) + if path != path_str: + child.path = Path(path) + child.name = child.path.name + + results.append(child) + return results + + def play_children( + self, + basedir: Path, + item: tuple[str, Any], + parent_type: FileType, + playbook_dir: str, + ) -> list[Lintable]: + """Flatten the traversed play tasks.""" + # pylint: disable=unused-argument + delegate_map: dict[str, Callable[[str, Any, Any, FileType], list[Lintable]]] = { + "tasks": _taskshandlers_children, + "pre_tasks": _taskshandlers_children, + "post_tasks": _taskshandlers_children, + "block": _taskshandlers_children, + "include": _include_children, + "ansible.builtin.include": _include_children, + "import_playbook": _include_children, + "ansible.builtin.import_playbook": _include_children, + "roles": _roles_children, + "dependencies": _roles_children, + "handlers": _taskshandlers_children, + "include_tasks": _include_children, + "ansible.builtin.include_tasks": _include_children, + "import_tasks": _include_children, + "ansible.builtin.import_tasks": _include_children, + } + (k, v) = item + add_all_plugin_dirs(str(basedir.resolve())) + + if k in delegate_map and v: + v = template( + basedir, + v, + {"playbook_dir": PLAYBOOK_DIR or str(basedir.resolve())}, + fail_on_undefined=False, + ) + return delegate_map[k](str(basedir), k, v, parent_type) + return [] + + +def _get_matches(rules: RulesCollection, options: Options) -> LintResult: + lintables = ansiblelint.utils.get_lintables(opts=options, args=options.lintables) + + for rule in rules: + if "unskippable" in rule.tags: + for entry in (*options.skip_list, *options.warn_list): + if rule.id == entry or entry.startswith(f"{rule.id}["): + msg = f"Rule '{rule.id}' is unskippable, you cannot use it in 'skip_list' or 'warn_list'. Still, you could exclude the file." + raise RuntimeError(msg) + matches = [] + checked_files: set[Lintable] = set() + runner = Runner( + *lintables, + rules=rules, + tags=frozenset(options.tags), + skip_list=options.skip_list, + exclude_paths=options.exclude_paths, + verbosity=options.verbosity, + checked_files=checked_files, + project_dir=options.project_dir, + ) + matches.extend(runner.run()) + + # Assure we do not print duplicates and the order is consistent + matches = sorted(set(matches)) + + # Convert reported filenames into human readable ones, so we hide the + # fact we used temporary files when processing input from stdin. 
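+ # The first lintable with a matching filename provides the displayed name.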
+ for match in matches: + for lintable in lintables: + if match.filename == lintable.filename: + match.filename = lintable.name + break + + return LintResult(matches=matches, files=checked_files) diff --git a/src/ansiblelint/schemas/README.md b/src/ansiblelint/schemas/README.md new file mode 100644 index 0000000..6d986b0 --- /dev/null +++ b/src/ansiblelint/schemas/README.md @@ -0,0 +1,102 @@ +# Schemas for Ansible and its related tools + +[![ci](https://github.com/ansible-community/schemas/actions/workflows/task.yml/badge.svg)](https://github.com/ansible-community/schemas/actions/workflows/task.yml) +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +[![Repository License: MIT](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE) + +## About Schemas + +This project aims to generate JSON/YAML validation schemas for Ansible files +such as playbooks, tasks, requirements, meta or vars, and also for Molecule +configuration. + +Keep in mind that these schemas will limit your freedom of choice regarding the +syntax you can use to write Ansible tasks, as they do not allow some historical +forms which are still allowed by Ansible itself. + +Not every file accepted by Ansible will pass these schemas, but we do expect that +any file that passes these schemas will be accepted by Ansible. + +- YAML 1.2 booleans are required as `true` or `false`, while Ansible itself + allows you to use more relaxed forms like `yes` or `no`. +- Inline actions are not allowed, as the schema cannot validate them. +- Non-built-in modules must be called using `action:` blocks. +- Module arguments are not yet verified, but we plan to implement this. +- Our schemas are strict about the usage of jinja2 templating and require `{{` on + arguments declared as **explicit**, while forbidding the use of `{{` on those + marked as **implicit**. See the section below for details. + +As these schemas are still experimental, creating pull requests to improve them +is of much greater help than reporting bugs. You are still welcome to report +bugs, but expect them to wait a long time until someone finds time to fix them. + +If you want to help improve the schemas, have a look at the +[development documentation](CONTRIBUTING.md). + +## Schema Bundle + +We are currently migrating towards a single [ansible.json](/f/ansible.json) +schema bundle, one that contains subschema definitions for all the supported +file types. + +To configure your validator or editor to use the bundle, use the new URLs below; +the part after the `#` in each URL is essential as it tells the loader which +subschema to use. You can also look at our +[settings.json](.vscode/settings.json) to understand how to configure the +[vscode-yaml](https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml) +extension. + +- [playbook subschema url](https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible.json#/$defs/playbook) +- [tasks subschema url](https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible.json#/$defs/tasks) + +## Jinja2 implicit vs explicit templating + +While Ansible might allow you to combine implicit and explicit templating, our +schemas will not. They only allow you to use the recommended form, either by +forbidding the curly braces on implicit arguments or by requiring them on +explicit ones.
+ +Examples: + +```yaml +- name: some task + command: echo 123 + register: result + vars: + become_method_var: sudo + become_method: become_method_var # <-- schema will not allow this + # become_method: "{{ become_method_var }}" # <-- that is allowed +``` + +### How to find out if a field is implicit or explicit? + +Assuming that your keyword is `failed_when`, you can run +`ansible-doc -t keyword failed_when`, which will give you the following output: + +```yaml +failed_when: + applies_to: + - Task + description: + Conditional expression that overrides the task's normal 'failed' status. + priority: 0 + template: implicit + type: list +``` + +As you can see, the `template` field tells you whether it is implicit or explicit. + +By being more restrictive, the schema protects you from common accidents, like +writing a plain string in an explicit field, which would always evaluate as true +instead of being evaluated as a jinja2 template. + +## Activating the schemas + +At this moment, installing the +[Ansible VS Code Extension by Red Hat](https://marketplace.visualstudio.com/items?itemName=redhat.ansible) +will activate these schemas. The file patterns used to trigger their use can be +seen +[here](https://github.com/ansible-community/vscode-ansible/blob/master/package.json#L44-L94). + +Because these schemas are generic, you can easily use them with any validator +that supports them. diff --git a/src/ansiblelint/schemas/__init__.py b/src/ansiblelint/schemas/__init__.py new file mode 100644 index 0000000..f1dad48 --- /dev/null +++ b/src/ansiblelint/schemas/__init__.py @@ -0,0 +1 @@ +"""Module containing cached JSON schemas.""" diff --git a/src/ansiblelint/schemas/__main__.py b/src/ansiblelint/schemas/__main__.py new file mode 100644 index 0000000..e3ec8ae --- /dev/null +++ b/src/ansiblelint/schemas/__main__.py @@ -0,0 +1,120 @@ +"""Module containing cached JSON schemas.""" +import json +import logging +import os +import sys +import time +import urllib.request +from collections import defaultdict +from functools import cache +from http.client import HTTPException +from pathlib import Path +from typing import Any +from urllib.request import Request + +_logger = logging.getLogger(__package__) + +# Maps kinds to JSON schemas +# See https://www.schemastore.org/json/ +store_file = Path(f"{__file__}/../__store__.json").resolve() +with store_file.open(encoding="utf-8") as json_file: + JSON_SCHEMAS = json.load(json_file) + + +class SchemaCacheDict(defaultdict): # type: ignore[type-arg] + """Caching schema store.""" + + def __missing__(self, key: str) -> Any: + """Load schema on its first use.""" + value = get_schema(key) + self[key] = value + return value + + +@cache +def get_schema(kind: str) -> Any: + """Return the schema for the given kind.""" + schema_file = Path(__file__).parent / f"{kind}.json" + with schema_file.open(encoding="utf-8") as f: + return json.load(f) + + +_schema_cache = SchemaCacheDict() + + +def refresh_schemas(min_age_seconds: int = 3600 * 24) -> int: + """Refresh JSON schemas by downloading latest versions. + + Returns number of changed schemas.
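+ A return of -1 means the schema store file is not writable, while 0 means the check was skipped because the schemas were already checked recently.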
+ """ + age = int(time.time() - store_file.stat().st_mtime) + + # never check for updated schemas more than once a day + if min_age_seconds > age: + return 0 + if not os.access(store_file, os.W_OK): # pragma: no cover + _logger.debug( + "Skipping schema update due to lack of writing rights on %s", + store_file, + ) + return -1 + _logger.debug("Checking for updated schemas...") + + changed = 0 + for kind, data in JSON_SCHEMAS.items(): + url = data["url"] + if "#" in url: + msg = f"Schema URLs cannot contain # due to python-jsonschema limitation: {url}" + raise RuntimeError(msg) + path = Path(__file__).parent.resolve() / f"{kind}.json" + _logger.debug("Refreshing %s schema ...", kind) + request = Request(url) + etag = data.get("etag", "") + if etag: + request.add_header("If-None-Match", f'"{data.get("etag")}"') + try: + with urllib.request.urlopen(request, timeout=10) as response: # noqa: S310 + if response.status == 200: + content = response.read().decode("utf-8").rstrip() + etag = response.headers["etag"].strip('"') + if etag != data.get("etag", ""): + JSON_SCHEMAS[kind]["etag"] = etag + changed += 1 + with path.open("w", encoding="utf-8") as f_out: + _logger.info("Schema %s was updated", kind) + f_out.write(content) + f_out.write("\n") # prettier/editors + f_out.truncate() + os.fsync(f_out.fileno()) + # unload possibly loaded schema + if kind in _schema_cache: # pragma: no cover + del _schema_cache[kind] + except (ConnectionError, OSError, HTTPException) as exc: + if ( + isinstance(exc, urllib.error.HTTPError) + and getattr(exc, "code", None) == 304 + ): + _logger.debug("Schema %s is not modified", url) + continue + # In case of networking issues, we just stop and use last-known good + _logger.debug("Skipped schema refresh due to unexpected exception: %s", exc) + break + if changed: # pragma: no cover + with store_file.open("w", encoding="utf-8") as f_out: + # formatting should match our .prettierrc.yaml + json.dump(JSON_SCHEMAS, f_out, indent=2, sort_keys=True) + f_out.write("\n") # prettier and editors in general + # clear schema cache + get_schema.cache_clear() + else: + store_file.touch() + changed = 1 + return changed + + +if __name__ == "__main__": + if refresh_schemas(60 * 10): # pragma: no cover + print("Schemas were updated.") # noqa: T201 + sys.exit(1) + else: # pragma: no cover + print("Schemas not updated", 0) # noqa: T201 diff --git a/src/ansiblelint/schemas/__store__.json b/src/ansiblelint/schemas/__store__.json new file mode 100644 index 0000000..d4bcdca --- /dev/null +++ b/src/ansiblelint/schemas/__store__.json @@ -0,0 +1,62 @@ +{ + "ansible-lint-config": { + "etag": "0ec39ba1ca9c20aea463f7f536c6903c88288f47c1b2b2b3d53b527c293f8cc3", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible-lint-config.json" + }, + "ansible-navigator-config": { + "etag": "dd0f0dea68266ae61e5a8d6aed0a1279fdee16f2da4911bc27970241df80f798", + "url": "https://raw.githubusercontent.com/ansible/ansible-navigator/main/src/ansible_navigator/data/ansible-navigator.json" + }, + "changelog": { + "etag": "593ed5eef7c1e670f3667de70d43a41a5138513bd9640a85cbe8cb6faaa59793", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/changelog.json" + }, + "execution-environment": { + "etag": "f3abb1716134227ccd667607840dd7bdebfd02a8980603df031282126dc78264", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/execution-environment.json" + }, + "galaxy": { + "etag": 
"61f38feb51dc7eaff43ab22f3759b3a5202776ee75ee4204f07135282817f724", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/galaxy.json" + }, + "inventory": { + "etag": "3dcd4890bf31e634a7c4f6138286a42b4985393f210f7ffaa840c2127876aa55", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/inventory.json" + }, + "meta": { + "etag": "0f376059285181985711b4271a6ff34a8dde662b9fc221d09bdcd64e4fbf86bf", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/meta.json" + }, + "meta-runtime": { + "etag": "448b614e9d4411b82d220950b7a415c248cc75d5431f9b8058c771a595d40163", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/meta-runtime.json" + }, + "molecule": { + "etag": "3456b2e5aaa02fde359ff147cff81d01a37c07f5e10542b6b8b61aaaf8c756a6", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/molecule.json" + }, + "playbook": { + "etag": "acbd5edfc66279f8c3f6f8a99d0874669a254983ace5e4a2cce6105489ab3e21", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/playbook.json" + }, + "requirements": { + "etag": "93c6ccd1f79f58134795b85f9b1193d6e18417dd01a9d1f37d9f247562a1e6fe", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/requirements.json" + }, + "role-arg-spec": { + "etag": "498a6f716c7e99bd474ae9e7d34b3f43fbf2aad750f769392fc8e29fa590be6c", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/role-arg-spec.json" + }, + "rulebook": { + "etag": "f0bbd0ecd656b2298febccc6da0ecf4a7bd239cc112b9de8292c1f50bad612e0", + "url": "https://raw.githubusercontent.com/ansible/ansible-rulebook/main/ansible_rulebook/schema/ruleset_schema.json" + }, + "tasks": { + "etag": "f9fbc0855680d1321fa3902181131d73838d922362d8dfb85a4f59402240cc07", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/tasks.json" + }, + "vars": { + "etag": "5d6c2c22a58f2b48c2a8d8d129f2516e4f17ffc78a2c9ba045eb5ede0ff749d7", + "url": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/vars.json" + } +} diff --git a/src/ansiblelint/schemas/ansible-lint-config.json b/src/ansiblelint/schemas/ansible-lint-config.json new file mode 100644 index 0000000..f7d50e4 --- /dev/null +++ b/src/ansiblelint/schemas/ansible-lint-config.json @@ -0,0 +1,289 @@ +{ + "$defs": { + "rule": { + "additionalProperties": false, + "properties": { + "exclude_paths": { + "items": { + "type": "string" + }, + "title": "Glob-like paths to be excluded.", + "type": "array" + } + }, + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible-lint-config.json", + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": false, + "examples": [ + ".ansible-lint", + ".config/ansible-lint.yml", + ".config/ansible-lint.yaml" + ], + "properties": { + "display_relative_path": { + "default": true, + "title": "Configure how to display file paths", + "type": "boolean" + }, + "enable_list": { + "items": { + "type": "string" + }, + "title": "Enable List", + "type": "array" + }, + "exclude_paths": { + "items": { + "type": "string" + }, + "title": "Exclude Paths", + "type": "array" + }, + "extra_vars": { + "title": "Extra Vars", + "type": "object" + }, + "kinds": { + "items": { + "additionalProperties": { + "type": 
"string" + }, + "type": "object" + }, + "title": "Kinds", + "type": "array" + }, + "loop_var_prefix": { + "title": "Loop Var Prefix", + "type": "string" + }, + "mock_modules": { + "items": { + "type": "string" + }, + "title": "Mock Modules", + "type": "array" + }, + "mock_roles": { + "items": { + "type": "string" + }, + "title": "Mock Roles", + "type": "array" + }, + "offline": { + "default": false, + "title": "Offline", + "type": "boolean" + }, + "only_builtins_allow_collections": { + "items": { + "type": "string" + }, + "title": "Only Builtins Allow Collections", + "type": "array" + }, + "only_builtins_allow_modules": { + "items": { + "type": "string" + }, + "title": "Only Builtins Allow Modules", + "type": "array" + }, + "parseable": { + "default": true, + "title": "Parseable", + "type": "boolean" + }, + "profile": { + "enum": [ + "min", + "basic", + "moderate", + "safety", + "shared", + "production", + null + ], + "title": "Profile", + "type": ["null", "string"] + }, + "progressive": { + "default": false, + "title": "Progressive (removed feature)", + "type": "boolean" + }, + "project_dir": { + "default": null, + "title": "Project Directory", + "type": ["string", "null"] + }, + "quiet": { + "default": true, + "title": "Quiet", + "type": "boolean" + }, + "rules": { + "additionalProperties": { + "$ref": "#/$defs/rule" + }, + "propertyNames": { + "oneOf": [ + { + "enum": [ + "command-instead-of-module", + "command-instead-of-shell", + "deprecated-bare-vars", + "deprecated-local-action", + "deprecated-module", + "empty-string-compare", + "fqcn", + "fqcn[action-core]", + "fqcn[action]", + "fqcn[canonical]", + "fqcn[keyword]", + "galaxy", + "galaxy[no-changelog]", + "galaxy[no-runtime]", + "galaxy[tags]", + "galaxy[version-incorrect]", + "galaxy[version-missing]", + "ignore-errors", + "inline-env-var", + "internal-error", + "jinja", + "jinja[invalid]", + "jinja[spacing]", + "key-order", + "latest", + "literal-compare", + "load-failure", + "load-failure[not-found]", + "loop-var-prefix", + "loop-var-prefix[missing]", + "loop-var-prefix[wrong]", + "meta-incorrect", + "meta-no-tags", + "meta-runtime", + "meta-video-links", + "name", + "name[casing]", + "name[play]", + "name[prefix]", + "name[template]", + "no-changed-when", + "no-handler", + "no-jinja-when", + "no-log-password", + "no-prompting", + "no-relative-paths", + "no-same-owner", + "no-tabs", + "only-builtins", + "package-latest", + "parser-error", + "partial-become", + "playbook-extension", + "risky-file-permissions", + "risky-octal", + "risky-shell-pipe", + "role-name", + "run-once", + "run-once[play]", + "run-once[task]", + "sanity", + "sanity[bad-ignore]", + "sanity[cannot-ignore]", + "schema", + "syntax-check", + "var-naming", + "yaml" + ], + "type": "string" + }, + { + "pattern": "^[a-z0-9-\\[\\]]+$", + "type": "string" + } + ] + }, + "title": "Rules specific configuration.", + "type": "object" + }, + "rulesdir": { + "items": { + "type": "string" + }, + "title": "Rulesdir", + "type": "array" + }, + "sarif_file": { + "default": null, + "title": "SARIF Output filename", + "type": ["null", "string"] + }, + "skip_action_validation": { + "default": false, + "title": "Skip Action Validation", + "type": "boolean" + }, + "skip_list": { + "items": { + "type": "string" + }, + "title": "Skip List", + "type": "array" + }, + "strict": { + "default": false, + "title": "Strict", + "type": "boolean" + }, + "tags": { + "items": { + "type": "string" + }, + "title": "Tags", + "type": "array" + }, + "task_name_prefix": { + "default": "{stem} | ", + 
"title": "Allow custom prefix for task[prefix]", + "type": "string" + }, + "use_default_rules": { + "default": true, + "title": "Use Default Rules", + "type": "boolean" + }, + "var_naming_pattern": { + "default": "^[a-z_][a-z0-9_]*$", + "title": "Regex used to verify variable names", + "type": "string" + }, + "verbosity": { + "default": 0, + "title": "Verbosity", + "type": "integer" + }, + "warn_list": { + "items": { + "type": "string" + }, + "title": "Warn List", + "type": "array" + }, + "write_list": { + "items": { + "type": "string" + }, + "title": "Write List", + "type": "array" + } + }, + "title": "Ansible-lint Configuration Schema", + "type": "object" +} diff --git a/src/ansiblelint/schemas/ansible-navigator-config.json b/src/ansiblelint/schemas/ansible-navigator-config.json new file mode 100644 index 0000000..e81a878 --- /dev/null +++ b/src/ansiblelint/schemas/ansible-navigator-config.json @@ -0,0 +1,530 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": false, + "properties": { + "ansible-navigator": { + "additionalProperties": false, + "properties": { + "ansible": { + "additionalProperties": false, + "properties": { + "cmdline": { + "description": "Extra parameters passed to the corresponding command", + "type": "string" + }, + "config": { + "additionalProperties": false, + "properties": { + "help": { + "default": false, + "description": "Help options for ansible-config command in stdout mode", + "enum": [ + true, + false + ], + "type": "boolean" + }, + "path": { + "description": "Specify the path to the ansible configuration file", + "type": "string" + } + } + }, + "doc": { + "additionalProperties": false, + "properties": { + "help": { + "default": false, + "description": "Help options for ansible-doc command in stdout mode", + "enum": [ + true, + false + ], + "type": "boolean" + }, + "plugin": { + "additionalProperties": false, + "properties": { + "name": { + "description": "Specify the plugin name", + "type": "string" + }, + "type": { + "default": "module", + "description": "Specify the plugin type, 'become', 'cache', 'callback', 'cliconf', 'connection', 'filter', 'httpapi', 'inventory', 'keyword', 'lookup', 'module', 'netconf', 'role', 'shell', 'strategy', 'test' or 'vars'", + "enum": [ + "become", + "cache", + "callback", + "cliconf", + "connection", + "filter", + "httpapi", + "inventory", + "keyword", + "lookup", + "module", + "netconf", + "role", + "shell", + "strategy", + "test", + "vars" + ], + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "inventory": { + "additionalProperties": false, + "properties": { + "entries": { + "description": "Specify an inventory file path or comma separated host list", + "items": { + "type": "string" + }, + "type": "array" + }, + "help": { + "default": false, + "description": "Help options for ansible-inventory command in stdout mode", + "enum": [ + true, + false + ], + "type": "boolean" + } + } + }, + "playbook": { + "additionalProperties": false, + "properties": { + "help": { + "default": false, + "description": "Help options for ansible-playbook command in stdout mode", + "enum": [ + true, + false + ], + "type": "boolean" + }, + "path": { + "description": "Specify the playbook name", + "type": "string" + } + } + } + }, + "type": "object" + }, + "ansible-builder": { + "additionalProperties": false, + "properties": { + "help": { + "default": false, + "description": "Help options for ansible-builder command in stdout mode", + "enum": [ + true, + false + ], + "type": 
"boolean" + }, + "workdir": { + "default": ".", + "description": "Specify the path that contains ansible-builder manifest files", + "type": "string" + } + }, + "type": "object" + }, + "ansible-lint": { + "additionalProperties": false, + "properties": { + "config": { + "description": "Specify the path to the ansible-lint configuration file", + "type": "string" + }, + "lintables": { + "description": "Path to files on which to run ansible-lint", + "type": "string" + } + }, + "type": "object" + }, + "ansible-runner": { + "additionalProperties": false, + "properties": { + "artifact-dir": { + "description": "The directory path to store artifacts generated by ansible-runner", + "type": "string" + }, + "job-events": { + "default": false, + "description": "Write ansible-runner job_events in the artifact directory", + "enum": [ + true, + false + ], + "type": "boolean" + }, + "rotate-artifacts-count": { + "description": "Keep ansible-runner artifact directories, for last n runs, if set to 0 artifact directories won't be deleted", + "type": "integer" + }, + "timeout": { + "description": "The timeout value after which ansible-runner will forcefully stop the execution", + "type": "integer" + } + }, + "type": "object" + }, + "app": { + "default": "welcome", + "description": "Subcommands", + "enum": [ + "builder", + "collections", + "config", + "doc", + "exec", + "images", + "inventory", + "lint", + "replay", + "run", + "settings", + "welcome" + ], + "type": "string" + }, + "collection-doc-cache-path": { + "default": "~/.cache/ansible-navigator/collection_doc_cache.db", + "description": "The path to collection doc cache", + "type": "string" + }, + "color": { + "additionalProperties": false, + "properties": { + "enable": { + "default": true, + "description": "Enable the use of color for mode interactive and stdout", + "enum": [ + true, + false + ], + "type": "boolean" + }, + "osc4": { + "default": true, + "description": "Enable or disable terminal color changing support with OSC 4", + "enum": [ + true, + false + ], + "type": "boolean" + } + }, + "type": "object" + }, + "editor": { + "additionalProperties": false, + "properties": { + "command": { + "default": "vi +{line_number} {filename}", + "description": "Specify the editor command", + "type": "string" + }, + "console": { + "default": true, + "description": "Specify if the editor is console based", + "enum": [ + true, + false + ], + "type": "boolean" + } + }, + "type": "object" + }, + "enable-prompts": { + "default": false, + "description": "Enable prompts for password and in playbooks. 
This will set mode to stdout and disable playbook artifact creation", + "enum": [ + true, + false + ], + "type": "boolean" + }, + "exec": { + "additionalProperties": false, + "properties": { + "command": { + "default": "/bin/bash", + "description": "Specify the command to run within the execution environment", + "type": "string" + }, + "shell": { + "default": true, + "description": "Specify the exec command should be run in a shell", + "enum": [ + true, + false + ], + "type": "boolean" + } + }, + "type": "object" + }, + "execution-environment": { + "additionalProperties": false, + "properties": { + "container-engine": { + "default": "auto", + "description": "Specify the container engine (auto=podman then docker)", + "enum": [ + "auto", + "podman", + "docker" + ], + "type": "string" + }, + "container-options": { + "description": "Extra parameters passed to the container engine command", + "items": { + "type": "string" + }, + "type": "array" + }, + "enabled": { + "default": true, + "description": "Enable or disable the use of an execution environment", + "enum": [ + true, + false + ], + "type": "boolean" + }, + "environment-variables": { + "additionalProperties": false, + "properties": { + "pass": { + "description": "Specify an existing environment variable to be passed through to and set within the execution environment (--penv MY_VAR)", + "items": { + "type": "string" + }, + "type": "array" + }, + "set": { + "description": "Specify an environment variable and a value to be set within the execution environment (--senv MY_VAR=42)", + "type": "object" + } + }, + "type": "object" + }, + "image": { + "description": "Specify the name of the execution environment image", + "type": "string" + }, + "pull": { + "additionalProperties": false, + "properties": { + "arguments": { + "description": "Specify any additional parameters that should be added to the pull command when pulling an execution environment from a container registry. e.g. 
--pa='--tls-verify=false'", + "items": { + "type": "string" + }, + "type": "array" + }, + "policy": { + "default": "tag", + "description": "Specify the image pull policy always:Always pull the image, missing:Pull if not locally available, never:Never pull the image, tag:if the image tag is 'latest', always pull the image, otherwise pull if not locally available", + "enum": [ + "always", + "missing", + "never", + "tag" + ], + "type": "string" + } + } + }, + "volume-mounts": { + "additionalProperties": false, + "description": "Specify volume to be bind mounted within an execution environment (--eev /home/user/test:/home/user/test:Z)", + "items": { + "additionalProperties": false, + "properties": { + "dest": { + "type": "string" + }, + "options": { + "type": "string" + }, + "src": { + "type": "string" + } + }, + "required": [ + "src", + "dest" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "format": { + "default": "yaml", + "description": "Specify the format for stdout output.", + "enum": [ + "json", + "yaml" + ], + "type": "string" + }, + "images": { + "additionalProperties": false, + "properties": { + "details": { + "default": [ + "everything" + ], + "description": "Provide detailed information about the selected execution environment image", + "items": { + "enum": [ + "ansible_collections", + "ansible_version", + "everything", + "os_release", + "python_packages", + "python_version", + "redhat_release", + "system_packages" + ], + "type": "string" + }, + "type": "array" + } + } + }, + "inventory-columns": { + "description": "Specify a host attribute to show in the inventory view", + "items": { + "type": "string" + }, + "type": "array" + }, + "logging": { + "additionalProperties": false, + "properties": { + "append": { + "default": true, + "description": "Specify if log messages should be appended to an existing log file, otherwise a new log file will be created per session", + "enum": [ + true, + false + ], + "type": "boolean" + }, + "file": { + "default": "./ansible-navigator.log", + "description": "Specify the full path for the ansible-navigator log file", + "type": "string" + }, + "level": { + "default": "warning", + "description": "Specify the ansible-navigator log level", + "enum": [ + "debug", + "info", + "warning", + "error", + "critical" + ], + "type": "string" + } + }, + "type": "object" + }, + "mode": { + "default": "interactive", + "description": "Specify the user-interface mode", + "enum": [ + "stdout", + "interactive" + ], + "type": "string" + }, + "playbook-artifact": { + "additionalProperties": false, + "properties": { + "enable": { + "default": true, + "description": "Enable or disable the creation of artifacts for completed playbooks. Note: not compatible with '--mode stdout' when playbooks require user input", + "enum": [ + true, + false + ], + "type": "boolean" + }, + "replay": { + "description": "Specify the path for the playbook artifact to replay", + "type": "string" + }, + "save-as": { + "default": "{playbook_dir}/{playbook_name}-artifact-{time_stamp}.json", + "description": "Specify the name for artifacts created from completed playbooks. The following placeholders are available: {playbook_dir}, {playbook_name}, {playbook_status}, and {time_stamp}", + "type": "string" + } + }, + "type": "object" + }, + "settings": { + "additionalProperties": false, + "properties": { + "effective": { + "default": false, + "description": "Show the effective settings. 
Defaults, CLI parameters, environment variables, and the settings file will be combined", + "type": "boolean" + }, + "sample": { + "default": false, + "description": "Generate a sample settings file", + "type": "boolean" + }, + "schema": { + "default": "json", + "description": "Generate a schema for the settings file ('json'= draft-07 JSON Schema)", + "enum": [ + "json" + ], + "type": "string" + }, + "sources": { + "default": false, + "description": "Show the source of each current settings entry", + "type": "boolean" + } + } + }, + "time-zone": { + "default": "UTC", + "description": "Specify the IANA time zone to use or 'local' to use the system time zone", + "type": "string" + } + } + } + }, + "required": [ + "ansible-navigator" + ], + "title": "ansible-navigator settings v3", + "type": "object", + "version": "3" +} diff --git a/src/ansiblelint/schemas/ansible-navigator.json b/src/ansiblelint/schemas/ansible-navigator.json new file mode 100644 index 0000000..be83649 --- /dev/null +++ b/src/ansiblelint/schemas/ansible-navigator.json @@ -0,0 +1,430 @@ +{ + "$defs": { + "AnsibleBuilderModel": { + "additionalProperties": false, + "properties": { + "workdir": { + "default": "/tmp/", + "description": "Specify the path that contains ansible-builder manifest files", + "title": "Workdir", + "type": "string" + } + }, + "type": "object" + }, + "AnsibleModel": { + "additionalProperties": false, + "properties": { + "cmdline": { + "description": "Extra parameters passed to the corresponding command", + "title": "Cmdline", + "type": "string" + }, + "config": { + "description": "Specify the path to the ansible configuration file", + "title": "Config", + "type": "string" + }, + "inventories": { + "description": "Specify an inventory file path or host list", + "items": { + "type": "string" + }, + "title": "Inventories", + "type": "array" + }, + "playbook": { + "description": "Specify the playbook name", + "title": "Playbook", + "type": "string" + } + }, + "title": "AnsibleModel", + "type": "object" + }, + "AnsibleNavigatorModel": { + "additionalProperties": false, + "properties": { + "ansible": { + "$ref": "#/$defs/AnsibleModel" + }, + "ansible-builder": { + "$ref": "#/$defs/AnsibleBuilderModel" + }, + "ansible-runner": { + "$ref": "#/$defs/AnsibleRunnerModel" + }, + "app": { + "default": "welcome", + "description": "Subcommands", + "enum": [ + "collections", + "config", + "doc", + "exec", + "images", + "inventory", + "replay", + "run", + "welcome" + ], + "title": "App", + "type": "string" + }, + "collection-doc-cache-path": { + "default": "$HOME/.cache/ansible-navigator/collection_doc_cache.db", + "description": "The path to collection doc cache", + "title": "Collection-Doc-Cache-Path", + "type": "string" + }, + "color": { + "$ref": "#/$defs/ColorModel" + }, + "documentation": { + "$ref": "#/$defs/DocumentationModel" + }, + "editor": { + "$ref": "#/$defs/EditorModel" + }, + "exec": { + "$ref": "#/$defs/ExecModel" + }, + "execution-environment": { + "$ref": "#/$defs/ExecutionEnvironmentModel" + }, + "help-builder": { + "default": false, + "description": "Help options for ansible-builder command in stdout mode", + "title": "Help-Builder", + "type": "boolean" + }, + "help-config": { + "default": false, + "description": "Help options for ansible-config command in stdout mode", + "title": "Help-Config", + "type": "boolean" + }, + "help-doc": { + "default": false, + "description": "Help options for ansible-doc command in stdout mode", + "title": "Help-Doc", + "type": "boolean" + }, + "help-inventory": { + 
"default": false, + "description": "Help options for ansible-inventory command in stdout mode", + "title": "Help-Inventory", + "type": "boolean" + }, + "help-playbook": { + "default": false, + "description": "Help options for ansible-playbook command in stdout mode", + "title": "Help-Playbook", + "type": "boolean" + }, + "inventory-columns": { + "description": "Specify a host attribute to show in the inventory view", + "items": { + "type": "string" + }, + "title": "Inventory-Columns", + "type": "array" + }, + "logging": { + "$ref": "#/$defs/LoggingModel" + }, + "mode": { + "default": "interactive", + "description": "Specify the user-interface mode", + "enum": ["stdout", "interactive"], + "title": "Mode", + "type": "string" + }, + "playbook-artifact": { + "$ref": "#/$defs/PlaybookArtifactModel" + } + }, + "title": "AnsibleNavigatorModel", + "type": "object" + }, + "AnsibleRunnerModel": { + "additionalProperties": false, + "properties": { + "artifact-dir": { + "description": "The directory path to store artifacts generated by ansible-runner", + "title": "Artifact-Dir", + "type": "string" + }, + "rotate-artifacts-count": { + "description": "Keep ansible-runner artifact directories, for last n runs, if set to 0 artifact directories won't be deleted", + "title": "Rotate-Artifacts-Count", + "type": "integer" + }, + "timeout": { + "description": "The timeout value after which ansible-runner will force stop the execution", + "title": "Timeout", + "type": "integer" + } + }, + "title": "AnsibleRunnerModel", + "type": "object" + }, + "ColorModel": { + "additionalProperties": false, + "properties": { + "enable": { + "default": false, + "description": "Enable the use of color in the display", + "title": "Enable", + "type": "boolean" + }, + "osc4": { + "default": true, + "description": "Enable or disable terminal color changing support with OSC 4", + "title": "Osc4", + "type": "boolean" + } + }, + "title": "ColorModel", + "type": "object" + }, + "DocumentationModel": { + "additionalProperties": false, + "properties": { + "plugin": { + "$ref": "#/$defs/PluginModel" + } + }, + "title": "DocumentationModel", + "type": "object" + }, + "EditorModel": { + "additionalProperties": false, + "properties": { + "command": { + "default": "vi +{line_number} {filename}", + "description": "Specify the editor command", + "title": "Command", + "type": "string" + }, + "console": { + "default": true, + "description": "Specify if the editor is console based", + "title": "Console", + "type": "boolean" + } + }, + "title": "EditorModel", + "type": "object" + }, + "EnvironmentVariablesModel": { + "additionalProperties": false, + "properties": { + "pass": { + "description": "Specify an exiting environment variable to be passed through to and set within the execution environment", + "items": { + "type": "string" + }, + "title": "Pass", + "type": "array" + }, + "set": { + "additionalProperties": { + "type": "string" + }, + "description": "Specify an environment variable and a value to be set within the execution environment", + "title": "Set", + "type": "object" + } + }, + "title": "EnvironmentVariablesModel", + "type": "object" + }, + "ExecModel": { + "additionalProperties": false, + "properties": { + "command": { + "default": "/bin/bash", + "description": "Specify the command to run within the execution environment", + "title": "Command", + "type": "string" + }, + "shell": { + "default": true, + "description": "Specify the exec command should be run in a shell", + "title": "Shell", + "type": "boolean" + } + }, + "title": 
"ExecModel", + "type": "object" + }, + "ExecutionEnvironmentModel": { + "additionalProperties": false, + "properties": { + "container-engine": { + "default": "auto", + "description": "Specify the container engine (auto=podman then docker)", + "enum": ["auto", "podman", "docker"], + "title": "Container-Engine", + "type": "string" + }, + "container-options": { + "description": "Extra parameters passed to the container engine command", + "items": { + "type": "string" + }, + "title": "Container-Options", + "type": "array" + }, + "enabled": { + "default": true, + "description": "Enable or disable the use of an execution environment", + "title": "Enabled", + "type": "boolean" + }, + "environment-variables": { + "$ref": "#/$defs/EnvironmentVariablesModel" + }, + "image": { + "default": "quay.io/ansible/creator-ee:v0.2.0", + "description": "Specify the name of the execution environment image", + "title": "Image", + "type": "string" + }, + "pull-policy": { + "default": "tag", + "description": "Specify the image pull policy.\nalways: Always pull the image\nmissing: Pull if not locally available\nnever: Never pull the image\ntag: if the image tag is 'latest', always pull the image, otherwise pull if not locally available", + "enum": ["always", "missing", "never", "tag"], + "title": "Pull-Policy", + "type": "string" + }, + "volume-mounts": { + "description": "Specify volume to be bind mounted within an execution environment", + "items": { + "$ref": "#/$defs/VolumeMountsModel" + }, + "title": "Volume-Mounts", + "type": "array" + } + }, + "title": "ExecutionEnvironmentModel", + "type": "object" + }, + "LoggingModel": { + "additionalProperties": false, + "properties": { + "append": { + "default": true, + "description": "Specify if log messages should be appended to an existing log file, otherwise a new log file will be created per session", + "title": "Append", + "type": "boolean" + }, + "file": { + "default": "$PWD/ansible-navigator.", + "description": "Specify the full path for the ansible-navigator log file", + "title": "File", + "type": "string" + }, + "level": { + "default": "warning", + "description": "Specify the ansible-navigator log level", + "enum": ["debug", "info", "warning", "error", "critical"], + "title": "Level", + "type": "string" + } + }, + "title": "LoggingModel", + "type": "object" + }, + "PlaybookArtifactModel": { + "additionalProperties": false, + "properties": { + "enable": { + "default": true, + "description": "Enable or disable the creation of artifacts for completed playbooks.\nNote: not compatible with 'mode: stdout' when playbooks require user input", + "title": "Enable", + "type": "boolean" + }, + "replay": { + "description": "Specify the path for the playbook artifact to replay", + "title": "Replay", + "type": "string" + }, + "save-as": { + "default": "{playbook_dir}/{playbook_name}-artifact-{ts_utc}.json", + "description": "Specify the name for artifacts created from completed playbooks", + "title": "Save-As", + "type": "string" + } + }, + "title": "PlaybookArtifactModel", + "type": "object" + }, + "PluginModel": { + "additionalProperties": false, + "properties": { + "name": { + "description": "Specify the plugin name", + "title": "Name", + "type": "string" + }, + "type": { + "default": "module", + "description": "Specify the plugin type", + "enum": [ + "become", + "cache", + "callback", + "cliconf", + "connection", + "httpapi", + "inventory", + "lookup", + "module", + "netconf", + "shell", + "strategy", + "vars" + ], + "title": "Type", + "type": "string" + } + }, + 
"title": "PluginModel", + "type": "object" + }, + "VolumeMountsModel": { + "additionalProperties": false, + "properties": { + "dest": { + "title": "Dest", + "type": "string" + }, + "label": { + "title": "Label", + "type": "string" + }, + "src": { + "title": "Src", + "type": "string" + } + }, + "required": ["src", "dest"], + "title": "VolumeMountsModel", + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible-navigator.json", + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": false, + "examples": ["ansible-navigator.yml"], + "properties": { + "ansible-navigator": { + "$ref": "#/$defs/AnsibleNavigatorModel" + } + }, + "required": ["ansible-navigator"], + "title": "Ansible-Navigator Configuration Schema", + "type": "object" +} diff --git a/src/ansiblelint/schemas/ansible.json b/src/ansiblelint/schemas/ansible.json new file mode 100644 index 0000000..94846d0 --- /dev/null +++ b/src/ansiblelint/schemas/ansible.json @@ -0,0 +1,1202 @@ +{ + "$defs": { + "ansible.builtin.import_playbook": { + "additionalProperties": false, + "oneOf": [ + { + "not": { + "required": ["import_playbook"] + }, + "required": ["ansible.builtin.import_playbook"] + }, + { + "not": { + "required": ["ansible.builtin.import_playbook"] + }, + "required": ["import_playbook"] + } + ], + "patternProperties": { + "^(ansible\\.builtin\\.)?import_playbook$": { + "markdownDescription": "* Includes a file with a list of plays to be executed.\n * Files with a list of plays can only be included at the top level.\n * You cannot use this action inside a play.\n\nSee [import_playbook](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/import_playbook_module.html)", + "title": "Import Playbook", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "tags": { + "$ref": "#/$defs/tags" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "type": "object" + }, + "become_method": { + "anyOf": [ + { + "enum": [ + "ansible.builtin.sudo", + "ansible.builtin.su", + "community.general.pbrun", + "community.general.pfexec", + "ansible.builtin.runas", + "community.general.dzdo", + "community.general.ksu", + "community.general.doas", + "community.general.machinectl", + "community.general.pmrun", + "community.general.sesu", + "community.general.sudosu" + ], + "type": "string" + }, + { + "$ref": "#/$defs/full-jinja" + }, + { + "pattern": "[A-Za-z0-9_\\.]+", + "type": "string" + } + ], + "markdownDescription": "See [become](https://docs.ansible.com/ansible/latest/user_guide/become.html)", + "title": "Become Method" + }, + "block": { + "properties": { + "always": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/task" + }, + { + "$ref": "#/$defs/block" + } + ] + }, + "title": "Always", + "type": "array" + }, + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "block": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/task" + }, + { + "$ref": "#/$defs/block" + } + ] + }, + "markdownDescription": "Blocks create logical groups of tasks. 
Blocks also offer ways to handle task errors, similar to exception handling in many programming languages. See [blocks](https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html)", + "title": "Block", + "type": "array" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "delegate_facts": { + "title": "Delegate Facts", + "type": "boolean" + }, + "delegate_to": { + "title": "Delegate To", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean" + }, + "port": { + "$ref": "#/$defs/templated-integer" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "rescue": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/task" + }, + { + "$ref": "#/$defs/block" + } + ] + }, + "title": "Rescue", + "type": "array" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "required": ["block"], + "type": "object" + }, + "complex_conditional": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "string" + } + ] + }, + "type": "array" + } + ] + }, + "environment": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "$ref": "#/$defs/full-jinja" + } + ], + "title": "Environment" + }, + "full-jinja": { + "pattern": "^\\{[\\{%](.|[\r\n])*[\\}%]\\}$", + "type": "string" + }, + "ignore_errors": { + "$ref": "#/$defs/templated-boolean", + "markdownDescription": "See [ignore_errors](https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#ignoring-failed-commands)", + "title": "Ignore Errors" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean", + "markdownDescription": "Use for protecting sensitive data. 
See [no_log](https://docs.ansible.com/ansible/latest/reference_appendices/logging.html)", + "title": "no_log" + }, + "play": { + "additionalProperties": false, + "allOf": [ + { + "not": { + "required": ["ansible.builtin.import_playbook"] + } + }, + { + "not": { + "required": ["import_playbook"] + } + } + ], + "properties": { + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "fact_path": { + "title": "Fact Path", + "type": "string" + }, + "force_handlers": { + "title": "Force Handlers", + "type": "boolean" + }, + "gather_facts": { + "title": "Gather Facts", + "type": "boolean" + }, + "gather_subset": { + "items": { + "anyOf": [ + { + "enum": [ + "all", + "min", + "all_ipv4_addresses", + "all_ipv6_addresses", + "apparmor", + "architecture", + "caps", + "chroot,cmdline", + "date_time", + "default_ipv4", + "default_ipv6", + "devices", + "distribution", + "distribution_major_version", + "distribution_release", + "distribution_version", + "dns", + "effective_group_ids", + "effective_user_id", + "env", + "facter", + "fips", + "hardware", + "interfaces", + "is_chroot", + "iscsi", + "kernel", + "local", + "lsb", + "machine", + "machine_id", + "mounts", + "network", + "ohai", + "os_family", + "pkg_mgr", + "platform", + "processor", + "processor_cores", + "processor_count", + "python", + "python_version", + "real_user_id", + "selinux", + "service_mgr", + "ssh_host_key_dsa_public", + "ssh_host_key_ecdsa_public", + "ssh_host_key_ed25519_public", + "ssh_host_key_rsa_public", + "ssh_host_pub_keys", + "ssh_pub_keys", + "system", + "system_capabilities", + "system_capabilities_enforced", + "user", + "user_dir", + "user_gecos", + "user_gid", + "user_id", + "user_shell", + "user_uid", + "virtual", + "virtualization_role", + "virtualization_type" + ], + "type": "string" + }, + { + "enum": [ + "!all", + "!min", + "!all_ipv4_addresses", + "!all_ipv6_addresses", + "!apparmor", + "!architecture", + "!caps", + "!chroot,cmdline", + "!date_time", + "!default_ipv4", + "!default_ipv6", + "!devices", + "!distribution", + "!distribution_major_version", + "!distribution_release", + "!distribution_version", + "!dns", + "!effective_group_ids", + "!effective_user_id", + "!env", + "!facter", + "!fips", + "!hardware", + "!interfaces", + "!is_chroot", + "!iscsi", + "!kernel", + "!local", + "!lsb", + "!machine", + "!machine_id", + "!mounts", + "!network", + "!ohai", + "!os_family", + "!pkg_mgr", + "!platform", + "!processor", + "!processor_cores", + "!processor_count", + "!python", + "!python_version", + "!real_user_id", + "!selinux", + "!service_mgr", + "!ssh_host_key_dsa_public", + "!ssh_host_key_ecdsa_public", + "!ssh_host_key_ed25519_public", + "!ssh_host_key_rsa_public", + 
"!ssh_host_pub_keys", + "!ssh_pub_keys", + "!system", + "!system_capabilities", + "!system_capabilities_enforced", + "!user", + "!user_dir", + "!user_gecos", + "!user_gid", + "!user_id", + "!user_shell", + "!user_uid", + "!virtual", + "!virtualization_role", + "!virtualization_type" + ], + "type": "string" + } + ] + }, + "title": "Gather Subset", + "type": "array" + }, + "gather_timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Gather Timeout" + }, + "handlers": { + "$ref": "#/$defs/tasks" + }, + "hosts": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Hosts" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "max_fail_percentage": { + "title": "Max Fail Percentage", + "type": "number" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean" + }, + "order": { + "enum": [ + "default", + "sorted", + "reverse_sorted", + "reverse_inventory", + "shuffle" + ], + "title": "Order", + "type": "string" + }, + "port": { + "$ref": "#/$defs/templated-integer", + "title": "Port" + }, + "post_tasks": { + "$ref": "#/$defs/tasks" + }, + "pre_tasks": { + "$ref": "#/$defs/tasks" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "roles": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/play-role" + }, + { + "type": "string" + } + ] + }, + "markdownDescription": "Roles let you automatically load related vars, files, tasks, handlers, and other Ansible artifacts based on a known file structure. After you group your content in roles, you can easily reuse them and share them with other users.\n See [roles](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html#roles)", + "title": "Roles", + "type": "array" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "serial": { + "anyOf": [ + { + "$ref": "#/$defs/templated-integer-or-percent" + }, + { + "items": { + "$ref": "#/$defs/templated-integer-or-percent" + }, + "type": "array" + } + ], + "markdownDescription": "Integer, percentage or list of those. 
See [Setting the batch size with serial](https://docs.ansible.com/ansible/latest/user_guide/playbooks_strategies.html#setting-the-batch-size-with-serial)", + "title": "Batch size" + }, + "strategy": { + "title": "Strategy", + "type": "string" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "tasks": { + "$ref": "#/$defs/tasks" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "user": { + "title": "Remote User", + "type": "string" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "vars_files": { + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ] + }, + "title": "Vars Files", + "type": ["array", "string", "null"] + }, + "vars_prompt": { + "items": { + "$ref": "#/$defs/vars_prompt" + }, + "markdownDescription": "See [vars_prompt](https://docs.ansible.com/ansible/latest/user_guide/playbooks_prompts.html)", + "title": "vars_prompt", + "type": "array" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "required": ["hosts"], + "title": "play", + "type": "object" + }, + "play-role": { + "markdownDescription": "See [roles](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html#roles)", + "properties": { + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "delegate_to": { + "title": "Delegate To", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean" + }, + "port": { + "$ref": "#/$defs/templated-integer", + "title": "Port" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "role": { + "title": "Role", + "type": "string" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "required": ["role"], + "title": "play-role", + "type": "object" + }, + "playbook": { + "examples": ["playbooks/*.yml", "playbooks/*.yaml"], + "items": { + "oneOf": [ + { + "$ref": "#/$defs/ansible.builtin.import_playbook" + }, + { + "$ref": 
"#/$defs/play" + } + ] + }, + "title": "Ansible Playbook", + "type": "array" + }, + "tags": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Tags" + }, + "task": { + "additionalProperties": true, + "allOf": [ + { + "not": { + "required": ["hosts"] + } + }, + { + "not": { + "required": ["tasks"] + } + }, + { + "not": { + "required": ["import_playbook"] + } + }, + { + "not": { + "required": ["block"] + } + } + ], + "properties": { + "action": { + "title": "Action", + "type": "string" + }, + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "args": { + "$ref": "#/$defs/templated-object", + "title": "Args" + }, + "async": { + "$ref": "#/$defs/templated-integer", + "title": "Async" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "changed_when": { + "$ref": "#/$defs/complex_conditional", + "markdownDescription": "See [changed_when](https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#defining-changed)", + "title": "Changed When" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "delay": { + "$ref": "#/$defs/templated-integer", + "title": "Delay" + }, + "delegate_facts": { + "title": "Delegate Facts", + "type": "boolean" + }, + "delegate_to": { + "title": "Delegate To", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "failed_when": { + "$ref": "#/$defs/complex_conditional", + "title": "Failed When" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "listen": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "markdownDescription": "Applies only to handlers. 
See [listen](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html)", + "title": "Listen" + }, + "local_action": { + "title": "Local Action", + "type": ["string", "object"] + }, + "loop": { + "title": "Loop", + "type": ["string", "array"] + }, + "loop_control": { + "title": "Loop Control" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/no_log" + }, + "notify": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Notify" + }, + "poll": { + "$ref": "#/$defs/templated-integer", + "title": "Poll" + }, + "port": { + "$ref": "#/$defs/templated-integer", + "title": "Port" + }, + "register": { + "title": "Register", + "type": "string" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "retries": { + "$ref": "#/$defs/templated-integer", + "title": "Retries" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "until": { + "$ref": "#/$defs/complex_conditional", + "title": "Until" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + }, + "with_dict": { + "title": "With Dict" + }, + "with_fileglob": { + "title": "With Fileglob" + }, + "with_filetree": { + "title": "With Filetree" + }, + "with_first_found": { + "title": "With First Found" + }, + "with_indexed_items": { + "title": "With Indexed Items" + }, + "with_ini": { + "title": "With Ini" + }, + "with_inventory_hostnames": { + "title": "With Inventory Hostnames" + }, + "with_items": { + "anyOf": [ + { + "$ref": "#/$defs/full-jinja" + }, + { + "type": "array" + } + ], + "markdownDescription": "See [loops](https://docs.ansible.com/ansible/latest/user_guide/playbooks_loops.html#loops)", + "title": "With Items" + }, + "with_lines": { + "title": "With Lines" + }, + "with_random_choice": { + "title": "With Random Choice" + }, + "with_sequence": { + "title": "With Sequence" + }, + "with_subelements": { + "title": "With Subelements" + }, + "with_together": { + "title": "With Together" + } + }, + "title": "task", + "type": "object" + }, + "tasks": { + "$schema": "http://json-schema.org/draft-07/schema", + "examples": ["tasks/*.yml", "handlers/*.yml"], + "items": { + "anyOf": [ + { + "$ref": "#/$defs/block" + }, + { + "$ref": "#/$defs/task" + } + ] + }, + "title": "Ansible Tasks Schema", + "type": ["array", "null"] + }, + "templated-boolean": { + "oneOf": [ + { + "type": "boolean" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "templated-integer": { + "oneOf": [ + { + "type": "integer" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "templated-integer-or-percent": { + "oneOf": [ + { + "type": "integer" + }, + { + "pattern": "^\\d+\\.?\\d*%?$", + "type": "string" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "templated-object": { + "oneOf": [ + { + "type": "object" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "vars_prompt": { + "additionalProperties": false, + "properties": { + "confirm": { + "title": "Confirm", + "type": "boolean" + }, + "default": { + "title": "Default", + "type": "string" + }, 
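As an illustration of how the playbook, task, and vars_prompt definitions in this bundled ansible.json schema can be consumed, here is a minimal sketch that validates a small playbook document against the `playbook` subschema with the third-party `jsonschema` package. The schema path, the play contents, and the `release_version` prompt below are illustrative assumptions, not values taken from this commit.

```python
# Minimal sketch: check an in-memory playbook against the bundled playbook subschema.
# Assumes a checkout where src/ansiblelint/schemas/ansible.json exists.
import json
from pathlib import Path

import jsonschema  # pip install jsonschema

bundle = json.loads(
    Path("src/ansiblelint/schemas/ansible.json").read_text(encoding="utf-8")
)

# Point the root at the "playbook" definition while keeping the shared $defs
# available so nested "#/$defs/..." references still resolve.
playbook_schema = {
    "$schema": "http://json-schema.org/draft-07/schema",
    "$defs": bundle["$defs"],
    "$ref": "#/$defs/playbook",
}

playbook = [
    {
        "name": "Demo play",  # hypothetical example content
        "hosts": "all",
        "gather_facts": False,
        "vars_prompt": [
            {"name": "release_version", "prompt": "Product release version", "private": False},
        ],
        "tasks": [
            {"name": "Show a message", "ansible.builtin.debug": {"msg": "hello"}},
        ],
    }
]

# Raises jsonschema.ValidationError if the document does not match the subschema.
jsonschema.validate(instance=playbook, schema=playbook_schema)
print("playbook matches the bundled playbook subschema")
```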
+ "encrypt": { + "enum": [ + "des_crypt", + "bsdi_crypt", + "bigcrypt", + "crypt16", + "md5_crypt", + "bcrypt", + "sha1_crypt", + "sun_md5_crypt", + "sha256_crypt", + "sha512_crypt", + "apr_md5_crypt", + "phpass", + "pbkdf2_digest", + "cta_pbkdf2_sha1", + "dlitz_pbkdf2_sha1", + "scram", + "bsd_nthash" + ], + "title": "Encrypt", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "private": { + "default": true, + "title": "Private", + "type": "boolean" + }, + "prompt": { + "title": "Prompt", + "type": "string" + }, + "salt_size": { + "default": 8, + "title": "Salt Size", + "type": "integer" + }, + "unsafe": { + "default": false, + "markdownDescription": "See [unsafe](https://docs.ansible.com/ansible/latest/user_guide/playbooks_prompts.html#allowing-special-characters-in-vars-prompt-values)", + "title": "Unsafe", + "type": "boolean" + } + }, + "required": ["name", "prompt"], + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible.json", + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": false, + "examples": [], + "title": "Ansible Schemas Bundle 22.4", + "type": ["array", "object"] +} diff --git a/src/ansiblelint/schemas/changelog.json b/src/ansiblelint/schemas/changelog.json new file mode 100644 index 0000000..c243700 --- /dev/null +++ b/src/ansiblelint/schemas/changelog.json @@ -0,0 +1,262 @@ +{ + "$defs": { + "plugin-descriptions": { + "items": { + "properties": { + "description": { + "markdownDescription": "Value of `short_description` from plugin `DOCUMENTATION`.", + "title": "Description", + "type": "string" + }, + "name": { + "markdownDescription": "It must not be the FQCN, but the name inside the collection.", + "pattern": "[a-zA-Z0-9_]+", + "title": "Name", + "type": "string" + }, + "namespace": { + "type": "null" + } + }, + "type": "object" + }, + "type": "array" + }, + "release": { + "additionalProperties": false, + "properties": { + "changes": { + "additionalProperties": true, + "properties": { + "breaking_changes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "bugfixes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "deprecated_features": { + "items": { + "type": "string" + }, + "type": "array" + }, + "known_issues": { + "items": { + "type": "string" + }, + "type": "array" + }, + "major_changes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "minor_changes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "release_summary": { + "markdownDescription": "This must be valid [reStructuredText](https://en.wikipedia.org/wiki/ReStructuredText).", + "title": "Release Summary", + "type": "string" + }, + "removed_features": { + "items": { + "type": "string" + }, + "type": "array" + }, + "security_fixes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "trivial": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "codename": { + "type": "string" + }, + "fragments": { + "items": { + "type": "string" + }, + "markdownDescription": "List of strings representing filenames of changelog framents.", + "type": "array" + }, + "modules": { + "items": { + "properties": { + "description": { + "markdownDescription": "Value of `short_description` from plugin `DOCUMENTATION`.", + "title": "Description", + "type": "string" + }, + "name": { + "markdownDescription": "It must not be the FQCN, but the name inside the collection.", + "pattern": 
"[a-zA-Z0-9_]+", + "title": "Short module name", + "type": "string" + }, + "namespace": { + "markdownDescription": "Must be `''` for modules directly in `plugins/modules/`, or the dot-separated list of directories the module is in inside the `plugins/modules/` directory. The namespace is used to group new modules by their namespace inside the collection.", + "title": "Namespace", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "objects": { + "additionalProperties": false, + "properties": { + "playbook": { + "items": { + "properties": { + "description": { + "markdownDescription": "A short description of what the playbook does.", + "title": "Description", + "type": "string" + }, + "name": { + "markdownDescription": "It must not be the FQCN, but the name inside the collection.", + "pattern": "[a-zA-Z0-9_]+", + "title": "Short playbook name", + "type": "string" + }, + "namespace": { + "type": "null" + } + }, + "type": "object" + }, + "type": "array" + }, + "role": { + "items": { + "properties": { + "description": { + "markdownDescription": "Value of `short_description` from role's argument spec.", + "title": "Description", + "type": "string" + }, + "name": { + "markdownDescription": "It must not be the FQCN, but the name inside the collection.", + "pattern": "[a-zA-Z0-9_]+", + "title": "Short role name", + "type": "string" + }, + "namespace": { + "type": "null" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "plugins": { + "additionalProperties": false, + "properties": { + "become": { + "$ref": "#/$defs/plugin-descriptions" + }, + "cache": { + "$ref": "#/$defs/plugin-descriptions" + }, + "callback": { + "$ref": "#/$defs/plugin-descriptions" + }, + "cliconf": { + "$ref": "#/$defs/plugin-descriptions" + }, + "connection": { + "$ref": "#/$defs/plugin-descriptions" + }, + "filter": { + "$ref": "#/$defs/plugin-descriptions" + }, + "httpapi": { + "$ref": "#/$defs/plugin-descriptions" + }, + "inventory": { + "$ref": "#/$defs/plugin-descriptions" + }, + "lookup": { + "$ref": "#/$defs/plugin-descriptions" + }, + "netconf": { + "$ref": "#/$defs/plugin-descriptions" + }, + "shell": { + "$ref": "#/$defs/plugin-descriptions" + }, + "strategy": { + "$ref": "#/$defs/plugin-descriptions" + }, + "test": { + "$ref": "#/$defs/plugin-descriptions" + }, + "vars": { + "$ref": "#/$defs/plugin-descriptions" + } + }, + "type": "object" + }, + "release_date": { + "format": "date", + "markdownDescription": "Use ISO-8601 date format, like 2020-12-31", + "pattern": "\\d\\d\\d\\d-\\d\\d-\\d\\d", + "title": "Date of the release.", + "type": "string" + } + }, + "type": "object" + }, + "semver": { + "pattern": "\\d+.\\d+.\\d+.*", + "title": "Version string following SemVer specification.", + "type": ["string", "null"] + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/changelog.json", + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": false, + "examples": ["changelogs/changelog.yaml"], + "markdownDescription": "Antsibull Changelog Schema is based on [changelog.yaml-format.md](https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelog.yaml-format.md).", + "properties": { + "ancestor": { + "$ref": "#/$defs/semver" + }, + "releases": { + "patternProperties": { + "\\d+.\\d+.\\d+.*": { + "$ref": "#/$defs/release", + "type": "object" + } + }, + "type": "object" + } + }, + "title": "Antsibull Changelog Schema", + "type": "object" +} diff --git 
a/src/ansiblelint/schemas/execution-environment.json b/src/ansiblelint/schemas/execution-environment.json new file mode 100644 index 0000000..4720a93 --- /dev/null +++ b/src/ansiblelint/schemas/execution-environment.json @@ -0,0 +1,309 @@ +{ + "$defs": { + "TYPE_DictOrStringOrListOfStrings": { + "anyOf": [ + { "type": "object" }, + { "type": "string" }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ] + }, + "TYPE_StringOrListOfStrings": { + "anyOf": [ + { "type": "string" }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ] + }, + "v1": { + "additionalProperties": false, + "properties": { + "additional_build_steps": { + "properties": { + "append": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + }, + "prepend": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + } + }, + "title": "Commands to append or prepend to container build process.", + "type": "object" + }, + "ansible_config": { + "examples": ["ansible.cfg"], + "title": "Ansible configuration file", + "type": "string" + }, + "build_arg_defaults": { + "additionalProperties": true, + "properties": { + "EE_BASE_IMAGE": { + "type": "string" + } + }, + "type": "object" + }, + "dependencies": { + "description": "Allows adding system, python or galaxy dependencies.", + "properties": { + "galaxy": { + "examples": ["requirements.yml"], + "markdownDescription": "Example `requirements.yml`", + "title": "Optional galaxy file", + "type": "string" + }, + "python": { + "examples": ["requirements.txt"], + "markdownDescription": "Example `requirements.txt`", + "title": "Optional python package dependencies", + "type": "string" + }, + "system": { + "examples": ["bindep.txt"], + "markdownDescription": "Example `bindep.txt`", + "title": "Optional system dependencies using bindep format", + "type": "string" + } + }, + "title": "Dependencies", + "type": "object" + }, + "version": { + "enum": [1], + "title": "Version", + "type": "integer" + } + }, + "required": ["version", "dependencies"], + "title": "Ansible Execution Environment Schema v1", + "type": "object" + }, + "v3": { + "additionalProperties": false, + "properties": { + "additional_build_files": { + "description": "Describes files to add to the build context", + "items": { + "additionalProperties": false, + "properties": { + "dest": { + "description": "Relative subdirectory under build context to place file", + "type": "string" + }, + "src": { + "description": "File to add to build context", + "type": "string" + } + }, + "required": ["src", "dest"], + "type": "object" + }, + "type": "array" + }, + "additional_build_steps": { + "properties": { + "append_base": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + }, + "append_builder": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + }, + "append_final": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + }, + "append_galaxy": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + }, + "prepend_base": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + }, + "prepend_builder": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + }, + "prepend_final": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + }, + "prepend_galaxy": { + "$ref": 
"#/$defs/TYPE_StringOrListOfStrings", + "examples": ["RUN cat /etc/os-release"] + } + }, + "title": "Commands to append or prepend to container build process.", + "type": "object" + }, + "build_arg_defaults": { + "additionalProperties": false, + "properties": { + "ANSIBLE_GALAXY_CLI_COLLECTION_OPTS": { + "type": "string" + }, + "ANSIBLE_GALAXY_CLI_ROLE_OPTS": { + "type": "string" + }, + "PKGMGR_PRESERVE_CACHE": { + "type": "string" + } + }, + "type": "object" + }, + "dependencies": { + "description": "Allows adding system, python or galaxy dependencies.", + "properties": { + "ansible_core": { + "additionalProperties": false, + "description": "Ansible package installation", + "oneOf": [{ "required": ["package_pip"] }], + "properties": { + "package_pip": { + "description": "Ansible package to install via pip", + "type": "string" + } + }, + "type": "object" + }, + "ansible_runner": { + "additionalProperties": false, + "description": "Ansible Runner package installation", + "oneOf": [{ "required": ["package_pip"] }], + "properties": { + "package_pip": { + "description": "Ansible Runner package to install via pip", + "type": "string" + } + }, + "type": "object" + }, + "galaxy": { + "$ref": "#/$defs/TYPE_DictOrStringOrListOfStrings", + "examples": ["requirements.yml"], + "markdownDescription": "Example `requirements.yml`", + "title": "Optional galaxy file" + }, + "python": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["requirements.txt"], + "markdownDescription": "Example `requirements.txt`", + "title": "Optional python package dependencies" + }, + "python_interpreter": { + "additionalProperties": false, + "description": "Python package name and path", + "properties": { + "package_system": { + "description": "The python package to install via system package manager", + "type": "string" + }, + "python_path": { + "description": "Path to the python interpreter", + "type": "string" + } + }, + "type": "object" + }, + "system": { + "$ref": "#/$defs/TYPE_StringOrListOfStrings", + "examples": ["bindep.txt"], + "markdownDescription": "Example `bindep.txt`", + "title": "Optional system dependencies using bindep format" + } + }, + "title": "Dependencies", + "type": "object" + }, + "images": { + "additionalProperties": false, + "properties": { + "base_image": { + "name": { + "examples": [ + "registry.redhat.io/ansible-automation-platform-21/ee-minimal-rhel8:latest" + ], + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "options": { + "additionalProperties": false, + "description": "Options that effect runtime behavior", + "properties": { + "container_init": { + "additionalProperties": false, + "description": "Customize container startup behavior", + "properties": { + "cmd": { + "description": "literal value for CMD Containerfile directive", + "type": "string" + }, + "entrypoint": { + "description": "literal value for ENTRYPOINT Containerfile directive", + "type": "string" + }, + "package_pip": { + "description": "package to install via pip for entrypoint support", + "type": "string" + } + }, + "type": "object" + }, + "package_manager_path": { + "description": "Path to the system package manager to use", + "type": "string" + }, + "relax_passwd_permissions": { + "description": "allows GID0 write access to /etc/passwd; currently necessary for many uses", + "type": "boolean" + }, + "skip_ansible_check": { + "description": "Disables the check for Ansible/Runner in final image", + "type": "boolean" + }, + "user": { + "description": "Sets the username or UID", + 
"type": "string" + }, + "workdir": { + "description": "Default working directory, also often the homedir for ephemeral UIDs", + "type": ["string", "null"] + } + }, + "type": "object" + }, + "version": { + "enum": [3], + "title": "Version", + "type": "integer" + } + }, + "required": ["version", "dependencies"], + "title": "Ansible Execution Environment Schema v3", + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/execution-environment.json", + "$schema": "http://json-schema.org/draft-07/schema", + "description": "See \nV1: https://docs.ansible.com/automation-controller/latest/html/userguide/ee_reference.html\nV3: https://ansible-builder.readthedocs.io/en/latest/definition/", + "examples": ["execution-environment.yml"], + "oneOf": [{ "$ref": "#/$defs/v3" }, { "$ref": "#/$defs/v1" }], + "title": "Ansible Execution Environment Schema v1/v3" +} diff --git a/src/ansiblelint/schemas/galaxy.json b/src/ansiblelint/schemas/galaxy.json new file mode 100644 index 0000000..6381f28 --- /dev/null +++ b/src/ansiblelint/schemas/galaxy.json @@ -0,0 +1,643 @@ +{ + "$defs": { + "CollectionVersionConstraintModel": { + "additionalProperties": false, + "title": "CollectionVersionConstraintModel", + "type": "string" + }, + "SPDXLicense": { + "$ref": "#/$defs/SPDXLicenseEnum", + "title": "SPDXLicense" + }, + "SPDXLicenseEnum": { + "description": "An enumeration.", + "enum": [ + "0BSD", + "AAL", + "ADSL", + "AFL-1.1", + "AFL-1.2", + "AFL-2.0", + "AFL-2.1", + "AFL-3.0", + "AGPL-1.0-only", + "AGPL-1.0-or-later", + "AGPL-3.0-only", + "AGPL-3.0-or-later", + "AMDPLPA", + "AML", + "AMPAS", + "ANTLR-PD", + "ANTLR-PD-fallback", + "APAFML", + "APL-1.0", + "APSL-1.0", + "APSL-1.1", + "APSL-1.2", + "APSL-2.0", + "Abstyles", + "AdaCore-doc", + "Adobe-2006", + "Adobe-Glyph", + "Afmparse", + "Aladdin", + "Apache-1.0", + "Apache-1.1", + "Apache-2.0", + "App-s2p", + "Arphic-1999", + "Artistic-1.0", + "Artistic-1.0-Perl", + "Artistic-1.0-cl8", + "Artistic-2.0", + "BSD-1-Clause", + "BSD-2-Clause", + "BSD-2-Clause-Patent", + "BSD-2-Clause-Views", + "BSD-3-Clause", + "BSD-3-Clause-Attribution", + "BSD-3-Clause-Clear", + "BSD-3-Clause-LBNL", + "BSD-3-Clause-Modification", + "BSD-3-Clause-No-Military-License", + "BSD-3-Clause-No-Nuclear-License", + "BSD-3-Clause-No-Nuclear-License-2014", + "BSD-3-Clause-No-Nuclear-Warranty", + "BSD-3-Clause-Open-MPI", + "BSD-4-Clause", + "BSD-4-Clause-Shortened", + "BSD-4-Clause-UC", + "BSD-4.3RENO", + "BSD-4.3TAHOE", + "BSD-Advertising-Acknowledgement", + "BSD-Attribution-HPND-disclaimer", + "BSD-Protection", + "BSD-Source-Code", + "BSL-1.0", + "BUSL-1.1", + "Baekmuk", + "Bahyph", + "Barr", + "Beerware", + "BitTorrent-1.0", + "BitTorrent-1.1", + "Bitstream-Charter", + "Bitstream-Vera", + "BlueOak-1.0.0", + "Borceux", + "Brian-Gladman-3-Clause", + "C-UDA-1.0", + "CAL-1.0", + "CAL-1.0-Combined-Work-Exception", + "CATOSL-1.1", + "CC-BY-1.0", + "CC-BY-2.0", + "CC-BY-2.5", + "CC-BY-2.5-AU", + "CC-BY-3.0", + "CC-BY-3.0-AT", + "CC-BY-3.0-DE", + "CC-BY-3.0-IGO", + "CC-BY-3.0-NL", + "CC-BY-3.0-US", + "CC-BY-4.0", + "CC-BY-NC-1.0", + "CC-BY-NC-2.0", + "CC-BY-NC-2.5", + "CC-BY-NC-3.0", + "CC-BY-NC-3.0-DE", + "CC-BY-NC-4.0", + "CC-BY-NC-ND-1.0", + "CC-BY-NC-ND-2.0", + "CC-BY-NC-ND-2.5", + "CC-BY-NC-ND-3.0", + "CC-BY-NC-ND-3.0-DE", + "CC-BY-NC-ND-3.0-IGO", + "CC-BY-NC-ND-4.0", + "CC-BY-NC-SA-1.0", + "CC-BY-NC-SA-2.0", + "CC-BY-NC-SA-2.0-DE", + "CC-BY-NC-SA-2.0-FR", + "CC-BY-NC-SA-2.0-UK", + "CC-BY-NC-SA-2.5", + "CC-BY-NC-SA-3.0", + 
"CC-BY-NC-SA-3.0-DE", + "CC-BY-NC-SA-3.0-IGO", + "CC-BY-NC-SA-4.0", + "CC-BY-ND-1.0", + "CC-BY-ND-2.0", + "CC-BY-ND-2.5", + "CC-BY-ND-3.0", + "CC-BY-ND-3.0-DE", + "CC-BY-ND-4.0", + "CC-BY-SA-1.0", + "CC-BY-SA-2.0", + "CC-BY-SA-2.0-UK", + "CC-BY-SA-2.1-JP", + "CC-BY-SA-2.5", + "CC-BY-SA-3.0", + "CC-BY-SA-3.0-AT", + "CC-BY-SA-3.0-DE", + "CC-BY-SA-4.0", + "CC-PDDC", + "CC0-1.0", + "CDDL-1.0", + "CDDL-1.1", + "CDL-1.0", + "CDLA-Permissive-1.0", + "CDLA-Permissive-2.0", + "CDLA-Sharing-1.0", + "CECILL-1.0", + "CECILL-1.1", + "CECILL-2.0", + "CECILL-2.1", + "CECILL-B", + "CECILL-C", + "CERN-OHL-1.1", + "CERN-OHL-1.2", + "CERN-OHL-P-2.0", + "CERN-OHL-S-2.0", + "CERN-OHL-W-2.0", + "CFITSIO", + "CMU-Mach", + "CNRI-Jython", + "CNRI-Python", + "CNRI-Python-GPL-Compatible", + "COIL-1.0", + "CPAL-1.0", + "CPL-1.0", + "CPOL-1.02", + "CUA-OPL-1.0", + "Caldera", + "ClArtistic", + "Clips", + "Community-Spec-1.0", + "Condor-1.1", + "Cornell-Lossless-JPEG", + "Crossword", + "CrystalStacker", + "Cube", + "D-FSL-1.0", + "DL-DE-BY-2.0", + "DOC", + "DRL-1.0", + "DSDP", + "Dotseqn", + "ECL-1.0", + "ECL-2.0", + "EFL-1.0", + "EFL-2.0", + "EPICS", + "EPL-1.0", + "EPL-2.0", + "EUDatagrid", + "EUPL-1.0", + "EUPL-1.1", + "EUPL-1.2", + "Elastic-2.0", + "Entessa", + "ErlPL-1.1", + "Eurosym", + "FDK-AAC", + "FSFAP", + "FSFUL", + "FSFULLR", + "FSFULLRWD", + "FTL", + "Fair", + "Frameworx-1.0", + "FreeBSD-DOC", + "FreeImage", + "GD", + "GFDL-1.1-invariants-only", + "GFDL-1.1-invariants-or-later", + "GFDL-1.1-no-invariants-only", + "GFDL-1.1-no-invariants-or-later", + "GFDL-1.1-only", + "GFDL-1.1-or-later", + "GFDL-1.2-invariants-only", + "GFDL-1.2-invariants-or-later", + "GFDL-1.2-no-invariants-only", + "GFDL-1.2-no-invariants-or-later", + "GFDL-1.2-only", + "GFDL-1.2-or-later", + "GFDL-1.3-invariants-only", + "GFDL-1.3-invariants-or-later", + "GFDL-1.3-no-invariants-only", + "GFDL-1.3-no-invariants-or-later", + "GFDL-1.3-only", + "GFDL-1.3-or-later", + "GL2PS", + "GLWTPL", + "GPL-1.0-only", + "GPL-1.0-or-later", + "GPL-2.0-only", + "GPL-2.0-or-later", + "GPL-3.0-only", + "GPL-3.0-or-later", + "Giftware", + "Glide", + "Glulxe", + "Graphics-Gems", + "HP-1986", + "HPND", + "HPND-Markus-Kuhn", + "HPND-export-US", + "HPND-sell-variant", + "HPND-sell-variant-MIT-disclaimer", + "HTMLTIDY", + "HaskellReport", + "Hippocratic-2.1", + "IBM-pibs", + "ICU", + "IEC-Code-Components-EULA", + "IJG", + "IJG-short", + "IPA", + "IPL-1.0", + "ISC", + "ImageMagick", + "Imlib2", + "Info-ZIP", + "Intel", + "Intel-ACPI", + "Interbase-1.0", + "JPL-image", + "JPNIC", + "JSON", + "Jam", + "JasPer-2.0", + "Kazlib", + "Knuth-CTAN", + "LAL-1.2", + "LAL-1.3", + "LGPL-2.0-only", + "LGPL-2.0-or-later", + "LGPL-2.1-only", + "LGPL-2.1-or-later", + "LGPL-3.0-only", + "LGPL-3.0-or-later", + "LGPLLR", + "LOOP", + "LPL-1.0", + "LPL-1.02", + "LPPL-1.0", + "LPPL-1.1", + "LPPL-1.2", + "LPPL-1.3a", + "LPPL-1.3c", + "LZMA-SDK-9.11-to-9.20", + "LZMA-SDK-9.22", + "Latex2e", + "Leptonica", + "LiLiQ-P-1.1", + "LiLiQ-R-1.1", + "LiLiQ-Rplus-1.1", + "Libpng", + "Linux-OpenIB", + "Linux-man-pages-copyleft", + "MIT", + "MIT-0", + "MIT-CMU", + "MIT-Modern-Variant", + "MIT-Wu", + "MIT-advertising", + "MIT-enna", + "MIT-feh", + "MIT-open-group", + "MITNFA", + "MPL-1.0", + "MPL-1.1", + "MPL-2.0", + "MPL-2.0-no-copyleft-exception", + "MS-LPL", + "MS-PL", + "MS-RL", + "MTLL", + "MakeIndex", + "Martin-Birgmeier", + "Minpack", + "MirOS", + "Motosoto", + "MulanPSL-1.0", + "MulanPSL-2.0", + "Multics", + "Mup", + "NAIST-2003", + "NASA-1.3", + "NBPL-1.0", + "NCGL-UK-2.0", + "NCSA", + 
"NGPL", + "NICTA-1.0", + "NIST-PD", + "NIST-PD-fallback", + "NLOD-1.0", + "NLOD-2.0", + "NLPL", + "NOSL", + "NPL-1.0", + "NPL-1.1", + "NPOSL-3.0", + "NRL", + "NTP", + "NTP-0", + "Naumen", + "Net-SNMP", + "NetCDF", + "Newsletr", + "Nokia", + "Noweb", + "O-UDA-1.0", + "OCCT-PL", + "OCLC-2.0", + "ODC-By-1.0", + "ODbL-1.0", + "OFFIS", + "OFL-1.0", + "OFL-1.0-RFN", + "OFL-1.0-no-RFN", + "OFL-1.1", + "OFL-1.1-RFN", + "OFL-1.1-no-RFN", + "OGC-1.0", + "OGDL-Taiwan-1.0", + "OGL-Canada-2.0", + "OGL-UK-1.0", + "OGL-UK-2.0", + "OGL-UK-3.0", + "OGTSL", + "OLDAP-1.1", + "OLDAP-1.2", + "OLDAP-1.3", + "OLDAP-1.4", + "OLDAP-2.0", + "OLDAP-2.0.1", + "OLDAP-2.1", + "OLDAP-2.2", + "OLDAP-2.2.1", + "OLDAP-2.2.2", + "OLDAP-2.3", + "OLDAP-2.4", + "OLDAP-2.5", + "OLDAP-2.6", + "OLDAP-2.7", + "OLDAP-2.8", + "OML", + "OPL-1.0", + "OPUBL-1.0", + "OSET-PL-2.1", + "OSL-1.0", + "OSL-1.1", + "OSL-2.0", + "OSL-2.1", + "OSL-3.0", + "OpenPBS-2.3", + "OpenSSL", + "PDDL-1.0", + "PHP-3.0", + "PHP-3.01", + "PSF-2.0", + "Parity-6.0.0", + "Parity-7.0.0", + "Plexus", + "PolyForm-Noncommercial-1.0.0", + "PolyForm-Small-Business-1.0.0", + "PostgreSQL", + "Python-2.0", + "Python-2.0.1", + "QPL-1.0", + "QPL-1.0-INRIA-2004", + "Qhull", + "RHeCos-1.1", + "RPL-1.1", + "RPL-1.5", + "RPSL-1.0", + "RSA-MD", + "RSCPL", + "Rdisc", + "Ruby", + "SAX-PD", + "SCEA", + "SGI-B-1.0", + "SGI-B-1.1", + "SGI-B-2.0", + "SHL-0.5", + "SHL-0.51", + "SISSL", + "SISSL-1.2", + "SMLNJ", + "SMPPL", + "SNIA", + "SPL-1.0", + "SSH-OpenSSH", + "SSH-short", + "SSPL-1.0", + "SWL", + "Saxpath", + "SchemeReport", + "Sendmail", + "Sendmail-8.23", + "SimPL-2.0", + "Sleepycat", + "Spencer-86", + "Spencer-94", + "Spencer-99", + "SugarCRM-1.1.3", + "SunPro", + "Symlinks", + "TAPR-OHL-1.0", + "TCL", + "TCP-wrappers", + "TMate", + "TORQUE-1.1", + "TOSL", + "TPDL", + "TPL-1.0", + "TTWL", + "TU-Berlin-1.0", + "TU-Berlin-2.0", + "UCAR", + "UCL-1.0", + "UPL-1.0", + "Unicode-DFS-2015", + "Unicode-DFS-2016", + "Unicode-TOU", + "Unlicense", + "VOSTROM", + "VSL-1.0", + "Vim", + "W3C", + "W3C-19980720", + "W3C-20150513", + "WTFPL", + "Watcom-1.0", + "Wsuipa", + "X11", + "X11-distribute-modifications-variant", + "XFree86-1.1", + "XSkat", + "Xerox", + "Xnet", + "YPL-1.0", + "YPL-1.1", + "ZPL-1.1", + "ZPL-2.0", + "ZPL-2.1", + "Zed", + "Zend-2.0", + "Zimbra-1.3", + "Zimbra-1.4", + "Zlib", + "blessing", + "bzip2-1.0.6", + "checkmk", + "copyleft-next-0.3.0", + "copyleft-next-0.3.1", + "curl", + "diffmark", + "dvipdfm", + "eGenix", + "etalab-2.0", + "gSOAP-1.3b", + "gnuplot", + "iMatix", + "libpng-2.0", + "libselinux-1.0", + "libtiff", + "libutil-David-Nugent", + "mpi-permissive", + "mpich2", + "mplus", + "psfrag", + "psutils", + "snprintf", + "w3m", + "xinetd", + "xlock", + "xpp", + "zlib-acknowledgement" + ], + "title": "SPDXLicenseEnum" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/galaxy.json", + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": false, + "examples": ["galaxy.yml"], + "properties": { + "authors": { + "items": { + "type": "string" + }, + "title": "Authors", + "type": "array" + }, + "build_ignore": { + "items": { + "type": "string" + }, + "title": "Build Ignore", + "type": "array" + }, + "dependencies": { + "additionalProperties": { + "$ref": "#/$defs/CollectionVersionConstraintModel" + }, + "title": "Dependencies", + "type": "object" + }, + "description": { + "title": "Description", + "type": "string" + }, + "documentation": { + "title": "Documentation", + "type": "string" + }, + 
"homepage": { + "title": "Homepage", + "type": "string" + }, + "issues": { + "title": "Issues", + "type": "string" + }, + "license": { + "items": { + "$ref": "#/$defs/SPDXLicense" + }, + "title": "License", + "type": "array" + }, + "license_file": { + "title": "License File", + "type": "string" + }, + "manifest": { + "additionalProperties": false, + "markdownDescription": "A dict controlling use of manifest directives used in building the collection artifact.\nThe key directives is a list of MANIFEST.in style directives\nThe key omit_default_directives is a boolean that controls whether the default directives are used.\nMutually exclusive with build_ignore.", + "properties": { + "directives": { + "items": { + "type": "string" + }, + "title": "Directives", + "type": "array" + }, + "omit_default_directives": { + "title": "Omit Default Directives", + "type": "boolean" + } + }, + "title": "Manifest", + "type": "object" + }, + "name": { + "minLength": 2, + "pattern": "^[a-z][a-z0-9_]+$", + "title": "Name", + "type": "string" + }, + "namespace": { + "minLength": 2, + "pattern": "^[a-z][a-z0-9_]+$", + "title": "Namespace", + "type": "string" + }, + "readme": { + "markdownDescription": "The path to the Markdown (.md) readme file. This path is relative to the root of the collection.\nSee [metadata structure](https://docs.ansible.com/ansible/latest/dev_guide/collections_galaxy_meta.html)", + "title": "Readme", + "type": "string" + }, + "repository": { + "title": "Repository", + "type": "string" + }, + "tags": { + "items": { + "type": "string" + }, + "title": "Tags", + "type": "array" + }, + "version": { + "markdownDescription": "Version must use [SemVer](https://semver.org/) format, which is more restrictive than [PEP-440](https://peps.python.org/pep-0440/). 
For example `1.0.0-rc1` is valid but `1.0.0rc` is not.", + "minLength": 5, + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$", + "title": "Version", + "type": "string" + } + }, + "required": [ + "namespace", + "name", + "version", + "readme", + "authors", + "description", + "repository" + ], + "title": "Ansible galaxy.yml Schema", + "type": "object" +} diff --git a/src/ansiblelint/schemas/inventory.json b/src/ansiblelint/schemas/inventory.json new file mode 100644 index 0000000..80333ce --- /dev/null +++ b/src/ansiblelint/schemas/inventory.json @@ -0,0 +1,66 @@ +{ + "$defs": { + "group": { + "properties": { + "children": { + "patternProperties": { + "[a-zA-Z-_0-9]": { + "$ref": "#/$defs/group" + } + } + }, + "hosts": { + "patternProperties": { + "[a-zA-Z.-_0-9]": { + "type": ["object", "null"] + } + }, + "type": ["object", "string"] + }, + "vars": { + "type": "object" + } + }, + "type": ["object", "null"] + }, + "special-group": { + "additionalProperties": false, + "properties": { + "children": { + "type": ["object", "null"] + }, + "groups": { + "type": ["object", "null"] + }, + "hosts": { + "type": ["object", "null"] + }, + "vars": { + "type": ["object", "null"] + } + }, + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/inventory.json", + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": true, + "description": "Ansible Inventory Schema", + "examples": [ + "inventory.yaml", + "inventory.yml", + "inventory/*.yml", + "inventory/*.yaml" + ], + "markdownDescription": "All keys at top levels are groups with `all` and `ungrouped` having a special meaning.\n\nSee [How to build your inventory](https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html)", + "properties": { + "all": { + "$ref": "#/$defs/special-group" + }, + "ungrouped": { + "$ref": "#/$defs/group" + } + }, + "title": "Ansible Inventory Schema", + "type": "object" +} diff --git a/src/ansiblelint/schemas/main.py b/src/ansiblelint/schemas/main.py new file mode 100644 index 0000000..590aea3 --- /dev/null +++ b/src/ansiblelint/schemas/main.py @@ -0,0 +1,37 @@ +"""Module containing cached JSON schemas.""" +from __future__ import annotations + +import json +import logging +from typing import TYPE_CHECKING + +import jsonschema +import yaml +from jsonschema.exceptions import ValidationError + +from ansiblelint.loaders import yaml_load_safe +from ansiblelint.schemas.__main__ import JSON_SCHEMAS, _schema_cache + +_logger = logging.getLogger(__package__) + +if TYPE_CHECKING: + from ansiblelint.file_utils import Lintable + + +def validate_file_schema(file: Lintable) -> list[str]: + """Return list of JSON validation errors found.""" + if file.kind not in JSON_SCHEMAS: + return [f"Unable to find JSON Schema '{file.kind}' for '{file.path}' file."] + try: + # convert yaml to json (keys are converted to strings) + yaml_data = yaml_load_safe(file.content) + json_data = json.loads(json.dumps(yaml_data)) + jsonschema.validate( + instance=json_data, + schema=_schema_cache[file.kind], + ) + except yaml.constructor.ConstructorError as exc: + return [f"Failed to load YAML file '{file.path}': {exc.problem}"] + except ValidationError as exc: + return [exc.message] + return [] diff --git a/src/ansiblelint/schemas/meta-runtime.json b/src/ansiblelint/schemas/meta-runtime.json new file mode 
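The validate_file_schema() helper in schemas/main.py above round-trips the YAML document through json so that all keys become plain strings, then hands the result to jsonschema.validate(). Below is a minimal standalone sketch of that same flow against the galaxy.json schema added above; the schema path and the sample document are illustrative assumptions, not files from this tree, and the identical pattern covers inventory.yml against inventory.json.

import json

import yaml  # PyYAML
import jsonschema
from jsonschema.exceptions import ValidationError

# Illustrative sample; a real galaxy.yml sits at the collection root.
SAMPLE_GALAXY_YML = """
namespace: acme
name: example
version: 1.0.0
readme: README.md
authors: [Jane Doe]
description: Example collection
repository: https://example.com/acme/example
license: [MIT]
"""

# Assumed local copy of the galaxy.json schema shown above.
with open("galaxy.json", encoding="utf-8") as handle:
    schema = json.load(handle)

# YAML -> JSON-compatible data, mirroring the conversion in validate_file_schema().
data = json.loads(json.dumps(yaml.safe_load(SAMPLE_GALAXY_YML)))
try:
    jsonschema.validate(instance=data, schema=schema)
    print("galaxy.yml: valid")
except ValidationError as exc:
    print(f"galaxy.yml: {exc.message}")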
100644 index 0000000..617647f --- /dev/null +++ b/src/ansiblelint/schemas/meta-runtime.json @@ -0,0 +1,82 @@ +{ + "$defs": { + "ActionGroup": { + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "$ref": "#/$defs/Metadata" + } + ] + }, + "type": "array" + }, + "Metadata": { + "properties": { + "metadata": { + "properties": { + "extend_group": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "Redirect": { + "properties": { + "redirect": { + "type": "string" + } + }, + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/meta-runtime.json", + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": false, + "description": "See https://docs.ansible.com/ansible/devel/dev_guide/developing_collections_structure.html#meta-directory", + "examples": ["**/meta/runtime.yml"], + "properties": { + "action_groups": { + "additionalProperties": { + "$ref": "#/$defs/ActionGroup" + }, + "description": "A mapping of groups and the list of action plugin and module names they contain. They may also have a special ‘metadata’ dictionary in the list, which can be used to include actions from other groups.", + "title": "Action Groups", + "type": "object" + }, + "import_redirection": { + "additionalProperties": { + "$ref": "#/$defs/Redirect" + }, + "description": "A mapping of names for Python import statements and their redirected locations.", + "title": "Import Redirection", + "type": "object" + }, + "plugin_routing": { + "markdownDescription": "Content in a collection that Ansible needs to load from another location or that has been deprecated/removed. The top level keys of plugin_routing are types of plugins, with individual plugin names as subkeys. To define a new location for a plugin, set the redirect field to another name. To deprecate a plugin, use the deprecation field to provide a custom warning message and the removal version or date. If the plugin has been renamed or moved to a new location, the redirect field should also be provided. If a plugin is being removed entirely, tombstone can be used for the fatal error message and removal version or date.", + "properties": { + "inventory": {}, + "module_utils": {}, + "modules": {} + }, + "title": "Plugin Routing", + "type": "object" + }, + "requires_ansible": { + "examples": [">=2.10,<2.11"], + "pattern": "^[^\\s]*$", + "title": "The version of Ansible Core (ansible-core) required to use the collection. 
Multiple versions can be separated with a comma.", + "type": "string" + } + }, + "title": "Ansible Meta Runtime Schema", + "type": "object" +} diff --git a/src/ansiblelint/schemas/meta.json b/src/ansiblelint/schemas/meta.json new file mode 100644 index 0000000..384d113 --- /dev/null +++ b/src/ansiblelint/schemas/meta.json @@ -0,0 +1,1473 @@ +{ + "$defs": { + "AIXPlatformModel": { + "properties": { + "name": { + "const": "AIX", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["6.1", "7.1", "7.2", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "AIXPlatformModel", + "type": "object" + }, + "AlpinePlatformModel": { + "properties": { + "name": { + "const": "Alpine", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "AlpinePlatformModel", + "type": "object" + }, + "AmazonLinuxPlatformModel": { + "properties": { + "name": { + "const": "Amazon Linux", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all", "1", "2", "2023"], + "type": "string" + }, + "type": "array" + } + }, + "title": "Amazon Linux 2PlatformModel", + "type": "object" + }, + "AmazonPlatformModel": { + "properties": { + "name": { + "const": "Amazon", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "2013.03", + "2013.09", + "2014.03", + "2014.09", + "2015.03", + "2015.09", + "2016.03", + "2016.09", + "2017.03", + "2017.09", + "2017.12", + "2018.03", + "Candidate", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "AmazonPlatformModel", + "type": "object" + }, + "ArchLinuxPlatformModel": { + "properties": { + "name": { + "const": "ArchLinux", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "ArchLinuxPlatformModel", + "type": "object" + }, + "ClearLinuxPlatformModel": { + "properties": { + "name": { + "const": "ClearLinux", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "ClearLinuxPlatformModel", + "type": "object" + }, + "CumulusPlatformModel": { + "properties": { + "name": { + "const": "Cumulus", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["2.5", "3.0", "3.1", "3.2", "3.3", "3.4", "3.5", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "CumulusPlatformModel", + "type": "object" + }, + "DebianPlatformModel": { + "properties": { + "name": { + "const": "Debian", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "bookworm", + "bullseye", + "buster", + "etch", + "jessie", + "lenny", + "sid", + "squeeze", + "stretch", + "wheezy", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "DebianPlatformModel", + "type": "object" + }, + "DellOSPlatformModel": { + "properties": { + "name": { + "const": "DellOS", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["10", "6", "9", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "DellOSPlatformModel", + "type": "object" + }, + "DependencyModel": { + "additionalProperties": true, + "anyOf": [ + { 
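One detail of the meta-runtime.json schema above: requires_ansible is only checked against the pattern ^[^\s]*$, meaning the constraint may contain no whitespace, so ">=2.10,<2.11" is accepted while ">= 2.10" would be flagged. A quick sketch of that check in isolation:

import re

# Same regular expression as the requires_ansible pattern above.
REQUIRES_ANSIBLE = re.compile(r"^[^\s]*$")

for candidate in (">=2.10,<2.11", ">= 2.10"):
    verdict = "ok" if REQUIRES_ANSIBLE.match(candidate) else "rejected (contains whitespace)"
    print(f"{candidate!r}: {verdict}")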
+ "required": ["role"] + }, + { + "required": ["src"] + }, + { + "required": ["name"] + } + ], + "markdownDescription": "See https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_reuse_roles.html#role-dependencies and https://github.com/ansible/ansible/blob/devel/lib/ansible/playbook/role/metadata.py#L79\n\nOther keys are treated as role [parameters](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html#passing-different-parameters).", + "properties": { + "become": { + "title": "Become", + "type": "boolean" + }, + "name": { + "title": "Name", + "type": "string" + }, + "role": { + "title": "Role", + "type": "string" + }, + "scm": { + "enum": ["hg", "git"], + "title": "Scm", + "type": "string" + }, + "src": { + "title": "Src", + "type": "string" + }, + "tags": { + "items": { + "type": "string" + }, + "title": "Tags", + "type": ["array", "string"] + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "version": { + "title": "Version", + "type": "string" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "title": "Dependency entry", + "type": "object" + }, + "DevuanPlatformModel": { + "properties": { + "name": { + "const": "Devuan", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["ascii", "beowulf", "ceres", "jessie", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "DevuanPlatformModel", + "type": "object" + }, + "DragonFlyBSDPlatformModel": { + "properties": { + "name": { + "const": "DragonFlyBSD", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["5.2", "5.4", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "DragonFlyBSDPlatformModel", + "type": "object" + }, + "ELPlatformModel": { + "properties": { + "name": { + "const": "EL", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["5", "6", "7", "8", "9", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "ELPlatformModel", + "type": "object" + }, + "FedoraPlatformModel": { + "properties": { + "name": { + "const": "Fedora", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "16", + "17", + "18", + "19", + "20", + "21", + "22", + "23", + "24", + "25", + "26", + "27", + "28", + "29", + "30", + "31", + "32", + "33", + "34", + "35", + "36", + "37", + "38", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "FedoraPlatformModel", + "type": "object" + }, + "FreeBSDPlatformModel": { + "properties": { + "name": { + "const": "FreeBSD", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "10.0", + "10.1", + "10.2", + "10.3", + "10.4", + "11.0", + "11.1", + "11.2", + "11.3", + "11.4", + "12.0", + "12.1", + "12.2", + "13.0", + "13.1", + "13.2", + "14.0", + "8.0", + "8.1", + "8.2", + "8.3", + "8.4", + "9.0", + "9.1", + "9.2", + "9.3", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "FreeBSDPlatformModel", + "type": "object" + }, + "GalaxyInfoModel": { + "additionalProperties": false, + "allOf": [ + { + "if": { + "properties": { + "standalone": { + "const": true + } + } + }, + "then": { + "$comment": "Standalone role, so we require several fields.", + "required": [ + "author", + "description", + "license", + "min_ansible_version" + ] + } + }, + { + "if": { + "properties": { + "standalone": { + "const": 
false + } + } + }, + "then": { + "$comment": "Collection roles do not use most galaxy fields.", + "not": { + "required": [ + "cloud_platforms", + "galaxy_tags", + "min_ansible_version", + "namespace", + "platforms", + "role_name", + "video_links" + ] + }, + "required": ["description"] + } + } + ], + "else": { + "$comment": "If standalone is false, then we have a collection role and only description is required", + "required": ["description"] + }, + "properties": { + "author": { + "title": "Author", + "type": "string" + }, + "cloud_platforms": { + "markdownDescription": "Only valid for old standalone roles." + }, + "company": { + "title": "Company", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + }, + "galaxy_tags": { + "items": { + "type": "string" + }, + "markdownDescription": "See https://galaxy.ansible.com/docs/contributing/creating_role.html", + "title": "Galaxy Tags", + "type": "array" + }, + "github_branch": { + "markdownDescription": "Optionally specify the branch Galaxy will use when accessing the GitHub repo for this role", + "title": "GitHub Branch", + "type": "string" + }, + "issue_tracker_url": { + "title": "Issue Tracker Url", + "type": "string" + }, + "license": { + "title": "License", + "type": "string" + }, + "min_ansible_container_version": { + "title": "Min Ansible Container Version", + "type": "string" + }, + "min_ansible_version": { + "title": "Min Ansible Version", + "type": "string" + }, + "namespace": { + "markdownDescription": "Used by molecule and ansible-lint to compute FQRN for roles outside collections", + "minLength": 2, + "pattern": "^[a-z][a-z0-9_]+$", + "title": "Namespace Name", + "type": "string" + }, + "platforms": { + "$ref": "#/$defs/platforms" + }, + "role_name": { + "minLength": 2, + "pattern": "^[a-z][a-z0-9_]+$", + "title": "Role Name", + "type": "string" + }, + "standalone": { + "description": "Set to true for old standalone roles, or false for new collection roles.", + "title": "Standalone", + "type": "boolean" + }, + "video_links": { + "markdownDescription": "Only valid for old standalone roles.", + "type": "array" + } + }, + "title": "GalaxyInfoModel", + "type": "object" + }, + "GenericBSDPlatformModel": { + "properties": { + "name": { + "const": "GenericBSD", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "GenericBSDPlatformModel", + "type": "object" + }, + "GenericLinuxPlatformModel": { + "properties": { + "name": { + "const": "GenericLinux", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "GenericLinuxPlatformModel", + "type": "object" + }, + "GenericUNIXPlatformModel": { + "properties": { + "name": { + "const": "GenericUNIX", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "GenericUNIXPlatformModel", + "type": "object" + }, + "GentooPlatformModel": { + "properties": { + "name": { + "const": "Gentoo", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "GentooPlatformModel", + "type": "object" + }, + "HardenedBSDPlatformModel": { + "properties": { + "name": { + "const": "HardenedBSD", + "title": "Name", + "type": 
"string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["10", "11", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "HardenedBSDPlatformModel", + "type": "object" + }, + "IOSPlatformModel": { + "properties": { + "name": { + "const": "IOS", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "IOSPlatformModel", + "type": "object" + }, + "JunosPlatformModel": { + "properties": { + "name": { + "const": "Junos", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "JunosPlatformModel", + "type": "object" + }, + "KaliPlatformModel": { + "properties": { + "name": { + "const": "Kali", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "2.0", + "2016", + "2017", + "2018", + "2019", + "2020", + "2021", + "2022", + "2023", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "KaliPlatformModel", + "type": "object" + }, + "MacOSXPlatformModel": { + "properties": { + "name": { + "const": "MacOSX", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "10.10", + "10.11", + "10.12", + "10.13", + "10.14", + "10.15", + "10.7", + "10.8", + "10.9", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "MacOSXPlatformModel", + "type": "object" + }, + "MageiaPlatformModel": { + "properties": { + "name": { + "const": "Mageia", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["7", "8", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "MageiaPlatformModel", + "type": "object" + }, + "NXOSPlatformModel": { + "properties": { + "name": { + "const": "NXOS", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "NXOSPlatformModel", + "type": "object" + }, + "NetBSDPlatformModel": { + "properties": { + "name": { + "const": "NetBSD", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "8.0", + "8.1", + "8.2", + "9.0", + "9.1", + "9.2", + "9.3", + "10.0", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "NetBSDPlatformModel", + "type": "object" + }, + "OpenBSDPlatformModel": { + "properties": { + "name": { + "const": "OpenBSD", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "5.6", + "5.7", + "5.8", + "5.9", + "6.0", + "6.1", + "6.2", + "6.3", + "6.4", + "6.5", + "6.6", + "6.7", + "6.8", + "6.9", + "7.0", + "7.1", + "7.2", + "7.3", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "OpenBSDPlatformModel", + "type": "object" + }, + "OpenWrtPlatformModel": { + "properties": { + "name": { + "const": "OpenWrt", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["17.01", "18.06", "19.07", "21.02", "22.03", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "OpenWrtPlatformModel", + "type": "object" + }, + "OracleLinuxPlatformModel": { + "properties": { + "name": { + "const": "OracleLinux", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + 
"enum": [ + "7.0", + "7.1", + "7.2", + "7.3", + "7.4", + "7.5", + "7.6", + "7.7", + "7.8", + "7.9", + "8.0", + "8.1", + "8.2", + "8.3", + "8.4", + "8.5", + "8.6", + "8.7", + "8.8", + "9.0", + "9.1", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "OracleLinuxPlatformModel", + "type": "object" + }, + "PAN-OSPlatformModel": { + "properties": { + "name": { + "const": "PAN-OS", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["7.1", "8.0", "8.1", "9.0", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "PAN-OSPlatformModel", + "type": "object" + }, + "SLESPlatformModel": { + "properties": { + "name": { + "const": "SLES", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "10SP3", + "10SP4", + "11", + "11SP1", + "11SP2", + "11SP3", + "11SP4", + "12", + "12SP1", + "12SP2", + "12SP3", + "12SP4", + "12SP5", + "15", + "15SP1", + "15SP2", + "15SP3", + "15SP4", + "15SP5", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "SLESPlatformModel", + "type": "object" + }, + "SmartOSPlatformModel": { + "properties": { + "name": { + "const": "SmartOS", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "SmartOSPlatformModel", + "type": "object" + }, + "SolarisPlatformModel": { + "properties": { + "name": { + "const": "Solaris", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["10", "11.0", "11.1", "11.2", "11.3", "11.4", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "SolarisPlatformModel", + "type": "object" + }, + "SynologyPlatformModel": { + "properties": { + "name": { + "const": "Synology", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["6.0", "6.1", "6.2", "7.0", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "SynologyPlatformModel", + "type": "object" + }, + "TMOSPlatformModel": { + "properties": { + "name": { + "const": "TMOS", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["12.1", "13.0", "13.1", "14.0", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "TMOSPlatformModel", + "type": "object" + }, + "UbuntuPlatformModel": { + "properties": { + "name": { + "const": "Ubuntu", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "artful", + "bionic", + "cosmic", + "cuttlefish", + "disco", + "eoan", + "focal", + "groovy", + "hirsute", + "impish", + "jammy", + "lucid", + "maverick", + "natty", + "oneiric", + "precise", + "quantal", + "raring", + "saucy", + "trusty", + "utopic", + "vivid", + "wily", + "xenial", + "yakkety", + "zesty", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "UbuntuPlatformModel", + "type": "object" + }, + "Void_LinuxPlatformModel": { + "properties": { + "name": { + "const": "Void Linux", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "Void LinuxPlatformModel", + "type": "object" + }, + "WindowsPlatformModel": { + "properties": { + "name": { + "const": "Windows", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + 
"enum": [ + "2008R2", + "2008x64", + "2008x86", + "2012", + "2012R2", + "2016", + "2019", + "2022", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "WindowsPlatformModel", + "type": "object" + }, + "aosPlatformModel": { + "properties": { + "name": { + "const": "aos", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "aosPlatformModel", + "type": "object" + }, + "collections": { + "items": { + "markdownDescription": "See [Using collections in roles](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#using-collections-in-roles) and [collection naming conventions](https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_in_groups.html#naming-conventions)", + "pattern": "^[a-z_]+\\.[a-z_]+$", + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "complex_conditional": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ] + }, + "eosPlatformModel": { + "properties": { + "name": { + "const": "eos", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "eosPlatformModel", + "type": "object" + }, + "macOSPlatformModel": { + "properties": { + "name": { + "const": "macOS", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "Big-Sur", + "Catalina", + "High-Sierra", + "Mojave", + "Monterey", + "Sierra", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "macOSPlatformModel", + "type": "object" + }, + "opensusePlatformModel": { + "properties": { + "name": { + "const": "opensuse", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": [ + "12.1", + "12.2", + "12.3", + "13.1", + "13.2", + "15.0", + "15.1", + "15.2", + "15.3", + "15.4", + "15.5", + "42.1", + "42.2", + "42.3", + "all" + ], + "type": "string" + }, + "type": "array" + } + }, + "title": "opensusePlatformModel", + "type": "object" + }, + "os10PlatformModel": { + "properties": { + "name": { + "const": "os10", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "os10PlatformModel", + "type": "object" + }, + "platforms": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/AIXPlatformModel" + }, + { + "$ref": "#/$defs/AlpinePlatformModel" + }, + { + "$ref": "#/$defs/AmazonPlatformModel" + }, + { + "$ref": "#/$defs/AmazonLinuxPlatformModel" + }, + { + "$ref": "#/$defs/aosPlatformModel" + }, + { + "$ref": "#/$defs/ArchLinuxPlatformModel" + }, + { + "$ref": "#/$defs/ClearLinuxPlatformModel" + }, + { + "$ref": "#/$defs/CumulusPlatformModel" + }, + { + "$ref": "#/$defs/NetBSDPlatformModel" + }, + { + "$ref": "#/$defs/DebianPlatformModel" + }, + { + "$ref": "#/$defs/DellOSPlatformModel" + }, + { + "$ref": "#/$defs/DevuanPlatformModel" + }, + { + "$ref": "#/$defs/DragonFlyBSDPlatformModel" + }, + { + "$ref": "#/$defs/ELPlatformModel" + }, + { + "$ref": "#/$defs/eosPlatformModel" + }, + { + "$ref": "#/$defs/FedoraPlatformModel" + }, + { + "$ref": "#/$defs/FreeBSDPlatformModel" + }, + { + "$ref": "#/$defs/GenericBSDPlatformModel" + }, + { + "$ref": "#/$defs/GenericLinuxPlatformModel" + }, + { + "$ref": 
"#/$defs/GenericUNIXPlatformModel" + }, + { + "$ref": "#/$defs/GentooPlatformModel" + }, + { + "$ref": "#/$defs/HardenedBSDPlatformModel" + }, + { + "$ref": "#/$defs/IOSPlatformModel" + }, + { + "$ref": "#/$defs/JunosPlatformModel" + }, + { + "$ref": "#/$defs/KaliPlatformModel" + }, + { + "$ref": "#/$defs/macOSPlatformModel" + }, + { + "$ref": "#/$defs/MacOSXPlatformModel" + }, + { + "$ref": "#/$defs/MageiaPlatformModel" + }, + { + "$ref": "#/$defs/NXOSPlatformModel" + }, + { + "$ref": "#/$defs/OpenBSDPlatformModel" + }, + { + "$ref": "#/$defs/opensusePlatformModel" + }, + { + "$ref": "#/$defs/OpenWrtPlatformModel" + }, + { + "$ref": "#/$defs/OracleLinuxPlatformModel" + }, + { + "$ref": "#/$defs/os10PlatformModel" + }, + { + "$ref": "#/$defs/PAN-OSPlatformModel" + }, + { + "$ref": "#/$defs/SLESPlatformModel" + }, + { + "$ref": "#/$defs/SmartOSPlatformModel" + }, + { + "$ref": "#/$defs/SolarisPlatformModel" + }, + { + "$ref": "#/$defs/SynologyPlatformModel" + }, + { + "$ref": "#/$defs/TMOSPlatformModel" + }, + { + "$ref": "#/$defs/UbuntuPlatformModel" + }, + { + "$ref": "#/$defs/vCenterPlatformModel" + }, + { + "$ref": "#/$defs/Void_LinuxPlatformModel" + }, + { + "$ref": "#/$defs/vSpherePlatformModel" + }, + { + "$ref": "#/$defs/WindowsPlatformModel" + } + ] + }, + "title": "Platforms", + "type": "array" + }, + "vCenterPlatformModel": { + "properties": { + "name": { + "const": "vCenter", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["5.5", "6.0", "6.5", "6.7", "7.0", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "vCenterPlatformModel", + "type": "object" + }, + "vSpherePlatformModel": { + "properties": { + "name": { + "const": "vSphere", + "title": "Name", + "type": "string" + }, + "versions": { + "default": "all", + "items": { + "enum": ["5.5", "6.0", "6.5", "6.7", "7.0", "all"], + "type": "string" + }, + "type": "array" + } + }, + "title": "vSpherePlatformModel", + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/meta.json", + "$schema": "http://json-schema.org/draft-07/schema", + "examples": ["meta/main.yml"], + "properties": { + "additionalProperties": false, + "allow_duplicates": { + "title": "Allow Duplicates", + "type": "boolean" + }, + "collections": { + "$ref": "#/$defs/collections" + }, + "dependencies": { + "items": { + "$ref": "#/$defs/DependencyModel" + }, + "title": "Dependencies", + "type": "array" + }, + "galaxy_info": { + "$ref": "#/$defs/GalaxyInfoModel" + } + }, + "title": "Ansible Meta Schema v1/v2", + "type": ["object", "null"] +} diff --git a/src/ansiblelint/schemas/molecule.json b/src/ansiblelint/schemas/molecule.json new file mode 100644 index 0000000..d957f08 --- /dev/null +++ b/src/ansiblelint/schemas/molecule.json @@ -0,0 +1,561 @@ +{ + "$defs": { + "ContainerRegistryModel": { + "additionalProperties": false, + "properties": { + "url": { + "title": "Url", + "type": "string" + } + }, + "required": ["url"], + "title": "ContainerRegistryModel", + "type": "object" + }, + "MoleculeDependencyModel": { + "additionalProperties": false, + "properties": { + "command": { + "title": "Command", + "type": ["string", "null"] + }, + "enabled": { + "default": true, + "title": "Enabled", + "type": "boolean" + }, + "env": { + "title": "Env", + "type": "object" + }, + "name": { + "enum": ["galaxy", "shell"], + "title": "Name", + "type": "string" + }, + "options": { + "title": "Options", + "type": "object" + } + }, + "required": 
["name"], + "title": "MoleculeDependencyModel", + "type": "object" + }, + "MoleculeDriverModel": { + "additionalProperties": false, + "properties": { + "cachier": { + "title": "Cachier", + "type": "string" + }, + "default_box": { + "title": "DefaultBox", + "type": "string" + }, + "name": { + "enum": [ + "azure", + "ec2", + "delegated", + "docker", + "containers", + "openstack", + "podman", + "vagrant", + "digitalocean", + "gce", + "libvirt", + "lxd" + ], + "title": "Name", + "type": "string" + }, + "options": { + "$ref": "#/$defs/MoleculeDriverOptionsModel" + }, + "parallel": { + "title": "Parallel", + "type": "boolean" + }, + "provider": { + "title": "Provider", + "type": "object" + }, + "provision": { + "title": "Provision", + "type": "boolean" + }, + "safe_files": { + "items": { + "type": "string" + }, + "title": "SafeFiles", + "type": "array" + }, + "ssh_connection_options": { + "items": { + "type": "string" + }, + "title": "SshConnectionOptions", + "type": "array" + } + }, + "title": "MoleculeDriverModel", + "type": "object" + }, + "MoleculeDriverOptionsModel": { + "additionalProperties": false, + "properties": { + "ansible_connection_options": { + "additionalProperties": { + "type": "string" + }, + "title": "Ansible Connection Options", + "type": "object" + }, + "login_cmd_template": { + "title": "Login Cmd Template", + "type": "string" + }, + "managed": { + "title": "Managed", + "type": "boolean" + } + }, + "title": "MoleculeDriverOptionsModel", + "type": "object" + }, + "MoleculePlatformModel": { + "additionalProperties": true, + "properties": { + "box": { + "title": "Box", + "type": "string" + }, + "cgroupns": { + "title": "Cgroupns", + "type": "string" + }, + "children": { + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "title": "Command", + "type": "string" + }, + "cpus": { + "title": "Cpus", + "type": "integer" + }, + "dockerfile": { + "title": "Dockerfile", + "type": "string" + }, + "env": { + "items": { + "type": "object" + }, + "title": "Platform Environment Variables", + "type": "array" + }, + "environment": { + "additionalProperties": { + "type": "string" + }, + "title": "Environment", + "type": "object" + }, + "groups": { + "items": { + "type": "string" + }, + "title": "Groups", + "type": "array" + }, + "hostname": { + "title": "Hostname", + "type": ["string", "boolean"] + }, + "image": { + "title": "Image", + "type": ["string", "null"] + }, + "interfaces": { + "title": "Interfaces", + "type": "array" + }, + "memory": { + "title": "Memory", + "type": "integer" + }, + "name": { + "title": "Name", + "type": "string" + }, + "network_mode": { + "anyOf": [ + { + "enum": ["bridge", "host", "none"], + "type": "string" + }, + { + "pattern": "^service:[a-zA-Z0-9:_.\\\\-]+$", + "type": "string" + }, + { + "pattern": "^container:[a-zA-Z0-9][a-zA-Z0-9_.-]+$", + "type": "string" + } + ], + "title": "Network Mode" + }, + "networks": { + "items": { + "$ref": "#/$defs/platform-network" + }, + "markdownDescription": "Used by docker and podman drivers.", + "title": "Networks", + "type": "array" + }, + "pkg_extras": { + "title": "Pkg Extras", + "type": "string" + }, + "pre_build_image": { + "title": "Pre Build Image", + "type": "boolean" + }, + "privileged": { + "title": "Privileged", + "type": "boolean" + }, + "provider_options": { + "title": "Provider options", + "type": "object" + }, + "provider_raw_config_args": { + "items": { + "type": "string" + }, + "title": "Provider Raw Config Args", + "type": "array" + }, + "registry": { + "$ref": 
"#/$defs/ContainerRegistryModel" + }, + "tmpfs": { + "items": { + "type": "string" + }, + "title": "Tmpfs", + "type": "array" + }, + "ulimits": { + "items": { + "type": "string" + }, + "title": "Ulimits", + "type": "array" + }, + "volumes": { + "items": { + "type": "string" + }, + "title": "Volumes", + "type": "array" + } + }, + "required": ["name"], + "title": "MoleculePlatformModel", + "type": "object" + }, + "MoleculeScenarioModel": { + "additionalProperties": false, + "properties": { + "check_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "cleanup_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "converge_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "create_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "dependency_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "destroy_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "idempotence_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "lint_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "name": { + "title": "Name", + "type": "string" + }, + "prepare_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "side_effect_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "syntax_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "test_sequence": { + "$ref": "#/$defs/ScenarioSequence" + }, + "verify_sequence": { + "$ref": "#/$defs/ScenarioSequence" + } + }, + "title": "MoleculeScenarioModel", + "type": "object" + }, + "ProvisionerConfigOptionsDefaultsModel": { + "additionalProperties": true, + "properties": { + "ansible_managed": { + "default": "Ansible managed: Do NOT edit this file manually!", + "title": "Ansible Managed", + "type": "string" + }, + "display_failed_stderr": { + "default": true, + "title": "Display Failed Stderr", + "type": "boolean" + }, + "fact_caching": { + "title": "Fact Caching", + "type": "string" + }, + "fact_caching_connection": { + "title": "Fact Caching Connection", + "type": "string" + }, + "forks": { + "default": 50, + "title": "Forks", + "type": "integer" + }, + "host_key_checking": { + "default": false, + "title": "Host Key Checking", + "type": "boolean" + }, + "interpreter_python": { + "default": "auto_silent", + "description": "See https://docs.ansible.com/ansible/devel/reference_appendices/interpreter_discovery.html", + "title": "Interpreter Python", + "type": "string" + }, + "nocows": { + "default": 1, + "title": "Nocows", + "type": "integer" + }, + "retry_files_enabled": { + "default": false, + "title": "Retry Files Enabled", + "type": "boolean" + } + }, + "title": "ProvisionerConfigOptionsDefaultsModel", + "type": "object" + }, + "ProvisionerConfigOptionsModel": { + "additionalProperties": true, + "properties": { + "defaults": { + "$ref": "#/$defs/ProvisionerConfigOptionsDefaultsModel" + }, + "ssh_connection": { + "$ref": "#/$defs/ProvisionerConfigOptionsSshConnectionModel" + } + }, + "title": "ProvisionerConfigOptionsModel", + "type": "object" + }, + "ProvisionerConfigOptionsSshConnectionModel": { + "additionalProperties": false, + "properties": { + "control_path": { + "default": "%(directory)s/%%h-%%p-%%r", + "title": "Control Path", + "type": "string" + }, + "scp_if_ssh": { + "default": true, + "title": "Scp If Ssh", + "type": "boolean" + } + }, + "title": "ProvisionerConfigOptionsSshConnectionModel", + "type": "object" + }, + "ProvisionerModel": { + "additionalProperties": true, + "properties": { + "config_options": { + "$ref": "#/$defs/ProvisionerConfigOptionsModel" + }, + "env": { + "title": "Env", + "type": "object" 
+ }, + "inventory": { + "title": "Inventory", + "type": "object" + }, + "log": { + "title": "Log", + "type": "boolean" + }, + "name": { + "enum": ["ansible"], + "title": "Name", + "type": "string" + }, + "playbooks": { + "title": "Playbooks", + "type": "object" + } + }, + "title": "ProvisionerModel", + "type": "object" + }, + "ScenarioSequence": { + "additionalProperties": false, + "items": { + "enum": [ + "check", + "cleanup", + "converge", + "create", + "dependency", + "destroy", + "idempotence", + "lint", + "prepare", + "side_effect", + "syntax", + "test", + "verify" + ], + "type": "string" + }, + "title": "ScenarioSequence", + "type": "array" + }, + "VerifierModel": { + "additionalProperties": false, + "properties": { + "additional_files_or_dirs": { + "items": { + "type": "string" + }, + "title": "AdditionalFilesOrDirs", + "type": "array" + }, + "enabled": { + "title": "Enabled", + "type": "boolean" + }, + "env": { + "title": "Env", + "type": "object" + }, + "name": { + "default": "ansible", + "enum": ["ansible", "goss", "inspec", "testinfra"], + "title": "Name", + "type": "string" + }, + "options": { + "title": "Options", + "type": "object" + } + }, + "title": "VerifierModel", + "type": "object" + }, + "platform-network": { + "properties": { + "aliases": { + "items": { + "type": "string" + }, + "type": "array" + }, + "ipv4_address": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": ["name"], + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/molecule.json", + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": false, + "examples": ["molecule/*/molecule.yml"], + "properties": { + "dependency": { + "$ref": "#/$defs/MoleculeDependencyModel" + }, + "driver": { + "$ref": "#/$defs/MoleculeDriverModel" + }, + "lint": { + "title": "Lint", + "type": "string" + }, + "log": { + "default": true, + "title": "Log", + "type": "boolean" + }, + "platforms": { + "items": { + "$ref": "#/$defs/MoleculePlatformModel" + }, + "title": "Platforms", + "type": "array" + }, + "prerun": { + "title": "Prerun", + "type": "boolean" + }, + "provisioner": { + "$ref": "#/$defs/ProvisionerModel" + }, + "role_name_check": { + "enum": [0, 1, 2], + "title": "RoleNameCheck", + "type": "integer" + }, + "scenario": { + "$ref": "#/$defs/MoleculeScenarioModel" + }, + "verifier": { + "$ref": "#/$defs/VerifierModel" + } + }, + "required": ["driver", "platforms"], + "title": "Molecule Scenario Schema", + "type": "object" +} diff --git a/src/ansiblelint/schemas/playbook.json b/src/ansiblelint/schemas/playbook.json new file mode 100644 index 0000000..983033f --- /dev/null +++ b/src/ansiblelint/schemas/playbook.json @@ -0,0 +1,1245 @@ +{ + "$comment": "Generated from ansible.json, do not edit.", + "$defs": { + "ansible.builtin.import_playbook": { + "additionalProperties": false, + "oneOf": [ + { + "not": { + "required": [ + "import_playbook" + ] + }, + "required": [ + "ansible.builtin.import_playbook" + ] + }, + { + "not": { + "required": [ + "ansible.builtin.import_playbook" + ] + }, + "required": [ + "import_playbook" + ] + } + ], + "patternProperties": { + "^(ansible\\.builtin\\.)?import_playbook$": { + "markdownDescription": "* Includes a file with a list of plays to be executed.\n * Files with a list of plays can only be included at the top level.\n * You cannot use this action inside a play.\n\nSee 
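The molecule.json schema above only requires driver and platforms at the top level, and each platform entry must at least carry a name. A minimal scenario config that satisfies it; the schema path, container image and instance name are illustrative assumptions:

import json

import jsonschema

# Assumed local copy of the molecule.json schema shown above.
with open("molecule.json", encoding="utf-8") as handle:
    schema = json.load(handle)

scenario = {
    "driver": {"name": "podman"},
    "platforms": [{"name": "instance", "image": "registry.example.com/base:latest"}],
}

jsonschema.validate(instance=scenario, schema=schema)  # raises ValidationError when invalid
print("molecule.yml: valid")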
[import_playbook](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/import_playbook_module.html)", + "title": "Import Playbook", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "tags": { + "$ref": "#/$defs/tags" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "type": "object" + }, + "become_method": { + "anyOf": [ + { + "enum": [ + "ansible.builtin.sudo", + "ansible.builtin.su", + "community.general.pbrun", + "community.general.pfexec", + "ansible.builtin.runas", + "community.general.dzdo", + "community.general.ksu", + "community.general.doas", + "community.general.machinectl", + "community.general.pmrun", + "community.general.sesu", + "community.general.sudosu" + ], + "type": "string" + }, + { + "$ref": "#/$defs/full-jinja" + }, + { + "pattern": "[A-Za-z0-9_\\.]+", + "type": "string" + } + ], + "markdownDescription": "See [become](https://docs.ansible.com/ansible/latest/user_guide/become.html)", + "title": "Become Method" + }, + "block": { + "properties": { + "always": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/task" + }, + { + "$ref": "#/$defs/block" + } + ] + }, + "title": "Always", + "type": "array" + }, + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "block": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/task" + }, + { + "$ref": "#/$defs/block" + } + ] + }, + "markdownDescription": "Blocks create logical groups of tasks. Blocks also offer ways to handle task errors, similar to exception handling in many programming languages. 
See [blocks](https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html)", + "title": "Block", + "type": "array" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "delegate_facts": { + "title": "Delegate Facts", + "type": "boolean" + }, + "delegate_to": { + "title": "Delegate To", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean" + }, + "port": { + "$ref": "#/$defs/templated-integer" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "rescue": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/task" + }, + { + "$ref": "#/$defs/block" + } + ] + }, + "title": "Rescue", + "type": "array" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "required": [ + "block" + ], + "type": "object" + }, + "complex_conditional": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "string" + } + ] + }, + "type": "array" + } + ] + }, + "environment": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "$ref": "#/$defs/full-jinja" + } + ], + "title": "Environment" + }, + "full-jinja": { + "pattern": "^\\{[\\{%](.|[\r\n])*[\\}%]\\}$", + "type": "string" + }, + "ignore_errors": { + "$ref": "#/$defs/templated-boolean", + "markdownDescription": "See [ignore_errors](https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#ignoring-failed-commands)", + "title": "Ignore Errors" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean", + "markdownDescription": "Use for protecting sensitive data. 
See [no_log](https://docs.ansible.com/ansible/latest/reference_appendices/logging.html)", + "title": "no_log" + }, + "play": { + "additionalProperties": false, + "allOf": [ + { + "not": { + "required": [ + "ansible.builtin.import_playbook" + ] + } + }, + { + "not": { + "required": [ + "import_playbook" + ] + } + } + ], + "properties": { + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "fact_path": { + "title": "Fact Path", + "type": "string" + }, + "force_handlers": { + "title": "Force Handlers", + "type": "boolean" + }, + "gather_facts": { + "title": "Gather Facts", + "type": "boolean" + }, + "gather_subset": { + "items": { + "anyOf": [ + { + "enum": [ + "all", + "min", + "all_ipv4_addresses", + "all_ipv6_addresses", + "apparmor", + "architecture", + "caps", + "chroot,cmdline", + "date_time", + "default_ipv4", + "default_ipv6", + "devices", + "distribution", + "distribution_major_version", + "distribution_release", + "distribution_version", + "dns", + "effective_group_ids", + "effective_user_id", + "env", + "facter", + "fips", + "hardware", + "interfaces", + "is_chroot", + "iscsi", + "kernel", + "local", + "lsb", + "machine", + "machine_id", + "mounts", + "network", + "ohai", + "os_family", + "pkg_mgr", + "platform", + "processor", + "processor_cores", + "processor_count", + "python", + "python_version", + "real_user_id", + "selinux", + "service_mgr", + "ssh_host_key_dsa_public", + "ssh_host_key_ecdsa_public", + "ssh_host_key_ed25519_public", + "ssh_host_key_rsa_public", + "ssh_host_pub_keys", + "ssh_pub_keys", + "system", + "system_capabilities", + "system_capabilities_enforced", + "user", + "user_dir", + "user_gecos", + "user_gid", + "user_id", + "user_shell", + "user_uid", + "virtual", + "virtualization_role", + "virtualization_type" + ], + "type": "string" + }, + { + "enum": [ + "!all", + "!min", + "!all_ipv4_addresses", + "!all_ipv6_addresses", + "!apparmor", + "!architecture", + "!caps", + "!chroot,cmdline", + "!date_time", + "!default_ipv4", + "!default_ipv6", + "!devices", + "!distribution", + "!distribution_major_version", + "!distribution_release", + "!distribution_version", + "!dns", + "!effective_group_ids", + "!effective_user_id", + "!env", + "!facter", + "!fips", + "!hardware", + "!interfaces", + "!is_chroot", + "!iscsi", + "!kernel", + "!local", + "!lsb", + "!machine", + "!machine_id", + "!mounts", + "!network", + "!ohai", + "!os_family", + "!pkg_mgr", + "!platform", + "!processor", + "!processor_cores", + "!processor_count", + "!python", + "!python_version", + "!real_user_id", + "!selinux", + "!service_mgr", + "!ssh_host_key_dsa_public", + "!ssh_host_key_ecdsa_public", + "!ssh_host_key_ed25519_public", + "!ssh_host_key_rsa_public", + 
"!ssh_host_pub_keys", + "!ssh_pub_keys", + "!system", + "!system_capabilities", + "!system_capabilities_enforced", + "!user", + "!user_dir", + "!user_gecos", + "!user_gid", + "!user_id", + "!user_shell", + "!user_uid", + "!virtual", + "!virtualization_role", + "!virtualization_type" + ], + "type": "string" + } + ] + }, + "title": "Gather Subset", + "type": "array" + }, + "gather_timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Gather Timeout" + }, + "handlers": { + "$ref": "#/$defs/tasks" + }, + "hosts": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Hosts" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "max_fail_percentage": { + "title": "Max Fail Percentage", + "type": "number" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean" + }, + "order": { + "enum": [ + "default", + "sorted", + "reverse_sorted", + "reverse_inventory", + "shuffle" + ], + "title": "Order", + "type": "string" + }, + "port": { + "$ref": "#/$defs/templated-integer", + "title": "Port" + }, + "post_tasks": { + "$ref": "#/$defs/tasks" + }, + "pre_tasks": { + "$ref": "#/$defs/tasks" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "roles": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/play-role" + }, + { + "type": "string" + } + ] + }, + "markdownDescription": "Roles let you automatically load related vars, files, tasks, handlers, and other Ansible artifacts based on a known file structure. After you group your content in roles, you can easily reuse them and share them with other users.\n See [roles](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html#roles)", + "title": "Roles", + "type": "array" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "serial": { + "anyOf": [ + { + "$ref": "#/$defs/templated-integer-or-percent" + }, + { + "items": { + "$ref": "#/$defs/templated-integer-or-percent" + }, + "type": "array" + } + ], + "markdownDescription": "Integer, percentage or list of those. 
See [Setting the batch size with serial](https://docs.ansible.com/ansible/latest/user_guide/playbooks_strategies.html#setting-the-batch-size-with-serial)", + "title": "Batch size" + }, + "strategy": { + "title": "Strategy", + "type": "string" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "tasks": { + "$ref": "#/$defs/tasks" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "user": { + "title": "Remote User", + "type": "string" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "vars_files": { + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ] + }, + "title": "Vars Files", + "type": [ + "array", + "string", + "null" + ] + }, + "vars_prompt": { + "items": { + "$ref": "#/$defs/vars_prompt" + }, + "markdownDescription": "See [vars_prompt](https://docs.ansible.com/ansible/latest/user_guide/playbooks_prompts.html)", + "title": "vars_prompt", + "type": "array" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "required": [ + "hosts" + ], + "title": "play", + "type": "object" + }, + "play-role": { + "markdownDescription": "See [roles](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html#roles)", + "properties": { + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "delegate_to": { + "title": "Delegate To", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean" + }, + "port": { + "$ref": "#/$defs/templated-integer", + "title": "Port" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "role": { + "title": "Role", + "type": "string" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "required": [ + "role" + ], + "title": "play-role", + "type": "object" + }, + "tags": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Tags" + }, + "task": 
{ + "additionalProperties": true, + "allOf": [ + { + "not": { + "required": [ + "hosts" + ] + } + }, + { + "not": { + "required": [ + "tasks" + ] + } + }, + { + "not": { + "required": [ + "import_playbook" + ] + } + }, + { + "not": { + "required": [ + "block" + ] + } + } + ], + "properties": { + "action": { + "title": "Action", + "type": "string" + }, + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "args": { + "$ref": "#/$defs/templated-object", + "title": "Args" + }, + "async": { + "$ref": "#/$defs/templated-integer", + "title": "Async" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "changed_when": { + "$ref": "#/$defs/complex_conditional", + "markdownDescription": "See [changed_when](https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#defining-changed)", + "title": "Changed When" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "delay": { + "$ref": "#/$defs/templated-integer", + "title": "Delay" + }, + "delegate_facts": { + "title": "Delegate Facts", + "type": "boolean" + }, + "delegate_to": { + "title": "Delegate To", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "failed_when": { + "$ref": "#/$defs/complex_conditional", + "title": "Failed When" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "listen": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "markdownDescription": "Applies only to handlers. 
See [listen](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html)", + "title": "Listen" + }, + "local_action": { + "title": "Local Action", + "type": [ + "string", + "object" + ] + }, + "loop": { + "title": "Loop", + "type": [ + "string", + "array" + ] + }, + "loop_control": { + "title": "Loop Control" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/no_log" + }, + "notify": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Notify" + }, + "poll": { + "$ref": "#/$defs/templated-integer", + "title": "Poll" + }, + "port": { + "$ref": "#/$defs/templated-integer", + "title": "Port" + }, + "register": { + "title": "Register", + "type": "string" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "retries": { + "$ref": "#/$defs/templated-integer", + "title": "Retries" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "until": { + "$ref": "#/$defs/complex_conditional", + "title": "Until" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + }, + "with_dict": { + "title": "With Dict" + }, + "with_fileglob": { + "title": "With Fileglob" + }, + "with_filetree": { + "title": "With Filetree" + }, + "with_first_found": { + "title": "With First Found" + }, + "with_indexed_items": { + "title": "With Indexed Items" + }, + "with_ini": { + "title": "With Ini" + }, + "with_inventory_hostnames": { + "title": "With Inventory Hostnames" + }, + "with_items": { + "anyOf": [ + { + "$ref": "#/$defs/full-jinja" + }, + { + "type": "array" + } + ], + "markdownDescription": "See [loops](https://docs.ansible.com/ansible/latest/user_guide/playbooks_loops.html#loops)", + "title": "With Items" + }, + "with_lines": { + "title": "With Lines" + }, + "with_random_choice": { + "title": "With Random Choice" + }, + "with_sequence": { + "title": "With Sequence" + }, + "with_subelements": { + "title": "With Subelements" + }, + "with_together": { + "title": "With Together" + } + }, + "title": "task", + "type": "object" + }, + "tasks": { + "$schema": "http://json-schema.org/draft-07/schema", + "examples": [ + "tasks/*.yml", + "handlers/*.yml" + ], + "items": { + "anyOf": [ + { + "$ref": "#/$defs/block" + }, + { + "$ref": "#/$defs/task" + } + ] + }, + "title": "Ansible Tasks Schema", + "type": [ + "array", + "null" + ] + }, + "templated-boolean": { + "oneOf": [ + { + "type": "boolean" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "templated-integer": { + "oneOf": [ + { + "type": "integer" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "templated-integer-or-percent": { + "oneOf": [ + { + "type": "integer" + }, + { + "pattern": "^\\d+\\.?\\d*%?$", + "type": "string" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "templated-object": { + "oneOf": [ + { + "type": "object" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "vars_prompt": { + "additionalProperties": false, + "properties": { + "confirm": { + "title": "Confirm", + "type": "boolean" + }, + "default": { + "title": 
"Default", + "type": "string" + }, + "encrypt": { + "enum": [ + "des_crypt", + "bsdi_crypt", + "bigcrypt", + "crypt16", + "md5_crypt", + "bcrypt", + "sha1_crypt", + "sun_md5_crypt", + "sha256_crypt", + "sha512_crypt", + "apr_md5_crypt", + "phpass", + "pbkdf2_digest", + "cta_pbkdf2_sha1", + "dlitz_pbkdf2_sha1", + "scram", + "bsd_nthash" + ], + "title": "Encrypt", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "private": { + "default": true, + "title": "Private", + "type": "boolean" + }, + "prompt": { + "title": "Prompt", + "type": "string" + }, + "salt_size": { + "default": 8, + "title": "Salt Size", + "type": "integer" + }, + "unsafe": { + "default": false, + "markdownDescription": "See [unsafe](https://docs.ansible.com/ansible/latest/user_guide/playbooks_prompts.html#allowing-special-characters-in-vars-prompt-values)", + "title": "Unsafe", + "type": "boolean" + } + }, + "required": [ + "name", + "prompt" + ], + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/playbook.json", + "$schema": "http://json-schema.org/draft-07/schema", + "examples": [ + "playbooks/*.yml", + "playbooks/*.yaml" + ], + "items": { + "oneOf": [ + { + "$ref": "#/$defs/ansible.builtin.import_playbook" + }, + { + "$ref": "#/$defs/play" + } + ] + }, + "title": "Ansible Playbook", + "type": "array" +} diff --git a/src/ansiblelint/schemas/requirements.json b/src/ansiblelint/schemas/requirements.json new file mode 100644 index 0000000..dc7ded6 --- /dev/null +++ b/src/ansiblelint/schemas/requirements.json @@ -0,0 +1,135 @@ +{ + "$defs": { + "CollectionModel": { + "additionalProperties": false, + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "source": { + "title": "Source", + "type": "string" + }, + "type": { + "enum": ["galaxy", "url", "file", "git", "dir", "subdirs"], + "title": "Type", + "type": "string" + }, + "version": { + "title": "Version", + "type": "string" + } + }, + "title": "CollectionModel", + "type": "object" + }, + "CollectionStringModel": { + "title": "CollectionStringModel", + "type": "string" + }, + "IncludeModel": { + "properties": { + "include": { + "title": "Include", + "type": "string" + } + }, + "required": ["include"], + "title": "IncludeModel", + "type": "object" + }, + "RequirementsV2Model": { + "additionalProperties": false, + "anyOf": [ + { + "required": ["collections"] + }, + { + "required": ["roles"] + } + ], + "properties": { + "collections": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/CollectionModel" + }, + { + "$ref": "#/$defs/CollectionStringModel" + } + ] + }, + "title": "Collections", + "type": "array" + }, + "roles": { + "items": { + "$ref": "#/$defs/RoleModel" + }, + "title": "Roles", + "type": "array" + } + }, + "title": "Requirements v2", + "type": "object" + }, + "RoleModel": { + "additionalProperties": false, + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "scm": { + "anyOf": [ + { + "enum": ["git"], + "type": "string" + }, + { + "enum": ["hg"], + "type": "string" + } + ], + "default": "git", + "title": "Scm" + }, + "src": { + "title": "Src", + "type": "string" + }, + "version": { + "default": "master", + "title": "Version", + "type": "string" + } + }, + "title": "Role", + "type": "object" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/requirements.json", + "$schema": "http://json-schema.org/draft-07/schema", + "anyOf": [ + { + "items": { + "anyOf": [ + { + "$ref": 
"#/$defs/RoleModel" + }, + { + "$ref": "#/$defs/IncludeModel" + } + ] + }, + "type": "array" + }, + { + "$ref": "#/$defs/RequirementsV2Model" + } + ], + "examples": ["requirements.yml"], + "title": "Ansible Requirements Schema" +} diff --git a/src/ansiblelint/schemas/role-arg-spec.json b/src/ansiblelint/schemas/role-arg-spec.json new file mode 100644 index 0000000..433993e --- /dev/null +++ b/src/ansiblelint/schemas/role-arg-spec.json @@ -0,0 +1,250 @@ +{ + "$defs": { + "datatype": { + "enum": [ + "str", + "list", + "dict", + "bool", + "int", + "float", + "path", + "raw", + "jsonarg", + "json", + "bytes", + "bits" + ], + "type": "string" + }, + "deprecated_alias": { + "properties": { + "collection_name": { + "type": "string" + }, + "date": { + "type": "string" + }, + "name": { + "type": "string" + }, + "version": { + "type": "string" + } + }, + "required": ["name"], + "type": "object" + }, + "entry_point": { + "additionalProperties": false, + "properties": { + "author": { + "oneOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ] + }, + "description": { + "oneOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ] + }, + "options": { + "additionalProperties": { + "$ref": "#/$defs/option" + }, + "type": "object" + }, + "seealso": { + "items": { + "oneOf": [ + { + "additionalProperties": false, + "properties": { + "description": { + "type": "string" + }, + "module": { + "type": "string" + } + }, + "required": ["module"], + "type": "object" + }, + { + "additionalProperties": false, + "properties": { + "description": { + "type": "string" + }, + "plugin": { + "type": "string" + }, + "plugin_type": { + "type": "string" + } + }, + "required": ["plugin", "plugin_type"], + "type": "object" + }, + { + "additionalProperties": false, + "properties": { + "description": { + "type": "string" + }, + "ref": { + "type": "string" + } + }, + "required": ["description", "ref"], + "type": "object" + }, + { + "additionalProperties": false, + "properties": { + "description": { + "type": "string" + }, + "link": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": ["description", "link", "name"], + "type": "object" + } + ] + }, + "type": "array" + }, + "short_description": { + "type": "string" + }, + "version_added": { + "type": "string" + } + }, + "required": ["options"], + "title": "Entry Point", + "type": "object" + }, + "option": { + "additionalProperties": false, + "aliases": { + "items": { + "type": "string" + }, + "type": "array" + }, + "apply_defaults": { + "type": "string" + }, + "deprecated_aliases": { + "items": { + "$ref": "#/$defs/deprecated_alias" + }, + "type": "array" + }, + "markdownDescription": "xxx", + "options": { + "$ref": "#/$defs/option" + }, + "properties": { + "choices": { + "type": "array" + }, + "default": { + "default": "None" + }, + "description": { + "description": "Detailed explanation of what this option does. 
It should be written in full sentences.", + "oneOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ] + }, + "elements": { + "$ref": "#/$defs/datatype" + }, + "fallback": { + "default": "None", + "type": "string" + }, + "no_log": { + "default": false, + "type": "boolean" + }, + "option-name": { + "description": "The name of the option/argument.", + "type": "string" + }, + "options": { + "additionalProperties": { + "$ref": "#/$defs/option" + }, + "type": "object" + }, + "required": { + "default": false, + "type": "boolean" + }, + "type": { + "$ref": "#/$defs/datatype", + "markdownDescription": "See [argument-spec](https://docs.ansible.com/ansible/latest/dev_guide/developing_program_flow_modules.html#argument-spec" + }, + "version_added": { + "type": "string" + } + }, + "removed_at_date": { + "type": "string" + }, + "removed_from_collection": { + "type": "string" + }, + "removed_in_version": { + "type": "string" + }, + "title": "Option" + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/role-arg-spec.json", + "$schema": "http://json-schema.org/draft-07/schema", + "additionalProperties": false, + "examples": ["meta/argument_specs.yml"], + "markdownDescription": "Add entry point, usually `main`.\nSee [role-argument-validation](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html#role-argument-validation)", + "properties": { + "argument_specs": { + "additionalProperties": { + "$ref": "#/$defs/entry_point" + }, + "markdownDescription": "Add entry point, usually `main`.\nSee [role-argument-validation](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html#role-argument-validation)" + } + }, + "title": "Ansible Role Argument Specs Schema" +} diff --git a/src/ansiblelint/schemas/rulebook.json b/src/ansiblelint/schemas/rulebook.json new file mode 100644 index 0000000..6c441cd --- /dev/null +++ b/src/ansiblelint/schemas/rulebook.json @@ -0,0 +1,645 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://raw.githubusercontent.com/ansible/ansible-rulebook/main/ansible_rulebook/schema/ruleset_schema.json", + "type": "array", + "items": { + "$ref": "#/$defs/ruleset" + }, + "minItems": 1, + "examples": [ + "rulebooks/*.yml", + "rulebooks/*.yaml" + ], + "$defs": { + "ruleset": { + "type": "object", + "properties": { + "default_events_ttl": { + "type": "string", + "pattern": "^\\d+\\s(seconds?|minutes?|hours?|days?)$" + }, + "hosts": { + "type": "string" + }, + "gather_facts": { + "type": "boolean", + "default": false + }, + "name": { + "type": "string" + }, + "execution_strategy": { + "type": "string", + "enum": ["sequential", "parallel"], + "default": "sequential" + }, + "sources": { + "type": "array", + "items": { + "$ref": "#/$defs/source" + } + }, + "rules": { + "type": "array", + "items": { + "$ref": "#/$defs/rule" + } + } + }, + "required": [ + "hosts", + "sources", + "rules" + ], + "additionalProperties": false + }, + "source": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "filters": { + "type": "array", + "items": { + "type": "object" + } + } + }, + "additionalProperties": { + "oneOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ] + } + }, + "throttle": { + "type": "object", + "oneOf": [ + { + "required": [ + "once_within", + "group_by_attributes" + ] + }, + { + "required": [ + "once_after", + "group_by_attributes" + ] + } + ], + "properties": { + "once_within": { + "type": "string", + 
"pattern": "^\\d+\\s(milliseconds?|seconds?|minutes?|hours?|days?)$" + }, + "once_after": { + "type": "string", + "pattern": "^\\d+\\s(milliseconds?|seconds?|minutes?|hours?|days?)$" + }, + "group_by_attributes": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "rule": { + "type": "object", + "oneOf": [ + { + "required": [ + "name", + "condition", + "actions" + ] + }, + { + "required": [ + "name", + "condition", + "action" + ] + } + ], + "properties": { + "name": { + "type": "string", + "minLength": 1, + "pattern": "\\S" + }, + "enabled": { + "type": "boolean" + }, + "throttle": { + "$ref": "#/$defs/throttle" + }, + "condition": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/$defs/all-condition" + }, + { + "$ref": "#/$defs/any-condition" + }, + { + "$ref": "#/$defs/not-all-condition" + } + ] + }, + "actions": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/$defs/run-playbook-action" + }, + { + "$ref": "#/$defs/run-module-action" + }, + { + "$ref": "#/$defs/run-job-template-action" + }, + { + "$ref": "#/$defs/post-event-action" + }, + { + "$ref": "#/$defs/set-fact-action" + }, + { + "$ref": "#/$defs/retract-fact-action" + }, + { + "$ref": "#/$defs/print-event-action" + }, + { + "$ref": "#/$defs/debug-action" + }, + { + "$ref": "#/$defs/none-action" + }, + { + "$ref": "#/$defs/shutdown-action" + } + ] + } + }, + "action": { + "oneOf": [ + { + "$ref": "#/$defs/run-playbook-action" + }, + { + "$ref": "#/$defs/run-module-action" + }, + { + "$ref": "#/$defs/run-job-template-action" + }, + { + "$ref": "#/$defs/post-event-action" + }, + { + "$ref": "#/$defs/set-fact-action" + }, + { + "$ref": "#/$defs/retract-fact-action" + }, + { + "$ref": "#/$defs/print-event-action" + }, + { + "$ref": "#/$defs/debug-action" + }, + { + "$ref": "#/$defs/none-action" + }, + { + "$ref": "#/$defs/shutdown-action" + } + ] + } + }, + "additionalProperties": false + }, + "all-condition": { + "type": "object", + "properties": { + "all": { + "type": "array", + "items": { + "type": "string" + } + }, + "timeout": { + "type": "string", + "pattern": "^\\d+\\s(milliseconds?|seconds?|minutes?|hours?|days?)$" + } + }, + "additionalProperties": false + }, + "not-all-condition": { + "type": "object", + "properties": { + "not_all": { + "type": "array", + "items": { + "type": "string" + } + }, + "timeout": { + "type": "string", + "pattern": "^\\d+\\s(milliseconds?|seconds?|minutes?|hours?|days?)$" + } + }, + "required": [ + "timeout", + "not_all" + ], + "additionalProperties": false + }, + "any-condition": { + "type": "object", + "properties": { + "any": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "run-playbook-action": { + "type": "object", + "properties": { + "run_playbook": { + "type": "object", + "properties": { + "copy_files": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "post_events": { + "type": "boolean" + }, + "set_facts": { + "type": "boolean" + }, + "ruleset": { + "type": "string" + }, + "verbosity": { + "type": "integer" + }, + "var_root": { + "type": [ + "string", + "object" + ] + }, + "json_mode": { + "type": "boolean" + }, + "retry": { + "type": "boolean" + }, + "retries": { + "type": "integer" + }, + "delay": { + "type": "number" + }, + "extra_vars": { + "type": "object" + } + }, + "required": [ + "name" + ], + "additionalProperties": false + } + }, + "required": [ + "run_playbook" + ], + "additionalProperties": false + }, + "run-module-action": { + 
"type": "object", + "properties": { + "run_module": { + "type": "object", + "properties": { + "copy_files": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "post_events": { + "type": "boolean" + }, + "set_facts": { + "type": "boolean" + }, + "verbosity": { + "type": "integer" + }, + "var_root": { + "type": [ + "string", + "object" + ] + }, + "json_mode": { + "type": "boolean" + }, + "retry": { + "type": "boolean" + }, + "retries": { + "type": "integer" + }, + "delay": { + "type": "number" + }, + "module_args": { + "type": "object" + }, + "extra_vars": { + "type": "object" + } + }, + "required": [ + "name" + ], + "additionalProperties": false + } + }, + "required": [ + "run_module" + ], + "additionalProperties": false + }, + "run-job-template-action": { + "type": "object", + "properties": { + "run_job_template": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "organization": { + "type": "string" + }, + "job_args": { + "type": "object" + }, + "post_events": { + "type": "boolean" + }, + "set_facts": { + "type": "boolean" + }, + "ruleset": { + "type": "string" + }, + "var_root": { + "type": "string" + }, + "retry": { + "type": "boolean" + }, + "retries": { + "type": "integer" + }, + "delay": { + "type": "integer" + } + }, + "required": [ + "name", + "organization" + ], + "additionalProperties": false + } + }, + "required": [ + "run_job_template" + ], + "additionalProperties": false + }, + "post-event-action": { + "type": "object", + "properties": { + "post_event": { + "type": "object", + "properties": { + "ruleset": { + "type": "string" + }, + "event": { + "type": "object" + } + }, + "required": [ + "event" + ], + "additionalProperties": false + } + }, + "required": [ + "post_event" + ], + "additionalProperties": false + }, + "set-fact-action": { + "type": "object", + "properties": { + "set_fact": { + "type": "object", + "properties": { + "ruleset": { + "type": "string" + }, + "fact": { + "type": "object" + } + }, + "required": [ + "fact" + ], + "additionalProperties": false + } + }, + "required": [ + "set_fact" + ], + "additionalProperties": false + }, + "retract-fact-action": { + "type": "object", + "properties": { + "retract_fact": { + "type": "object", + "properties": { + "ruleset": { + "type": "string" + }, + "fact": { + "type": "object" + }, + "partial": { + "type": "boolean", + "default": true + } + }, + "required": [ + "fact" + ], + "additionalProperties": false + } + }, + "required": [ + "retract_fact" + ], + "additionalProperties": false + }, + "print-event-action": { + "type": "object", + "properties": { + "print_event": { + "type": [ + "object", + "null" + ], + "properties": { + "var_root": { + "type": [ + "string", + "object" + ] + }, + "pretty": { + "type": "boolean" + } + }, + "additionalProperties": false + } + }, + "required": [ + "print_event" + ], + "additionalProperties": false + }, + "debug-msg": { + "type": "object", + "properties": { + "msg": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ] + } + }, + "additionalProperties": false + }, + "debug-var": { + "type": "object", + "properties": { + "var": { + "type": "string" + } + }, + "additionalProperties": false + }, + "debug-action": { + "type": "object", + "properties": { + "debug": { + "anyOf": [ + { + "$ref": "#/$defs/debug-msg" + }, + { + "$ref": "#/$defs/debug-var" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "required": [ + "debug" + ] + }, + "none-action": { + "type": "object", 
+ "properties": { + "none": { + "type": [ + "object", + "null" + ] + } + }, + "required": [ + "none" + ], + "additionalProperties": false + }, + "shutdown-action": { + "type": "object", + "properties": { + "shutdown": { + "type": [ + "object", + "null" + ], + "properties": { + "delay": { + "type": "number" + }, + "message": { + "type": "string" + }, + "kind": { + "type": "string", + "enum": [ + "graceful", + "now" + ] + } + }, + "additionalProperties": false + } + }, + "required": [ + "shutdown" + ], + "additionalProperties": false + } + } +} diff --git a/src/ansiblelint/schemas/tasks.json b/src/ansiblelint/schemas/tasks.json new file mode 100644 index 0000000..ec7f85d --- /dev/null +++ b/src/ansiblelint/schemas/tasks.json @@ -0,0 +1,588 @@ +{ + "$comment": "Generated from ansible.json, do not edit.", + "$defs": { + "become_method": { + "anyOf": [ + { + "enum": [ + "ansible.builtin.sudo", + "ansible.builtin.su", + "community.general.pbrun", + "community.general.pfexec", + "ansible.builtin.runas", + "community.general.dzdo", + "community.general.ksu", + "community.general.doas", + "community.general.machinectl", + "community.general.pmrun", + "community.general.sesu", + "community.general.sudosu" + ], + "type": "string" + }, + { + "$ref": "#/$defs/full-jinja" + }, + { + "pattern": "[A-Za-z0-9_\\.]+", + "type": "string" + } + ], + "markdownDescription": "See [become](https://docs.ansible.com/ansible/latest/user_guide/become.html)", + "title": "Become Method" + }, + "block": { + "properties": { + "always": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/task" + }, + { + "$ref": "#/$defs/block" + } + ] + }, + "title": "Always", + "type": "array" + }, + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "block": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/task" + }, + { + "$ref": "#/$defs/block" + } + ] + }, + "markdownDescription": "Blocks create logical groups of tasks. Blocks also offer ways to handle task errors, similar to exception handling in many programming languages. 
See [blocks](https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html)", + "title": "Block", + "type": "array" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "delegate_facts": { + "title": "Delegate Facts", + "type": "boolean" + }, + "delegate_to": { + "title": "Delegate To", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean" + }, + "port": { + "$ref": "#/$defs/templated-integer" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "rescue": { + "items": { + "anyOf": [ + { + "$ref": "#/$defs/task" + }, + { + "$ref": "#/$defs/block" + } + ] + }, + "title": "Rescue", + "type": "array" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + } + }, + "required": [ + "block" + ], + "type": "object" + }, + "complex_conditional": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "string" + } + ] + }, + "type": "array" + } + ] + }, + "environment": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "$ref": "#/$defs/full-jinja" + } + ], + "title": "Environment" + }, + "full-jinja": { + "pattern": "^\\{[\\{%](.|[\r\n])*[\\}%]\\}$", + "type": "string" + }, + "ignore_errors": { + "$ref": "#/$defs/templated-boolean", + "markdownDescription": "See [ignore_errors](https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#ignoring-failed-commands)", + "title": "Ignore Errors" + }, + "no_log": { + "$ref": "#/$defs/templated-boolean", + "markdownDescription": "Use for protecting sensitive data. 
See [no_log](https://docs.ansible.com/ansible/latest/reference_appendices/logging.html)", + "title": "no_log" + }, + "tags": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Tags" + }, + "task": { + "additionalProperties": true, + "allOf": [ + { + "not": { + "required": [ + "hosts" + ] + } + }, + { + "not": { + "required": [ + "tasks" + ] + } + }, + { + "not": { + "required": [ + "import_playbook" + ] + } + }, + { + "not": { + "required": [ + "block" + ] + } + } + ], + "properties": { + "action": { + "title": "Action", + "type": "string" + }, + "any_errors_fatal": { + "$ref": "#/$defs/templated-boolean", + "title": "Any Errors Fatal" + }, + "args": { + "$ref": "#/$defs/templated-object", + "title": "Args" + }, + "async": { + "$ref": "#/$defs/templated-integer", + "title": "Async" + }, + "become": { + "$ref": "#/$defs/templated-boolean", + "title": "Become" + }, + "become_exe": { + "title": "Become Exe", + "type": "string" + }, + "become_flags": { + "title": "Become Flags", + "type": "string" + }, + "become_method": { + "$ref": "#/$defs/become_method" + }, + "become_user": { + "title": "Become User", + "type": "string" + }, + "changed_when": { + "$ref": "#/$defs/complex_conditional", + "markdownDescription": "See [changed_when](https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#defining-changed)", + "title": "Changed When" + }, + "check_mode": { + "$ref": "#/$defs/complex_conditional", + "title": "Check Mode" + }, + "collections": { + "items": { + "type": "string" + }, + "title": "Collections", + "type": "array" + }, + "connection": { + "title": "Connection", + "type": "string" + }, + "debugger": { + "title": "Debugger", + "type": "string" + }, + "delay": { + "$ref": "#/$defs/templated-integer", + "title": "Delay" + }, + "delegate_facts": { + "title": "Delegate Facts", + "type": "boolean" + }, + "delegate_to": { + "title": "Delegate To", + "type": "string" + }, + "diff": { + "$ref": "#/$defs/templated-boolean", + "title": "Diff" + }, + "environment": { + "$ref": "#/$defs/environment" + }, + "failed_when": { + "$ref": "#/$defs/complex_conditional", + "title": "Failed When" + }, + "ignore_errors": { + "$ref": "#/$defs/ignore_errors" + }, + "ignore_unreachable": { + "title": "Ignore Unreachable", + "type": "boolean" + }, + "listen": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "markdownDescription": "Applies only to handlers. 
See [listen](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html)", + "title": "Listen" + }, + "local_action": { + "title": "Local Action", + "type": [ + "string", + "object" + ] + }, + "loop": { + "title": "Loop", + "type": [ + "string", + "array" + ] + }, + "loop_control": { + "title": "Loop Control" + }, + "module_defaults": { + "title": "Module Defaults" + }, + "name": { + "title": "Name", + "type": "string" + }, + "no_log": { + "$ref": "#/$defs/no_log" + }, + "notify": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Notify" + }, + "poll": { + "$ref": "#/$defs/templated-integer", + "title": "Poll" + }, + "port": { + "$ref": "#/$defs/templated-integer", + "title": "Port" + }, + "register": { + "title": "Register", + "type": "string" + }, + "remote_user": { + "title": "Remote User", + "type": "string" + }, + "retries": { + "$ref": "#/$defs/templated-integer", + "title": "Retries" + }, + "run_once": { + "$ref": "#/$defs/templated-boolean", + "title": "Run Once" + }, + "tags": { + "$ref": "#/$defs/tags", + "title": "Tags" + }, + "throttle": { + "$ref": "#/$defs/templated-integer", + "title": "Throttle" + }, + "timeout": { + "$ref": "#/$defs/templated-integer", + "title": "Timeout" + }, + "until": { + "$ref": "#/$defs/complex_conditional", + "title": "Until" + }, + "vars": { + "title": "Vars", + "type": "object" + }, + "when": { + "$ref": "#/$defs/complex_conditional", + "title": "When" + }, + "with_dict": { + "title": "With Dict" + }, + "with_fileglob": { + "title": "With Fileglob" + }, + "with_filetree": { + "title": "With Filetree" + }, + "with_first_found": { + "title": "With First Found" + }, + "with_indexed_items": { + "title": "With Indexed Items" + }, + "with_ini": { + "title": "With Ini" + }, + "with_inventory_hostnames": { + "title": "With Inventory Hostnames" + }, + "with_items": { + "anyOf": [ + { + "$ref": "#/$defs/full-jinja" + }, + { + "type": "array" + } + ], + "markdownDescription": "See [loops](https://docs.ansible.com/ansible/latest/user_guide/playbooks_loops.html#loops)", + "title": "With Items" + }, + "with_lines": { + "title": "With Lines" + }, + "with_random_choice": { + "title": "With Random Choice" + }, + "with_sequence": { + "title": "With Sequence" + }, + "with_subelements": { + "title": "With Subelements" + }, + "with_together": { + "title": "With Together" + } + }, + "title": "task", + "type": "object" + }, + "templated-boolean": { + "oneOf": [ + { + "type": "boolean" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "templated-integer": { + "oneOf": [ + { + "type": "integer" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + }, + "templated-object": { + "oneOf": [ + { + "type": "object" + }, + { + "$ref": "#/$defs/full-jinja", + "type": "string" + } + ] + } + }, + "$id": "https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/tasks.json", + "$schema": "http://json-schema.org/draft-07/schema", + "examples": [ + "tasks/*.yml", + "handlers/*.yml" + ], + "items": { + "anyOf": [ + { + "$ref": "#/$defs/block" + }, + { + "$ref": "#/$defs/task" + } + ] + }, + "title": "Ansible Tasks Schema", + "type": [ + "array", + "null" + ] +} diff --git a/src/ansiblelint/schemas/vars.json b/src/ansiblelint/schemas/vars.json new file mode 100644 index 0000000..c0b66e8 --- /dev/null +++ b/src/ansiblelint/schemas/vars.json @@ -0,0 +1,29 @@ +{ + "$id": 
"https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/vars.json", + "$schema": "http://json-schema.org/draft-07/schema", + "anyOf": [ + { + "additionalProperties": false, + "patternProperties": { + "^(?!(False|None|True|and|any_errors_fatal|as|assert|async|await|become|become_exe|become_flags|become_method|become_user|break|check_mode|class|collections|connection|continue|debugger|def|del|diff|elif|else|environment|except|fact_path|finally|for|force_handlers|from|gather_facts|gather_subset|gather_timeout|global|handlers|hosts|if|ignore_errors|ignore_unreachable|import|in|is|lambda|max_fail_percentage|module_defaults|name|no_log|nonlocal|not|or|order|pass|port|post_tasks|pre_tasks|raise|remote_user|return|roles|run_once|serial|strategy|tags|tasks|throttle|timeout|try|vars|vars_files|vars_prompt|while|with|yield)$)[a-zA-Z_][\\w]*$": {} + }, + "type": "object" + }, + { + "pattern": "^\\$ANSIBLE_VAULT;", + "type": "string" + }, + { + "type": "null" + } + ], + "examples": [ + "playbooks/vars/*.yml", + "vars/*.yml", + "defaults/*.yml", + "host_vars/*.yml", + "group_vars/*.yml" + ], + "markdownDescription": "See [Using Variables](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html)", + "title": "Ansible Vars Schema" +} diff --git a/src/ansiblelint/skip_utils.py b/src/ansiblelint/skip_utils.py new file mode 100644 index 0000000..f2f6177 --- /dev/null +++ b/src/ansiblelint/skip_utils.py @@ -0,0 +1,316 @@ +# (c) 2019-2020, Ansible by Red Hat +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+ +"""Utils related to inline skipping of rules.""" +from __future__ import annotations + +import collections.abc +import logging +import re +import warnings +from functools import cache +from itertools import product +from typing import TYPE_CHECKING, Any + +# Module 'ruamel.yaml' does not explicitly export attribute 'YAML'; implicit reexport disabled +from ruamel.yaml import YAML +from ruamel.yaml.composer import ComposerError +from ruamel.yaml.scanner import ScannerError +from ruamel.yaml.tokens import CommentToken + +from ansiblelint.config import used_old_tags +from ansiblelint.constants import ( + NESTED_TASK_KEYS, + PLAYBOOK_TASK_KEYWORDS, + RENAMED_TAGS, + SKIPPED_RULES_KEY, +) +from ansiblelint.errors import LintWarning, WarnSource + +if TYPE_CHECKING: + from collections.abc import Generator, Sequence + + from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject + + from ansiblelint.file_utils import Lintable + + +_logger = logging.getLogger(__name__) +_found_deprecated_tags: set[str] = set() +_noqa_comment_re = re.compile(r"^# noqa(\s|:)") + +# playbook: Sequence currently expects only instances of one of the two +# classes below but we should consider avoiding this chimera. +# ruamel.yaml.comments.CommentedSeq +# ansible.parsing.yaml.objects.AnsibleSequence + + +def get_rule_skips_from_line( + line: str, + lintable: Lintable, + lineno: int = 1, +) -> list[str]: + """Return list of rule ids skipped via comment on the line of yaml.""" + _before_noqa, _noqa_marker, noqa_text = line.partition("# noqa") + + result = [] + for v in noqa_text.lstrip(" :").split(): + if v in RENAMED_TAGS: + tag = RENAMED_TAGS[v] + if v not in _found_deprecated_tags: + msg = f"Replaced outdated tag '{v}' with '{tag}', replace it to avoid future errors" + warnings.warn( + message=msg, + category=LintWarning, + source=WarnSource( + filename=lintable, + lineno=lineno, + tag="warning[outdated-tag]", + message=msg, + ), + stacklevel=0, + ) + _found_deprecated_tags.add(v) + v = tag + result.append(v) + return result + + +def append_skipped_rules( + pyyaml_data: AnsibleBaseYAMLObject, + lintable: Lintable, +) -> AnsibleBaseYAMLObject: + """Append 'skipped_rules' to individual tasks or single metadata block. + + For a file, uses 2nd parser (ruamel.yaml) to pull comments out of + yaml subsets, check for '# noqa' skipped rules, and append any skips to the + original parser (pyyaml) data relied on by remainder of ansible-lint. + + :param pyyaml_data: file text parsed via ansible and pyyaml. + :param file_text: raw file text. + :param file_type: type of file: tasks, handlers or meta. + :returns: original pyyaml_data altered with a 'skipped_rules' list added \ + to individual tasks, or added to the single metadata block. + """ + try: + yaml_skip = _append_skipped_rules(pyyaml_data, lintable) + except RuntimeError: + # Notify user of skip error, do not stop, do not change exit code + _logger.exception("Error trying to append skipped rules") + return pyyaml_data + + if not yaml_skip: + return pyyaml_data + + return yaml_skip + + +@cache +def load_data(file_text: str) -> Any: + """Parse ``file_text`` as yaml and return parsed structure. 
+ + This is the main culprit for slow performance, each rule asks for loading yaml again and again + ideally the ``maxsize`` on the decorator above MUST be great or equal total number of rules + :param file_text: raw text to parse + :return: Parsed yaml + """ + yaml = YAML() + # Ruamel role is not to validate the yaml file, so we ignore duplicate keys: + yaml.allow_duplicate_keys = True + try: + return yaml.load(file_text) + except ComposerError: + # load fails on multi-documents with ComposerError exception + return yaml.load_all(file_text) + + +def _append_skipped_rules( + pyyaml_data: AnsibleBaseYAMLObject, + lintable: Lintable, +) -> AnsibleBaseYAMLObject | None: + # parse file text using 2nd parser library + try: + ruamel_data = load_data(lintable.content) + except ScannerError as exc: + _logger.debug( + "Ignored loading skipped rules from file %s due to: %s", + lintable, + exc, + ) + # For unparsable file types, we return empty skip lists + return None + skipped_rules = _get_rule_skips_from_yaml(ruamel_data, lintable) + + if lintable.kind in [ + "yaml", + "requirements", + "vars", + "meta", + "reno", + "test-meta", + "galaxy", + ]: + # AnsibleMapping, dict + if hasattr(pyyaml_data, "get"): + pyyaml_data[SKIPPED_RULES_KEY] = skipped_rules + # AnsibleSequence, list + elif ( + not isinstance(pyyaml_data, str) + and isinstance(pyyaml_data, collections.abc.Sequence) + and skipped_rules + ): + pyyaml_data[0][SKIPPED_RULES_KEY] = skipped_rules + + return pyyaml_data + + # create list of blocks of tasks or nested tasks + if lintable.kind in ("tasks", "handlers"): + ruamel_task_blocks = ruamel_data + pyyaml_task_blocks = pyyaml_data + elif lintable.kind == "playbook": + try: + pyyaml_task_blocks = _get_task_blocks_from_playbook(pyyaml_data) + ruamel_task_blocks = _get_task_blocks_from_playbook(ruamel_data) + except (AttributeError, TypeError): + return pyyaml_data + else: + # For unsupported file types, we return empty skip lists + return None + + # get tasks from blocks of tasks + pyyaml_tasks = _get_tasks_from_blocks(pyyaml_task_blocks) + ruamel_tasks = _get_tasks_from_blocks(ruamel_task_blocks) + + # append skipped_rules for each task + for ruamel_task, pyyaml_task in zip(ruamel_tasks, pyyaml_tasks): + # ignore empty tasks + if not pyyaml_task and not ruamel_task: + continue + + # AnsibleUnicode or str + if isinstance(pyyaml_task, str): + continue + + if pyyaml_task.get("name") != ruamel_task.get("name"): + msg = "Error in matching skip comment to a task" + raise RuntimeError(msg) + pyyaml_task[SKIPPED_RULES_KEY] = _get_rule_skips_from_yaml( + ruamel_task, + lintable, + ) + + return pyyaml_data + + +def _get_task_blocks_from_playbook(playbook: Sequence[Any]) -> list[Any]: + """Return parts of playbook that contains tasks, and nested tasks. + + :param playbook: playbook yaml from yaml parser. + :returns: list of task dictionaries. 
+ """ + task_blocks = [] + for play, key in product(playbook, PLAYBOOK_TASK_KEYWORDS): + task_blocks.extend(play.get(key, [])) + return task_blocks + + +def _get_tasks_from_blocks(task_blocks: Sequence[Any]) -> Generator[Any, None, None]: + """Get list of tasks from list made of tasks and nested tasks.""" + if not task_blocks: + return + + def get_nested_tasks(task: Any) -> Generator[Any, None, None]: + if not task or not is_nested_task(task): + return + for k in NESTED_TASK_KEYS: + if k in task and task[k]: + if hasattr(task[k], "get"): + continue + for subtask in task[k]: + yield from get_nested_tasks(subtask) + yield subtask + + for task in task_blocks: + yield from get_nested_tasks(task) + yield task + + +def _get_rule_skips_from_yaml( + yaml_input: Sequence[Any], + lintable: Lintable, +) -> Sequence[Any]: + """Traverse yaml for comments with rule skips and return list of rules.""" + yaml_comment_obj_strings = [] + + if isinstance(yaml_input, str): + return [] + + def traverse_yaml(obj: Any) -> None: + for entry in obj.ca.items.values(): + for v in entry: + if isinstance(v, CommentToken): + comment_str = v.value + if _noqa_comment_re.match(comment_str): + line = v.start_mark.line + 1 # ruamel line numbers start at 0 + lintable.line_skips[line].update( + get_rule_skips_from_line( + comment_str.strip(), + lintable=lintable, + lineno=line, + ), + ) + yaml_comment_obj_strings.append(str(obj.ca.items)) + if isinstance(obj, dict): + for val in obj.values(): + if isinstance(val, (dict, list)): + traverse_yaml(val) + elif isinstance(obj, list): + for element in obj: + if isinstance(element, (dict, list)): + traverse_yaml(element) + else: + return + + if isinstance(yaml_input, (dict, list)): + traverse_yaml(yaml_input) + + rule_id_list = [] + for comment_obj_str in yaml_comment_obj_strings: + for line in comment_obj_str.split(r"\n"): + rule_id_list.extend(get_rule_skips_from_line(line, lintable=lintable)) + + return [normalize_tag(tag) for tag in rule_id_list] + + +def normalize_tag(tag: str) -> str: + """Return current name of tag.""" + if tag in RENAMED_TAGS: + used_old_tags[tag] = RENAMED_TAGS[tag] + return RENAMED_TAGS[tag] + return tag + + +def is_nested_task(task: dict[str, Any]) -> bool: + """Check if task includes block/always/rescue.""" + # Cannot really trust the input + if isinstance(task, str): + return False + + return any(task.get(key) for key in NESTED_TASK_KEYS) diff --git a/src/ansiblelint/stats.py b/src/ansiblelint/stats.py new file mode 100644 index 0000000..67320b8 --- /dev/null +++ b/src/ansiblelint/stats.py @@ -0,0 +1,36 @@ +"""Module hosting functionality about reporting.""" +from __future__ import annotations + +from dataclasses import dataclass, field + + +@dataclass(order=True) +class TagStats: + """Tag statistics.""" + + order: int = 0 # to be computed based on rule's profile + tag: str = "" # rule effective id (can be multiple tags per rule id) + count: int = 0 # total number of occurrences + warning: bool = False # set true if listed in warn_list + profile: str = "" + associated_tags: list[str] = field(default_factory=list) + + +class SummarizedResults: + """The statistics about an ansible-lint run.""" + + failures: int = 0 + warnings: int = 0 + fixed_failures: int = 0 + fixed_warnings: int = 0 + tag_stats: dict[str, TagStats] = {} + passed_profile: str = "" + + @property + def fixed(self) -> int: + """Get total fixed count.""" + return self.fixed_failures + self.fixed_warnings + + def sort(self) -> None: + """Sort tag stats by tag name.""" + self.tag_stats = 
dict(sorted(self.tag_stats.items(), key=lambda t: t[1])) diff --git a/src/ansiblelint/testing/__init__.py b/src/ansiblelint/testing/__init__.py new file mode 100644 index 0000000..e7f6c1b --- /dev/null +++ b/src/ansiblelint/testing/__init__.py @@ -0,0 +1,159 @@ +"""Test utils for ansible-lint.""" +from __future__ import annotations + +import os +import shutil +import subprocess +import sys +import tempfile +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from ansiblelint.app import get_app + +if TYPE_CHECKING: + # https://github.com/PyCQA/pylint/issues/3240 + # pylint: disable=unsubscriptable-object + CompletedProcess = subprocess.CompletedProcess[Any] + from ansiblelint.errors import MatchError + from ansiblelint.rules import RulesCollection +else: + CompletedProcess = subprocess.CompletedProcess + +# pylint: disable=wrong-import-position +from ansiblelint.runner import Runner + + +class RunFromText: + """Use Runner on temp files created from testing text snippets.""" + + app = None + + def __init__(self, collection: RulesCollection) -> None: + """Initialize a RunFromText instance with rules collection.""" + # Emulate command line execution initialization as without it Ansible module + # would be loaded with incomplete module/role/collection list. + if not self.app: # pragma: no cover + self.app = get_app(offline=True) + + self.collection = collection + + def _call_runner(self, path: Path) -> list[MatchError]: + runner = Runner(path, rules=self.collection) + return runner.run() + + def run(self, filename: Path) -> list[MatchError]: + """Lints received filename.""" + return self._call_runner(filename) + + def run_playbook( + self, + playbook_text: str, + prefix: str = "playbook", + ) -> list[MatchError]: + """Lints received text as a playbook.""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".yml", prefix=prefix) as fh: + fh.write(playbook_text) + fh.flush() + results = self._call_runner(Path(fh.name)) + return results + + def run_role_tasks_main( + self, + tasks_main_text: str, + tmp_path: Path, + ) -> list[MatchError]: + """Lints received text as tasks.""" + role_path = tmp_path + tasks_path = role_path / "tasks" + tasks_path.mkdir(parents=True, exist_ok=True) + with (tasks_path / "main.yml").open("w", encoding="utf-8") as fh: + fh.write(tasks_main_text) + fh.flush() + results = self._call_runner(role_path) + shutil.rmtree(role_path) + return results + + def run_role_meta_main( + self, + meta_main_text: str, + temp_path: Path, + ) -> list[MatchError]: + """Lints received text as meta.""" + role_path = temp_path + meta_path = role_path / "meta" + meta_path.mkdir(parents=True, exist_ok=True) + with (meta_path / "main.yml").open("w", encoding="utf-8") as fh: + fh.write(meta_main_text) + fh.flush() + results = self._call_runner(role_path) + shutil.rmtree(role_path) + return results + + def run_role_defaults_main( + self, + defaults_main_text: str, + tmp_path: Path, + ) -> list[MatchError]: + """Lints received text as vars file in defaults.""" + role_path = tmp_path + defaults_path = role_path / "defaults" + defaults_path.mkdir(parents=True, exist_ok=True) + with (defaults_path / "main.yml").open("w", encoding="utf-8") as fh: + fh.write(defaults_main_text) + fh.flush() + results = self._call_runner(role_path) + shutil.rmtree(role_path) + return results + + +def run_ansible_lint( + *argv: str | Path, + cwd: Path | None = None, + executable: str | None = None, + env: dict[str, str] | None = None, + offline: bool = True, +) -> CompletedProcess: + """Run ansible-lint on 
a given path and returns its output.""" + args = [str(item) for item in argv] + if offline: # pragma: no cover + args.insert(0, "--offline") + + if not executable: + executable = sys.executable + args = [sys.executable, "-m", "ansiblelint", *args] + else: + args = [executable, *args] + + # It is not safe to pass entire env for testing as other tests would + # pollute the env, causing weird behaviors, so we pass only a safe list of + # vars. + safe_list = [ + "COVERAGE_FILE", + "COVERAGE_PROCESS_START", + "HOME", + "LANG", + "LC_ALL", + "LC_CTYPE", + "NO_COLOR", + "PATH", + "PYTHONIOENCODING", + "PYTHONPATH", + "TERM", + "VIRTUAL_ENV", + ] + + _env = {} if env is None else env + for v in safe_list: + if v in os.environ and v not in _env: + _env[v] = os.environ[v] + + return subprocess.run( + args, + capture_output=True, + shell=False, # needed when command is a list + check=False, + cwd=cwd, + env=_env, + text=True, + ) diff --git a/src/ansiblelint/testing/fixtures.py b/src/ansiblelint/testing/fixtures.py new file mode 100644 index 0000000..814a076 --- /dev/null +++ b/src/ansiblelint/testing/fixtures.py @@ -0,0 +1,63 @@ +"""PyTest Fixtures. + +They should not be imported, instead add code below to your root conftest.py +file: + +pytest_plugins = ['ansiblelint.testing'] +""" +from __future__ import annotations + +import copy +from typing import TYPE_CHECKING + +import pytest + +from ansiblelint.config import Options, options +from ansiblelint.constants import DEFAULT_RULESDIR +from ansiblelint.rules import RulesCollection +from ansiblelint.testing import RunFromText + +if TYPE_CHECKING: + from collections.abc import Iterator + + from _pytest.fixtures import SubRequest + + +# The sessions scope does not apply to xdist, so we will still have one +# session for each worker, but at least it will a limited number. +@pytest.fixture(name="default_rules_collection", scope="session") +def fixture_default_rules_collection() -> RulesCollection: + """Return default rule collection.""" + assert DEFAULT_RULESDIR.is_dir() + # For testing we want to manually enable opt-in rules + test_options = copy.deepcopy(options) + test_options.enable_list = ["no-same-owner"] + # That is instantiated very often and do want to avoid ansible-galaxy + # install errors due to concurrency. 
+ test_options.offline = True + return RulesCollection(rulesdirs=[DEFAULT_RULESDIR], options=test_options) + + +@pytest.fixture() +def default_text_runner(default_rules_collection: RulesCollection) -> RunFromText: + """Return RunFromText instance for the default set of collections.""" + return RunFromText(default_rules_collection) + + +@pytest.fixture() +def rule_runner(request: SubRequest, config_options: Options) -> RunFromText: + """Return runner for a specific rule class.""" + rule_class = request.param + config_options.enable_list.append(rule_class().id) + collection = RulesCollection(options=config_options) + collection.register(rule_class()) + return RunFromText(collection) + + +@pytest.fixture(name="config_options") +def fixture_config_options() -> Iterator[Options]: + """Return configuration options that will be restored after testrun.""" + global options # pylint: disable=global-statement,invalid-name # noqa: PLW0603 + original_options = copy.deepcopy(options) + yield options + options = original_options diff --git a/src/ansiblelint/text.py b/src/ansiblelint/text.py new file mode 100644 index 0000000..038fde1 --- /dev/null +++ b/src/ansiblelint/text.py @@ -0,0 +1,49 @@ +"""Text utils.""" +from __future__ import annotations + +import re +from functools import cache + +RE_HAS_JINJA = re.compile(r"{[{%#].*[%#}]}", re.DOTALL) +RE_HAS_GLOB = re.compile("[][*?]") + + +def strip_ansi_escape(data: str | bytes) -> str: + """Remove all ANSI escapes from string or bytes. + + If bytes is passed instead of string, it will be converted to string + using UTF-8. + """ + if isinstance(data, bytes): # pragma: no branch + data = data.decode("utf-8") + + return re.sub(r"\x1b[^m]*m", "", data) + + +def toidentifier(text: str) -> str: + """Convert unsafe chars to ones allowed in variables.""" + result = re.sub(r"[\s-]+", "_", text) + if not result.isidentifier(): + msg = f"Unable to convert role name '{text}' to valid variable name." + raise RuntimeError(msg) + return result + + +# https://www.python.org/dev/peps/pep-0616/ +def removeprefix(self: str, prefix: str) -> str: + """Remove prefix from string.""" + if self.startswith(prefix): + return self[len(prefix) :] + return self[:] + + +@cache +def has_jinja(value: str) -> bool: + """Return true if a string seems to contain jinja templating.""" + return bool(isinstance(value, str) and RE_HAS_JINJA.search(value)) + + +@cache +def has_glob(value: str) -> bool: + """Return true if a string looks like having a glob pattern.""" + return bool(isinstance(value, str) and RE_HAS_GLOB.search(value)) diff --git a/src/ansiblelint/transformer.py b/src/ansiblelint/transformer.py new file mode 100644 index 0000000..3716ef9 --- /dev/null +++ b/src/ansiblelint/transformer.py @@ -0,0 +1,147 @@ +"""Transformer implementation.""" +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, Union, cast + +from ruamel.yaml.comments import CommentedMap, CommentedSeq + +from ansiblelint.file_utils import Lintable +from ansiblelint.rules import AnsibleLintRule, TransformMixin +from ansiblelint.yaml_utils import FormattedYAML, get_path_to_play, get_path_to_task + +if TYPE_CHECKING: + from ansiblelint.config import Options + from ansiblelint.errors import MatchError + from ansiblelint.runner import LintResult + +__all__ = ["Transformer"] + +_logger = logging.getLogger(__name__) + + +# pylint: disable=too-few-public-methods +class Transformer: + """Transformer class marshals transformations. 
+ + The Transformer is similar to the ``ansiblelint.runner.Runner`` which manages + running each of the rules. We only expect there to be one ``Transformer`` instance + which should be instantiated from the main entrypoint function. + + In the future, the transformer will be responsible for running transforms for each + of the rule matches. For now, it just reads/writes YAML files which is a + pre-requisite for the planned rule-specific transforms. + """ + + def __init__(self, result: LintResult, options: Options): + """Initialize a Transformer instance.""" + self.write_set = self.effective_write_set(options.write_list) + + self.matches: list[MatchError] = result.matches + self.files: set[Lintable] = result.files + + lintables: dict[str, Lintable] = {file.filename: file for file in result.files} + self.matches_per_file: dict[Lintable, list[MatchError]] = { + file: [] for file in result.files + } + + for match in self.matches: + try: + lintable = lintables[match.filename] + except KeyError: + # we shouldn't get here, but this is easy to recover from so do that. + lintable = Lintable(match.filename) + self.matches_per_file[lintable] = [] + self.matches_per_file[lintable].append(match) + + @staticmethod + def effective_write_set(write_list: list[str]) -> set[str]: + """Simplify write_list based on ``"none"`` and ``"all"`` keywords. + + ``"none"`` resets the enabled rule transforms. + This returns ``{"none"}`` or a set of everything after the last ``"none"``. + + If ``"all"`` is in the ``write_list`` (after ``"none"`` if present), + then this will return ``{"all"}``. + """ + none_indexes = [i for i, value in enumerate(write_list) if value == "none"] + if none_indexes: + index = none_indexes[-1] + if len(write_list) > index + 1: + index += 1 + write_list = write_list[index:] + if "all" in write_list: + return {"all"} + return set(write_list) + + def run(self) -> None: + """For each file, read it, execute transforms on it, then write it.""" + for file, matches in self.matches_per_file.items(): + # str() convinces mypy that "text/yaml" is a valid Literal. + # Otherwise, it thinks base_kind is one of playbook, meta, tasks, ... + file_is_yaml = str(file.base_kind) == "text/yaml" + + try: + data: str = file.content + except (UnicodeDecodeError, IsADirectoryError): + # we hit a binary file (eg a jar or tar.gz) or a directory + data = "" + file_is_yaml = False + + ruamel_data: CommentedMap | CommentedSeq | None = None + if file_is_yaml: + # We need a fresh YAML() instance for each load because ruamel.yaml + # stores intermediate state during load which could affect loading + # any other files. (Based on suggestion from ruamel.yaml author) + yaml = FormattedYAML() + + ruamel_data = yaml.loads(data) + if not isinstance(ruamel_data, (CommentedMap, CommentedSeq)): + # This is an empty vars file or similar which loads as None. + # It is not safe to write this file or data-loss is likely. + # Only maps and sequences can preserve comments. Skip it. + _logger.debug( + "Ignored reformatting %s because current implementation in ruamel.yaml would drop comments. 
See https://sourceforge.net/p/ruamel-yaml/tickets/460/", + file, + ) + continue + + if self.write_set != {"none"}: + self._do_transforms(file, ruamel_data or data, file_is_yaml, matches) + + if file_is_yaml: + # noinspection PyUnboundLocalVariable + file.content = yaml.dumps(ruamel_data) + + if file.updated: + file.write() + + def _do_transforms( + self, + file: Lintable, + data: CommentedMap | CommentedSeq | str, + file_is_yaml: bool, + matches: list[MatchError], + ) -> None: + """Do Rule-Transforms handling any last-minute MatchError inspections.""" + for match in sorted(matches): + if not isinstance(match.rule, TransformMixin): + continue + if self.write_set != {"all"}: + rule = cast(AnsibleLintRule, match.rule) + rule_definition = set(rule.tags) + rule_definition.add(rule.id) + if rule_definition.isdisjoint(self.write_set): + # rule transform not requested. Skip it. + continue + if file_is_yaml and not match.yaml_path: + data = cast(Union[CommentedMap, CommentedSeq], data) + if match.match_type == "play": + match.yaml_path = get_path_to_play(file, match.lineno, data) + elif match.task or file.kind in ( + "tasks", + "handlers", + "playbook", + ): + match.yaml_path = get_path_to_task(file, match.lineno, data) + match.rule.transform(match, file, data) diff --git a/src/ansiblelint/utils.py b/src/ansiblelint/utils.py new file mode 100644 index 0000000..9cb97aa --- /dev/null +++ b/src/ansiblelint/utils.py @@ -0,0 +1,1020 @@ +# Copyright (c) 2013-2014 Will Thames <will@thames.id.au> +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+# spell-checker:ignore dwim +# pylint: disable=too-many-lines +"""Generic utility helpers.""" +from __future__ import annotations + +import contextlib +import inspect +import logging +import os +import re +from collections.abc import Generator, ItemsView, Iterator, Mapping, Sequence +from dataclasses import _MISSING_TYPE, dataclass, field +from functools import cache +from pathlib import Path +from typing import Any + +import yaml +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.parsing.dataloader import DataLoader +from ansible.parsing.mod_args import ModuleArgsParser +from ansible.parsing.yaml.constructor import AnsibleConstructor, AnsibleMapping +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence +from ansible.plugins.loader import add_all_plugin_dirs +from ansible.template import Templar +from ansible.utils.collection_loader import AnsibleCollectionConfig +from yaml.composer import Composer +from yaml.representer import RepresenterError + +from ansiblelint._internal.rules import ( + AnsibleParserErrorRule, + RuntimeErrorRule, +) +from ansiblelint.app import get_app +from ansiblelint.config import Options, options +from ansiblelint.constants import ( + ANNOTATION_KEYS, + FILENAME_KEY, + INCLUSION_ACTION_NAMES, + LINE_NUMBER_KEY, + NESTED_TASK_KEYS, + PLAYBOOK_TASK_KEYWORDS, + ROLE_IMPORT_ACTION_NAMES, + SKIPPED_RULES_KEY, + FileType, +) +from ansiblelint.errors import MatchError +from ansiblelint.file_utils import Lintable, discover_lintables +from ansiblelint.skip_utils import is_nested_task +from ansiblelint.text import removeprefix + +# ansible-lint doesn't need/want to know about encrypted secrets, so we pass a +# string as the password to enable such yaml files to be opened and parsed +# successfully. +DEFAULT_VAULT_PASSWORD = "x" # noqa: S105 +COLLECTION_PLAY_RE = re.compile(r"^[\w\d_]+\.[\w\d_]+\.[\w\d_]+$") + +PLAYBOOK_DIR = os.environ.get("ANSIBLE_PLAYBOOK_DIR", None) + + +_logger = logging.getLogger(__name__) + + +def parse_yaml_from_file(filepath: str) -> AnsibleBaseYAMLObject: + """Extract a decrypted YAML object from file.""" + dataloader = DataLoader() + if hasattr(dataloader, "set_vault_password"): + dataloader.set_vault_password(DEFAULT_VAULT_PASSWORD) + return dataloader.load_from_file(filepath) + + +def path_dwim(basedir: str, given: str) -> str: + """Convert a given path do-what-I-mean style.""" + dataloader = DataLoader() + dataloader.set_basedir(basedir) + return str(dataloader.path_dwim(given)) + + +def ansible_templar(basedir: Path, templatevars: Any) -> Templar: + """Create an Ansible Templar using templatevars.""" + # `basedir` is the directory containing the lintable file. + # Therefore, for tasks in a role, `basedir` has the form + # `roles/some_role/tasks`. On the other hand, the search path + # is `roles/some_role/{files,templates}`. As a result, the + # `tasks` part in the basedir should be stripped stripped. + if basedir.name == "tasks": + basedir = basedir.parent + + dataloader = DataLoader() + dataloader.set_basedir(basedir) + templar = Templar(dataloader, variables=templatevars) + return templar + + +def mock_filter(left: Any, *args: Any, **kwargs: Any) -> Any: # noqa: ARG001 + """Mock a filter that can take any combination of args and kwargs. + + This will return x when x | filter(y,z) is called + e.g. 
{{ foo | ansible.utils.ipaddr('address') }} + + :param left: The left hand side of the filter + :param args: The args passed to the filter + :param kwargs: The kwargs passed to the filter + :return: The left hand side of the filter + """ + # pylint: disable=unused-argument + return left + + +def ansible_template( + basedir: Path, + varname: Any, + templatevars: Any, + **kwargs: Any, +) -> Any: + """Render a templated string by mocking missing filters. + + In the case of a missing lookup, ansible core does an early exit + when disable_lookup=True, but this only happens after the jinja2 syntax has + already been validated, so we return the original string as if it had been templated. + + In the case of a missing filter, extract the missing filter plugin name + from the ansible error, 'Could not load "filter"'. Then mock the filter + and template the string again. The range(10) loop allows for up to 10 unknown filters + in succession. + + :param basedir: The directory containing the lintable file + :param varname: The string to be templated + :param templatevars: The variables to be used in the template + :param kwargs: Additional arguments to be passed to the templating engine + :return: The templated string or None + :raises: AnsibleError if the filter plugin cannot be extracted or the + string could not be templated in 10 attempts + """ + # pylint: disable=too-many-locals + filter_error = "template error while templating string:" + lookup_error = "was found, however lookups were disabled from templating" + re_filter_fqcn = re.compile(r"\w+\.\w+\.\w+") + re_filter_in_err = re.compile(r"Could not load \"(\w+)\"") + re_valid_filter = re.compile(r"^\w+(\.\w+\.\w+)?$") + templar = ansible_templar(basedir=basedir, templatevars=templatevars) + + kwargs["disable_lookups"] = True + for _i in range(10): + try: + templated = templar.template(varname, **kwargs) + return templated + except AnsibleError as exc: + if lookup_error in exc.message: + return varname + if exc.message.startswith(filter_error): + while True: + match = re_filter_in_err.search(exc.message) + if match: + missing_filter = match.group(1) + break + match = re_filter_fqcn.search(exc.message) + if match: + missing_filter = match.group(0) + break + missing_filter = exc.message.split("'")[1] + break + + if not re_valid_filter.match(missing_filter): + err = f"Could not parse missing filter name from error message: {exc.message}" + _logger.warning(err) + raise + + # pylint: disable=protected-access + templar.environment.filters._delegatee[ # noqa: SLF001 + missing_filter + ] = mock_filter + # Record the mocked filter so we can warn the user + if missing_filter not in options.mock_filters: + _logger.debug("Mocking missing filter %s", missing_filter) + options.mock_filters.append(missing_filter) + continue + raise + return None + + +BLOCK_NAME_TO_ACTION_TYPE_MAP = { + "tasks": "task", + "handlers": "handler", + "pre_tasks": "task", + "post_tasks": "task", + "block": "meta", + "rescue": "meta", + "always": "meta", +} + + +def tokenize(line: str) -> tuple[str, list[str], dict[str, str]]: + """Parse a string task invocation.""" + tokens = line.lstrip().split(" ") + if tokens[0] == "-": + tokens = tokens[1:] + if tokens[0] == "action:" or tokens[0] == "local_action:": + tokens = tokens[1:] + command = tokens[0].replace(":", "") + + args = [] + kwargs = {} + non_kv_found = False + for arg in tokens[1:]: + if "=" in arg and not non_kv_found: + key_value = arg.split("=", 1) + kwargs[key_value[0]] = key_value[1] + else: + non_kv_found = True + args.append(arg) + return (command,
args, kwargs) + + +def playbook_items(pb_data: AnsibleBaseYAMLObject) -> ItemsView: # type: ignore[type-arg] + """Return a list of items from within the playbook.""" + if isinstance(pb_data, dict): + return pb_data.items() + if not pb_data: + return [] # type: ignore[return-value] + + # "if play" prevents failure if the play sequence contains None, + # which is weird but currently allowed by Ansible + # https://github.com/ansible/ansible-lint/issues/849 + return [item for play in pb_data if play for item in play.items()] # type: ignore[return-value] + + +def set_collections_basedir(basedir: Path) -> None: + """Set the playbook directory as playbook_paths for the collection loader.""" + # Ansible expects only absolute paths inside `playbook_paths` and will + # produce weird errors if we use a relative one. + AnsibleCollectionConfig.playbook_paths = str(basedir.resolve()) + + +def template( + basedir: Path, + value: Any, + variables: Any, + *, + fail_on_error: bool = False, + fail_on_undefined: bool = False, + **kwargs: str, +) -> Any: + """Attempt rendering a value with known vars.""" + try: + value = ansible_template( + basedir.resolve(), + value, + variables, + **dict(kwargs, fail_on_undefined=fail_on_undefined), + ) + # Hack to skip the following exception when using to_json filter on a variable. # noqa: FIX004 + # I guess the filter doesn't like empty vars... + except (AnsibleError, ValueError, RepresenterError): + # templating failed, so just keep value as is. + if fail_on_error: + raise + return value + + +def _include_children( + basedir: str, + k: str, + v: Any, + parent_type: FileType, +) -> list[Lintable]: + # handle special case include_tasks: name=filename.yml + if k in INCLUSION_ACTION_NAMES and isinstance(v, dict) and "file" in v: + v = v["file"] + + # we cannot really parse any jinja2 in includes, so we ignore them + if not v or "{{" in v: + return [] + + if "import_playbook" in k and COLLECTION_PLAY_RE.match(v): + # Any import_playbooks from collections should be ignored as ansible + # own syntax check will handle them. 
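+ # For example, `- import_playbook: my_namespace.my_collection.site` (a + # hypothetical FQCN) matches COLLECTION_PLAY_RE, three dot-separated + # identifiers, and is therefore skipped here.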
+ return [] + + # handle include: filename.yml tags=blah + # pylint: disable=unused-variable + (command, args, kwargs) = tokenize(f"{k}: {v}") + + result = path_dwim(basedir, args[0]) + while basedir not in ["", "/"]: + if os.path.exists(result): + break + basedir = os.path.dirname(basedir) + result = path_dwim(basedir, args[0]) + + return [Lintable(result, kind=parent_type)] + + +def _taskshandlers_children( + basedir: str, + k: str, + v: None | Any, + parent_type: FileType, +) -> list[Lintable]: + results: list[Lintable] = [] + if v is None: + raise MatchError( + message="A malformed block was encountered while loading a block.", + rule=RuntimeErrorRule(), + ) + for task_handler in v: + # ignore empty tasks, `-` + if not task_handler: + continue + + with contextlib.suppress(LookupError): + children = _get_task_handler_children_for_tasks_or_playbooks( + task_handler, + basedir, + k, + parent_type, + ) + results.append(children) + continue + + if any(x in task_handler for x in ROLE_IMPORT_ACTION_NAMES): + task_handler = normalize_task_v2(task_handler) + _validate_task_handler_action_for_role(task_handler["action"]) + results.extend( + _roles_children( + basedir, + k, + [task_handler["action"].get("name")], + parent_type, + main=task_handler["action"].get("tasks_from", "main"), + ), + ) + continue + + if "block" not in task_handler: + continue + + results.extend( + _taskshandlers_children(basedir, k, task_handler["block"], parent_type), + ) + if "rescue" in task_handler: + results.extend( + _taskshandlers_children( + basedir, + k, + task_handler["rescue"], + parent_type, + ), + ) + if "always" in task_handler: + results.extend( + _taskshandlers_children( + basedir, + k, + task_handler["always"], + parent_type, + ), + ) + + return results + + +def _get_task_handler_children_for_tasks_or_playbooks( + task_handler: dict[str, Any], + basedir: str, + k: Any, + parent_type: FileType, +) -> Lintable: + """Try to get children of taskhandler for include/import tasks/playbooks.""" + child_type = k if parent_type == "playbook" else parent_type + + # Include the FQCN task names as this happens before normalize + for task_handler_key in INCLUSION_ACTION_NAMES: + with contextlib.suppress(KeyError): + # ignore empty tasks + if not task_handler: # pragma: no branch + continue + + file_name = task_handler[task_handler_key] + if isinstance(file_name, Mapping) and file_name.get("file", None): + file_name = file_name["file"] + + f = path_dwim(basedir, file_name) + while basedir not in ["", "/"]: + if os.path.exists(f): + break + basedir = os.path.dirname(basedir) + f = path_dwim(basedir, file_name) + return Lintable(f, kind=child_type) + msg = f'The node contains none of: {", ".join(sorted(INCLUSION_ACTION_NAMES))}' + raise LookupError(msg) + + +def _validate_task_handler_action_for_role(th_action: dict[str, Any]) -> None: + """Verify that the task handler action is valid for role include.""" + module = th_action["__ansible_module__"] + + if "name" not in th_action: + raise MatchError(message=f"Failed to find required 'name' key in {module!s}") + + if not isinstance(th_action["name"], str): + raise MatchError( + message=f"Value assigned to 'name' key on '{module!s}' is not a string.", + ) + + +def _roles_children( + basedir: str, + k: str, + v: Sequence[Any], + parent_type: FileType, # noqa: ARG001 + main: str = "main", +) -> list[Lintable]: + # pylint: disable=unused-argument # parent_type) + results: list[Lintable] = [] + if not v: + # typing does not prevent junk from being passed in + return results + for 
role in v: + if isinstance(role, dict): + if "role" in role or "name" in role: + if "tags" not in role or "skip_ansible_lint" not in role["tags"]: + results.extend( + _look_for_role_files( + basedir, + role.get("role", role.get("name")), + main=main, + ), + ) + elif k != "dependencies": + msg = f'role dict {role} does not contain a "role" or "name" key' + raise SystemExit(msg) + else: + results.extend(_look_for_role_files(basedir, role, main=main)) + return results + + +def _rolepath(basedir: str, role: str) -> str | None: + role_path = None + + possible_paths = [ + # if included from a playbook + path_dwim(basedir, os.path.join("roles", role)), + path_dwim(basedir, role), + # if included from roles/[role]/meta/main.yml + path_dwim(basedir, os.path.join("..", "..", "..", "roles", role)), + path_dwim(basedir, os.path.join("..", "..", role)), + # if checking a role in the current directory + path_dwim(basedir, os.path.join("..", role)), + ] + + for loc in get_app(offline=True).runtime.config.default_roles_path: + loc = os.path.expanduser(loc) + possible_paths.append(path_dwim(loc, role)) + + possible_paths.append(path_dwim(basedir, "")) + + for path_option in possible_paths: # pragma: no branch + if os.path.isdir(path_option): + role_path = path_option + break + + if role_path: # pragma: no branch + add_all_plugin_dirs(role_path) + + return role_path + + +def _look_for_role_files( + basedir: str, + role: str, + main: str | None = "main", # noqa: ARG001 +) -> list[Lintable]: + # pylint: disable=unused-argument # main + role_path = _rolepath(basedir, role) + if not role_path: # pragma: no branch + return [] + + results = [] + + for kind in ["tasks", "meta", "handlers", "vars", "defaults"]: + current_path = os.path.join(role_path, kind) + for folder, _, files in os.walk(current_path): + for file in files: + file_ignorecase = file.lower() + if file_ignorecase.endswith((".yml", ".yaml")): + results.append(Lintable(os.path.join(folder, file))) + + return results + + +def _sanitize_task(task: dict[str, Any]) -> dict[str, Any]: + """Return a stripped-off task structure compatible with new Ansible. + + This helper takes a copy of the incoming task and drops + any internally used keys from it. + """ + result = task.copy() + # task is an AnsibleMapping which inherits from OrderedDict, so we need + # to use `del` to remove unwanted keys. 
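+ # Illustrative example: {"name": "x", "command": "true", "__file__": "p.yml", + # "__line__": 3} comes back as {"name": "x", "command": "true"}, with the + # internal __file__/__line__/skipped-rules annotations removed.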
+ for k in [SKIPPED_RULES_KEY, FILENAME_KEY, LINE_NUMBER_KEY]: + if k in result: + del result[k] + return result + + +def _extract_ansible_parsed_keys_from_task( + result: dict[str, Any], + task: dict[str, Any], + keys: tuple[str, ...], +) -> dict[str, Any]: + """Return a dict with existing key in task.""" + for k, v in list(task.items()): + if k in keys: + # we don't want to re-assign these values, which were + # determined by the ModuleArgsParser() above + continue + result[k] = v + return result + + +def normalize_task_v2(task: dict[str, Any]) -> dict[str, Any]: + """Ensure tasks have a normalized action key and strings are converted to python objects.""" + result: dict[str, Any] = {} + ansible_parsed_keys = ("action", "local_action", "args", "delegate_to") + + if is_nested_task(task): + _extract_ansible_parsed_keys_from_task(result, task, ansible_parsed_keys) + # Add dummy action for block/always/rescue statements + result["action"] = { + "__ansible_module__": "block/always/rescue", + "__ansible_module_original__": "block/always/rescue", + } + + return result + + sanitized_task = _sanitize_task(task) + mod_arg_parser = ModuleArgsParser(sanitized_task) + + try: + action, arguments, result["delegate_to"] = mod_arg_parser.parse( + skip_action_validation=options.skip_action_validation, + ) + except AnsibleParserError as exc: + # pylint: disable=raise-missing-from + raise MatchError( + rule=AnsibleParserErrorRule(), + message=exc.message, + filename=task.get(FILENAME_KEY, "Unknown"), + lineno=task.get(LINE_NUMBER_KEY, 0), + ) from exc + + # denormalize shell -> command conversion + if "_uses_shell" in arguments: + action = "shell" + del arguments["_uses_shell"] + + _extract_ansible_parsed_keys_from_task( + result, + task, + (*ansible_parsed_keys, action), + ) + + if not isinstance(action, str): + msg = f"Task actions can only be strings, got {action}" + raise RuntimeError(msg) + action_unnormalized = action + # convert builtin fqn calls to short forms because most rules know only + # about short calls but in the future we may switch the normalization to do + # the opposite. 
Mainly we currently consider normalized the module listing + # used by `ansible-doc -t module -l 2>/dev/null` + action = removeprefix(action, "ansible.builtin.") + result["action"] = { + "__ansible_module__": action, + "__ansible_module_original__": action_unnormalized, + } + + result["action"].update(arguments) + return result + + +def normalize_task(task: dict[str, Any], filename: str) -> dict[str, Any]: + """Unify task-like object structures.""" + ansible_action_type = task.get("__ansible_action_type__", "task") + if "__ansible_action_type__" in task: + del task["__ansible_action_type__"] + task = normalize_task_v2(task) + task[FILENAME_KEY] = filename + task["__ansible_action_type__"] = ansible_action_type + return task + + +def task_to_str(task: dict[str, Any]) -> str: + """Make a string identifier for the given task.""" + name = task.get("name") + if name: + return str(name) + action = task.get("action") + if isinstance(action, str) or not isinstance(action, dict): + return str(action) + args = [ + f"{k}={v}" + for (k, v) in action.items() + if k + not in [ + "__ansible_module__", + "__ansible_module_original__", + "_raw_params", + LINE_NUMBER_KEY, + FILENAME_KEY, + ] + ] + + _raw_params = action.get("_raw_params", []) + if isinstance(_raw_params, list): + for item in _raw_params: + args.append(str(item)) + else: + args.append(_raw_params) + + return f"{action['__ansible_module__']} {' '.join(args)}" + + +def extract_from_list( + blocks: AnsibleBaseYAMLObject, + candidates: list[str], + *, + recursive: bool = False, +) -> list[Any]: + """Get action tasks from block structures.""" + results = [] + for block in blocks: + for candidate in candidates: + if isinstance(block, dict) and candidate in block: + if isinstance(block[candidate], list): + subresults = add_action_type(block[candidate], candidate) + if recursive: + subresults.extend( + extract_from_list( + subresults, + candidates, + recursive=recursive, + ), + ) + results.extend(subresults) + elif block[candidate] is not None: + msg = f"Key '{candidate}' defined, but bad value: '{block[candidate]!s}'" + raise RuntimeError(msg) + return results + + +@dataclass +class Task(dict[str, Any]): + """Class that represents a task from linter point of view. + + raw_task: + When looping through the tasks in the file, each "raw_task" is minimally + processed to include these special keys: __line__, __file__, skipped_rules. + normalized_task: + When each raw_task is "normalized", action shorthand (strings) get parsed + by ansible into python objects and the action key gets normalized. If the task + should be skipped (skipped is True) or normalizing it fails (error is not None) + then this is just the raw_task instead of a normalized copy. + skip_tags: + List of tags found to be skipped, from tags block or noqa comments + error: + This is normally None. It will be a MatchError when the raw_task cannot be + normalized due to an AnsibleParserError. + position: Any + """ + + raw_task: dict[str, Any] + filename: str = "" + _normalized_task: dict[str, Any] | _MISSING_TYPE = field(init=False, repr=False) + error: MatchError | None = None + position: Any = None + + @property + def name(self) -> str | None: + """Return the name of the task.""" + return self.raw_task.get("name", None) + + @property + def action(self) -> str: + """Return the resolved action name.""" + action_name = self.normalized_task["action"]["__ansible_module_original__"] + if not isinstance(action_name, str): + msg = "Task actions can only be strings." 
+ raise RuntimeError(msg) + return action_name + + @property + def args(self) -> Any: + """Return the arguments passed to the task action. + + While we usually expect to return a dictionary, it can also + return a templated string when jinja is used. + """ + if "args" in self.raw_task: + return self.raw_task["args"] + result = {} + for k, v in self.normalized_task["action"].items(): + if k not in ANNOTATION_KEYS: + result[k] = v + return result + + @property + def normalized_task(self) -> dict[str, Any]: + """Return the name of the task.""" + if not hasattr(self, "_normalized_task"): + try: + self._normalized_task = normalize_task( + self.raw_task, + filename=self.filename, + ) + except MatchError as err: + self.error = err + # When we cannot normalize it, we just use the raw task instead + # to avoid adding extra complexity to the rules. + self._normalized_task = self.raw_task + if isinstance(self._normalized_task, _MISSING_TYPE): + msg = "Task was not normalized" + raise RuntimeError(msg) + return self._normalized_task + + @property + def skip_tags(self) -> list[str]: + """Return the list of tags to skip.""" + skip_tags: list[str] = self.raw_task.get(SKIPPED_RULES_KEY, []) + return skip_tags + + def __repr__(self) -> str: + """Return a string representation of the task.""" + return f"Task('{self.name}' [{self.position}])" + + def get(self, key: str, default: Any = None) -> Any: + """Get a value from the task.""" + return self.normalized_task.get(key, default) + + def __getitem__(self, index: str) -> Any: + """Allow access as task[...].""" + return self.normalized_task[index] + + def __iter__(self) -> Generator[str, None, None]: + """Provide support for 'key in task'.""" + yield from (f for f in self.normalized_task) + + +def task_in_list( + data: AnsibleBaseYAMLObject, + file: Lintable, + kind: str, + position: str = ".", +) -> Iterator[Task]: + """Get action tasks from block structures.""" + + def each_entry(data: AnsibleBaseYAMLObject, position: str) -> Iterator[Task]: + if not data: + return + for entry_index, entry in enumerate(data): + if not entry: + continue + _pos = f"{position}[{entry_index}]" + if isinstance(entry, dict): + yield Task( + entry, + position=_pos, + ) + for block in [k for k in entry if k in NESTED_TASK_KEYS]: + yield from task_in_list( + data=entry[block], + file=file, + kind="tasks", + position=f"{_pos}.{block}", + ) + + if not isinstance(data, list): + return + if kind == "playbook": + attributes = ["tasks", "pre_tasks", "post_tasks", "handlers"] + for item_index, item in enumerate(data): + for attribute in attributes: + if not isinstance(item, dict): + continue + if attribute in item: + if isinstance(item[attribute], list): + yield from each_entry( + item[attribute], + f"{position }[{item_index}].{attribute}", + ) + elif item[attribute] is not None: + msg = f"Key '{attribute}' defined, but bad value: '{item[attribute]!s}'" + raise RuntimeError(msg) + else: + yield from each_entry(data, position) + + +def add_action_type(actions: AnsibleBaseYAMLObject, action_type: str) -> list[Any]: + """Add action markers to task objects.""" + results = [] + for action in actions: + # ignore empty task + if not action: + continue + action["__ansible_action_type__"] = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type] + results.append(action) + return results + + +def get_action_tasks(data: AnsibleBaseYAMLObject, file: Lintable) -> list[Any]: + """Get a flattened list of action tasks from the file.""" + tasks = [] + if file.kind in ["tasks", "handlers"]: + tasks = 
add_action_type(data, file.kind) + else: + tasks.extend(extract_from_list(data, PLAYBOOK_TASK_KEYWORDS)) + + # Add sub-elements of block/rescue/always to tasks list + tasks.extend(extract_from_list(tasks, NESTED_TASK_KEYS, recursive=True)) + + return tasks + + +@cache +def parse_yaml_linenumbers( + lintable: Lintable, +) -> AnsibleBaseYAMLObject: + """Parse yaml as ansible.utils.parse_yaml but with linenumbers. + + The line numbers are stored in each node's LINE_NUMBER_KEY key. + """ + result = [] + + def compose_node(parent: yaml.nodes.Node, index: int) -> yaml.nodes.Node: + # the line number where the previous token has ended (plus empty lines) + line = loader.line + node = Composer.compose_node(loader, parent, index) + if not isinstance(node, yaml.nodes.Node): + msg = "Unexpected yaml data." + raise RuntimeError(msg) + node.__line__ = line + 1 # type: ignore[attr-defined] + return node + + def construct_mapping( + node: AnsibleBaseYAMLObject, + *, + deep: bool = False, + ) -> AnsibleMapping: + mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep) + if hasattr(node, "__line__"): + mapping[LINE_NUMBER_KEY] = node.__line__ + else: + mapping[ + LINE_NUMBER_KEY + ] = mapping._line_number # pylint: disable=protected-access # noqa: SLF001 + mapping[FILENAME_KEY] = lintable.path + return mapping + + try: + kwargs = {} + if "vault_password" in inspect.getfullargspec(AnsibleLoader.__init__).args: + kwargs["vault_password"] = DEFAULT_VAULT_PASSWORD + loader = AnsibleLoader(lintable.content, **kwargs) + loader.compose_node = compose_node + loader.construct_mapping = construct_mapping + # while Ansible only accepts single documents, we also need to load + # multi-documents, as we attempt to load any YAML file, not only + # Ansible managed ones. + while True: + data = loader.get_data() + if data is None: + break + result.append(data) + except ( + yaml.parser.ParserError, + yaml.scanner.ScannerError, + yaml.constructor.ConstructorError, + ) as exc: + msg = "Failed to load YAML file" + raise RuntimeError(msg) from exc + + if len(result) == 0: + return None # empty documents + if len(result) == 1: + return result[0] + return result + + +def get_cmd_args(task: dict[str, Any]) -> str: + """Extract the args from a cmd task as a string.""" + if "cmd" in task["action"]: + args = task["action"]["cmd"] + else: + args = task["action"].get("_raw_params", []) + if not isinstance(args, str): + return " ".join(args) + return args + + +def get_first_cmd_arg(task: dict[str, Any]) -> Any: + """Extract the first arg from a cmd task.""" + try: + first_cmd_arg = get_cmd_args(task).split()[0] + except IndexError: + return None + return first_cmd_arg + + +def get_second_cmd_arg(task: dict[str, Any]) -> Any: + """Extract the second arg from a cmd task.""" + try: + second_cmd_arg = get_cmd_args(task).split()[1] + except IndexError: + return None + return second_cmd_arg + + +def is_playbook(filename: str) -> bool: + """Check if the file is a playbook. + + Given a filename, it should return true if it looks like a playbook. The + function is not supposed to raise exceptions. 
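+ + For example, a YAML file whose top-level document is a sequence of mappings + using keys such as ``hosts``, ``tasks`` or ``roles`` is treated as a playbook.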
+ """ + # we assume is a playbook if we loaded a sequence of dictionaries where + # at least one of these keys is present: + playbooks_keys = { + "gather_facts", + "hosts", + "import_playbook", + "post_tasks", + "pre_tasks", + "roles", + "tasks", + } + + # makes it work with Path objects by converting them to strings + if not isinstance(filename, str): + filename = str(filename) + + try: + f = parse_yaml_from_file(filename) + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 + _logger.warning( + "Failed to load %s with %s, assuming is not a playbook.", + filename, + exc, + ) + else: + if ( + isinstance(f, AnsibleSequence) + and hasattr(next(iter(f), {}), "keys") + and playbooks_keys.intersection(next(iter(f), {}).keys()) + ): + return True + return False + + +# pylint: disable=too-many-statements +def get_lintables( + opts: Options = options, + args: list[str] | None = None, +) -> list[Lintable]: + """Detect files and directories that are lintable.""" + lintables: list[Lintable] = [] + + # passing args bypass auto-detection mode + if args: + for arg in args: + lintable = Lintable(arg) + lintables.append(lintable) + else: + for filename in discover_lintables(opts): + path = Path(filename) + lintables.append(Lintable(path)) + + # stage 2: guess roles from current lintables, as there is no unique + # file that must be present in any kind of role. + _extend_with_roles(lintables) + + return lintables + + +def _extend_with_roles(lintables: list[Lintable]) -> None: + """Detect roles among lintables and adds them to the list.""" + for lintable in lintables: + parts = lintable.path.parent.parts + if "roles" in parts: + role = lintable.path + while role.parent.name != "roles" and role.name: + role = role.parent + if role.exists() and not role.is_file(): + lintable = Lintable(role) + if lintable.kind == "role" and lintable not in lintables: + _logger.debug("Added role: %s", lintable) + lintables.append(lintable) + + +def convert_to_boolean(value: Any) -> bool: + """Use Ansible to convert something to a boolean.""" + return bool(boolean(value)) diff --git a/src/ansiblelint/version.py b/src/ansiblelint/version.py new file mode 100644 index 0000000..a65c3cf --- /dev/null +++ b/src/ansiblelint/version.py @@ -0,0 +1,14 @@ +"""Ansible-lint version information.""" +try: + from ._version import version as __version__ +except ImportError: # pragma: no cover + try: + import pkg_resources + + __version__ = pkg_resources.get_distribution("ansible-lint").version + except Exception: # pylint: disable=broad-except # noqa: BLE001 + # this is the fallback SemVer version picked by setuptools_scm when tag + # information is not available. 
+ __version__ = "0.1.dev1" + +__all__ = ("__version__",) diff --git a/src/ansiblelint/yaml_utils.py b/src/ansiblelint/yaml_utils.py new file mode 100644 index 0000000..cc7e9ef --- /dev/null +++ b/src/ansiblelint/yaml_utils.py @@ -0,0 +1,1119 @@ +"""Utility helpers to simplify working with yaml-based data.""" +# pylint: disable=too-many-lines +from __future__ import annotations + +import functools +import logging +import os +import re +from collections.abc import Iterator, Sequence +from io import StringIO +from pathlib import Path +from re import Pattern +from typing import TYPE_CHECKING, Any, Callable, Union, cast + +import ruamel.yaml.events +from ruamel.yaml.comments import CommentedMap, CommentedSeq, Format +from ruamel.yaml.constructor import RoundTripConstructor +from ruamel.yaml.emitter import Emitter, ScalarAnalysis + +# Module 'ruamel.yaml' does not explicitly export attribute 'YAML'; implicit reexport disabled +# To make the type checkers happy, we import from ruamel.yaml.main instead. +from ruamel.yaml.main import YAML +from ruamel.yaml.scalarint import ScalarInt +from yamllint.config import YamlLintConfig + +from ansiblelint.constants import ( + ANNOTATION_KEYS, + NESTED_TASK_KEYS, + PLAYBOOK_TASK_KEYWORDS, +) +from ansiblelint.utils import Task + +if TYPE_CHECKING: + # noinspection PyProtectedMember + from ruamel.yaml.comments import LineCol # pylint: disable=ungrouped-imports + from ruamel.yaml.nodes import ScalarNode + from ruamel.yaml.representer import RoundTripRepresenter + from ruamel.yaml.tokens import CommentToken + + from ansiblelint.file_utils import Lintable + +_logger = logging.getLogger(__name__) + +YAMLLINT_CONFIG = """ +extends: default +rules: + comments: + # https://github.com/prettier/prettier/issues/6780 + min-spaces-from-content: 1 + # https://github.com/adrienverge/yamllint/issues/384 + comments-indentation: false + document-start: disable + # 160 chars was the default used by old E204 rule, but + # you can easily change it or disable in your .yamllint file. + line-length: + max: 160 + # We are adding an extra space inside braces as that's how prettier does it + # and we are trying not to fight other linters. + braces: + min-spaces-inside: 0 # yamllint defaults to 0 + max-spaces-inside: 1 # yamllint defaults to 0 + octal-values: + forbid-implicit-octal: true # yamllint defaults to false + forbid-explicit-octal: true # yamllint defaults to false +""" + + +def deannotate(data: Any) -> Any: + """Remove our annotations like __file__ and __line__ and return a JSON serializable object.""" + if isinstance(data, dict): + result = data.copy() + for key, value in data.items(): + if key in ANNOTATION_KEYS: + del result[key] + else: + result[key] = deannotate(value) + return result + if isinstance(data, list): + return [deannotate(item) for item in data if item not in ANNOTATION_KEYS] + return data + + +@functools.lru_cache(maxsize=1) +def load_yamllint_config() -> YamlLintConfig: + """Load our default yamllint config and any customized override file.""" + config = YamlLintConfig(content=YAMLLINT_CONFIG) + # if we detect local yamllint config we use it but raise a warning + # as this is likely to get out of sync with our internal config. 
+ for path in [ + ".yamllint", + ".yamllint.yaml", + ".yamllint.yml", + os.getenv("YAMLLINT_CONFIG_FILE", ""), + os.getenv("XDG_CONFIG_HOME", "~/.config") + "/yamllint/config", + ]: + file = Path(path).expanduser() + if file.is_file(): + _logger.debug( + "Loading custom %s config file, this extends our " + "internal yamllint config.", + file, + ) + config_override = YamlLintConfig(file=str(file)) + config_override.extend(config) + config = config_override + break + _logger.debug("Effective yamllint rules used: %s", config.rules) + return config + + +def nested_items_path( + data_collection: dict[Any, Any] | list[Any], + ignored_keys: Sequence[str] = (), +) -> Iterator[tuple[Any, Any, list[str | int]]]: + """Iterate a nested data structure, yielding key/index, value, and parent_path. + + This is a recursive function that calls itself for each nested layer of data. + Each iteration yields: + + 1. the current item's dictionary key or list index, + 2. the current item's value, and + 3. the path to the current item from the outermost data structure. + + For dicts, the yielded (1) key and (2) value are what ``dict.items()`` yields. + For lists, the yielded (1) index and (2) value are what ``enumerate()`` yields. + The final component, the parent path, is a list of dict keys and list indexes. + The parent path can be helpful in providing error messages that indicate + precisely which part of a yaml file (or other data structure) needs to be fixed. + + For example, given this playbook: + + .. code-block:: yaml + + - name: A play + tasks: + - name: A task + debug: + msg: foobar + + Here's the first and last yielded items: + + .. code-block:: python + + >>> playbook=[{"name": "a play", "tasks": [{"name": "a task", "debug": {"msg": "foobar"}}]}] + >>> next( nested_items_path( playbook ) ) + (0, {'name': 'a play', 'tasks': [{'name': 'a task', 'debug': {'msg': 'foobar'}}]}, []) + >>> list( nested_items_path( playbook ) )[-1] + ('msg', 'foobar', [0, 'tasks', 0, 'debug']) + + Note that, for outermost data structure, the parent path is ``[]`` because + you do not need to descend into any nested dicts or lists to find the indicated + key and value. + + If a rule were designed to prohibit "foobar" debug messages, it could use the + parent path to provide a path to the problematic ``msg``. It might use a jq-style + path in its error message: "the error is at ``.[0].tasks[0].debug.msg``". + Or if a utility could automatically fix issues, it could use the path to descend + to the parent object using something like this: + + .. code-block:: python + + target = data + for segment in parent_path: + target = target[segment] + + :param data_collection: The nested data (dicts or lists). + + :returns: each iteration yields the key (of the parent dict) or the index (lists) + """ + # As typing and mypy cannot effectively ensure we are called only with + # valid data, we better ignore NoneType + if data_collection is None: + return + data: dict[Any, Any] | list[Any] + if isinstance(data_collection, Task): + data = data_collection.normalized_task + else: + data = data_collection + yield from _nested_items_path( + data_collection=data, + parent_path=[], + ignored_keys=ignored_keys, + ) + + +def _nested_items_path( + data_collection: dict[Any, Any] | list[Any], + parent_path: list[str | int], + ignored_keys: Sequence[str] = (), +) -> Iterator[tuple[Any, Any, list[str | int]]]: + """Iterate through data_collection (internal implementation of nested_items_path). 
+ + This is a separate function because callers of nested_items_path should + not be using the parent_path param which is used in recursive _nested_items_path + calls to build up the path to the parent object of the current key/index, value. + """ + # we have to cast each convert_to_tuples assignment or mypy complains + # that both assignments (for dict and list) do not have the same type + convert_to_tuples_type = Callable[[], Iterator[tuple[Union[str, int], Any]]] + if isinstance(data_collection, dict): + convert_data_collection_to_tuples = cast( + convert_to_tuples_type, + functools.partial(data_collection.items), + ) + elif isinstance(data_collection, list): + convert_data_collection_to_tuples = cast( + convert_to_tuples_type, + functools.partial(enumerate, data_collection), + ) + else: + msg = f"Expected a dict or a list but got {data_collection!r} of type '{type(data_collection)}'" + raise TypeError(msg) + for key, value in convert_data_collection_to_tuples(): + if key in (*ANNOTATION_KEYS, *ignored_keys): + continue + yield key, value, parent_path + if isinstance(value, (dict, list)): + yield from _nested_items_path( + data_collection=value, + parent_path=[*parent_path, key], + ) + + +def get_path_to_play( + lintable: Lintable, + lineno: int, # 1-based + ruamel_data: CommentedMap | CommentedSeq, +) -> list[str | int]: + """Get the path to the play in the given file at the given line number.""" + if lineno < 1: + msg = f"expected lineno >= 1, got {lineno}" + raise ValueError(msg) + if lintable.kind != "playbook" or not isinstance(ruamel_data, CommentedSeq): + return [] + lc: LineCol # lc uses 0-based counts # pylint: disable=invalid-name + # lineno is 1-based. Convert to 0-based. + line_index = lineno - 1 + + prev_play_line_index = ruamel_data.lc.line + last_play_index = len(ruamel_data) + for play_index, play in enumerate(ruamel_data): + next_play_index = play_index + 1 + if last_play_index > next_play_index: + next_play_line_index = ruamel_data[next_play_index].lc.line + else: + next_play_line_index = None + + lc = play.lc # pylint: disable=invalid-name + if not isinstance(lc.line, int): + msg = f"expected lc.line to be an int, got {lc.line!r}" + raise RuntimeError(msg) + if lc.line == line_index: + return [play_index] + if play_index > 0 and prev_play_line_index < line_index < lc.line: + return [play_index - 1] + # The previous play check (above) can't catch the last play, + # so, handle the last play separately. 
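+ # e.g. with two plays starting on source lines 1 and 10, lineno=12 falls past + # the last play's first line with no later play, so the branch below yields [1].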
+ if ( + next_play_index == last_play_index + and line_index > lc.line + and (next_play_line_index is None or line_index < next_play_line_index) + ): + # part of this (last) play + return [play_index] + prev_play_line_index = play.lc.line + return [] + + +def get_path_to_task( + lintable: Lintable, + lineno: int, # 1-based + ruamel_data: CommentedMap | CommentedSeq, +) -> list[str | int]: + """Get the path to the task in the given file at the given line number.""" + if lineno < 1: + msg = f"expected lineno >= 1, got {lineno}" + raise ValueError(msg) + if lintable.kind in ("tasks", "handlers", "playbook"): + if not isinstance(ruamel_data, CommentedSeq): + msg = f"expected ruamel_data to be a CommentedSeq, got {ruamel_data!r}" + raise ValueError(msg) + if lintable.kind in ("tasks", "handlers"): + return _get_path_to_task_in_tasks_block(lineno, ruamel_data) + if lintable.kind == "playbook": + return _get_path_to_task_in_playbook(lineno, ruamel_data) + + return [] + + +def _get_path_to_task_in_playbook( + lineno: int, # 1-based + ruamel_data: CommentedSeq, +) -> list[str | int]: + """Get the path to the task in the given playbook data at the given line number.""" + last_play_index = len(ruamel_data) + for play_index, play in enumerate(ruamel_data): + next_play_index = play_index + 1 + if last_play_index > next_play_index: + next_play_line_index = ruamel_data[next_play_index].lc.line + else: + next_play_line_index = None + + play_keys = list(play.keys()) + for tasks_keyword in PLAYBOOK_TASK_KEYWORDS: + if not play.get(tasks_keyword): + continue + + try: + next_keyword = play_keys[play_keys.index(tasks_keyword) + 1] + except IndexError: + next_block_line_index = None + else: + next_block_line_index = play.lc.data[next_keyword][0] + # last_lineno_in_block is 1-based; next_*_line_index is 0-based + # next_*_line_index - 1 to get line before next_*_line_index. + # Then + 1 to make it a 1-based number. + if next_block_line_index is not None: + last_lineno_in_block = next_block_line_index + elif next_play_line_index is not None: + last_lineno_in_block = next_play_line_index + else: + last_lineno_in_block = None + + task_path = _get_path_to_task_in_tasks_block( + lineno, + play[tasks_keyword], + last_lineno_in_block, + ) + if task_path: + # mypy gets confused without this typehint + tasks_keyword_path: list[int | str] = [ + play_index, + tasks_keyword, + ] + return tasks_keyword_path + list(task_path) + # lineno is before first play or no tasks keywords in any of the plays + return [] + + +def _get_path_to_task_in_tasks_block( + lineno: int, # 1-based + tasks_block: CommentedSeq, + last_lineno: int | None = None, # 1-based +) -> list[str | int]: + """Get the path to the task in the given tasks block at the given line number.""" + task: CommentedMap | None + # lineno and last_lineno are 1-based. Convert to 0-based. 
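+ # last_lineno (when given) bounds the search so that a line belonging to the + # next play or block is not misattributed to this block's final task.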
+ line_index = lineno - 1 + last_line_index = None if last_lineno is None else last_lineno - 1 + + # lc (LineCol) uses 0-based counts + prev_task_line_index = tasks_block.lc.line + last_task_index = len(tasks_block) + for task_index, task in enumerate(tasks_block): + next_task_index = task_index + 1 + if last_task_index > next_task_index: + if tasks_block[next_task_index] is not None: + next_task_line_index = tasks_block[next_task_index].lc.line + else: + next_task_line_index = tasks_block.lc.item(next_task_index)[0] + else: + next_task_line_index = None + + if task is None: + # create a dummy task to represent the null task + task = CommentedMap() + task.lc.line, task.lc.col = tasks_block.lc.item(task_index) + + nested_task_keys = set(task.keys()).intersection(set(NESTED_TASK_KEYS)) + if nested_task_keys: + subtask_path = _get_path_to_task_in_nested_tasks_block( + lineno, + task, + nested_task_keys, + next_task_line_index, + ) + if subtask_path: + # mypy gets confused without this typehint + task_path: list[str | int] = [task_index] + return task_path + list(subtask_path) + + if not isinstance(task.lc.line, int): + msg = f"expected task.lc.line to be an int, got {task.lc.line!r}" + raise RuntimeError(msg) + if task.lc.line == line_index: + return [task_index] + if task_index > 0 and prev_task_line_index < line_index < task.lc.line: + return [task_index - 1] + # The previous task check can't catch the last task, + # so, handle the last task separately (also after subtask checks). + # pylint: disable=too-many-boolean-expressions + if ( + next_task_index == last_task_index + and line_index > task.lc.line + and (next_task_line_index is None or line_index < next_task_line_index) + and (last_line_index is None or line_index <= last_line_index) + ): + # part of this (last) task + return [task_index] + prev_task_line_index = task.lc.line + # line is not part of this tasks block + return [] + + +def _get_path_to_task_in_nested_tasks_block( + lineno: int, # 1-based + task: CommentedMap, + nested_task_keys: set[str], + next_task_line_index: int | None = None, # 0-based +) -> list[str | int]: + """Get the path to the task in the given nested tasks block.""" + # loop through the keys in line order + task_keys = list(task.keys()) + task_keys_by_index = dict(enumerate(task_keys)) + for task_index, task_key in enumerate(task_keys): + nested_task_block = task[task_key] + if task_key not in nested_task_keys or not nested_task_block: + continue + next_task_key = task_keys_by_index.get(task_index + 1, None) + if next_task_key is not None: + next_task_key_line_index = task.lc.data[next_task_key][0] + else: + next_task_key_line_index = None + # last_lineno_in_block is 1-based; next_*_line_index is 0-based + # next_*_line_index - 1 to get line before next_*_line_index. + # Then + 1 to make it a 1-based number. 
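+ # e.g. when a task has both `block:` and `rescue:`, the line of the `rescue` + # key caps the range searched for tasks nested under `block`.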
+ last_lineno_in_block = ( + next_task_key_line_index + if next_task_key_line_index is not None + else next_task_line_index + ) + subtask_path = _get_path_to_task_in_tasks_block( + lineno, + nested_task_block, + last_lineno_in_block, # 1-based + ) + if subtask_path: + return [task_key, *list(subtask_path)] + # line is not part of this nested tasks block + return [] + + +class OctalIntYAML11(ScalarInt): + """OctalInt representation for YAML 1.1.""" + + # tell mypy that ScalarInt has these attributes + _width: Any + _underscore: Any + + def __new__(cls, *args: Any, **kwargs: Any) -> Any: + """Create a new int with ScalarInt-defined attributes.""" + return ScalarInt.__new__(cls, *args, **kwargs) + + @staticmethod + def represent_octal(representer: RoundTripRepresenter, data: OctalIntYAML11) -> Any: + """Return a YAML 1.1 octal representation. + + Based on ruamel.yaml.representer.RoundTripRepresenter.represent_octal_int() + (which only handles the YAML 1.2 octal representation). + """ + v = format(data, "o") + anchor = data.yaml_anchor(any=True) + # noinspection PyProtectedMember + # pylint: disable=protected-access + return representer.insert_underscore( + "0", + v, + data._underscore, # noqa: SLF001 + anchor=anchor, + ) + + +class CustomConstructor(RoundTripConstructor): + """Custom YAML constructor that preserves Octal formatting in YAML 1.1.""" + + def construct_yaml_int(self, node: ScalarNode) -> Any: + """Construct int while preserving Octal formatting in YAML 1.1. + + ruamel.yaml only preserves the octal format for YAML 1.2. + For 1.1, it converts the octal to an int. So, we preserve the format. + + Code partially copied from ruamel.yaml (MIT licensed). + """ + ret = super().construct_yaml_int(node) + if self.resolver.processing_version == (1, 1) and isinstance(ret, int): + # Do not rewrite zero as octal. + if ret == 0: + return ret + # see if we've got an octal we need to preserve. + value_su = self.construct_scalar(node) + try: + v = value_su.rstrip("_") + underscore = [len(v) - v.rindex("_") - 1, False, False] # type: Any + except ValueError: + underscore = None + except IndexError: + underscore = None + value_s = value_su.replace("_", "") + if value_s[0] in "+-": + value_s = value_s[1:] + if value_s[0] == "0": + # got an octal in YAML 1.1 + ret = OctalIntYAML11( + ret, + width=None, + underscore=underscore, + anchor=node.anchor, + ) + return ret + + +CustomConstructor.add_constructor( + "tag:yaml.org,2002:int", + CustomConstructor.construct_yaml_int, +) + + +class FormattedEmitter(Emitter): + """Emitter that applies custom formatting rules when dumping YAML. + + Differences from ruamel.yaml defaults: + + - indentation of root-level sequences + - prefer double-quoted scalars over single-quoted scalars + + This ensures that root-level sequences are never indented. + All subsequent levels are indented as configured (normal ruamel.yaml behavior). + + Earlier implementations used dedent on ruamel.yaml's dumped output, + but string magic like that had a ton of problematic edge cases. 
+ """ + + preferred_quote = '"' # either " or ' + + min_spaces_inside = 0 + max_spaces_inside = 1 + + _sequence_indent = 2 + _sequence_dash_offset = 0 # Should be _sequence_indent - 2 + _root_is_sequence = False + + _in_empty_flow_map = False + + @property + def _is_root_level_sequence(self) -> bool: + """Return True if this is a sequence at the root level of the yaml document.""" + return self.column < 2 and self._root_is_sequence + + def expect_document_root(self) -> None: + """Expect doc root (extend to record if the root doc is a sequence).""" + self._root_is_sequence = isinstance( + self.event, + ruamel.yaml.events.SequenceStartEvent, + ) + return super().expect_document_root() + + # NB: mypy does not support overriding attributes with properties yet: + # https://github.com/python/mypy/issues/4125 + # To silence we have to ignore[override] both the @property and the method. + + @property + def best_sequence_indent(self) -> int: + """Return the configured sequence_indent or 2 for root level.""" + return 2 if self._is_root_level_sequence else self._sequence_indent + + @best_sequence_indent.setter + def best_sequence_indent(self, value: int) -> None: + """Configure how many columns to indent each sequence item (including the '-').""" + self._sequence_indent = value + + @property + def sequence_dash_offset(self) -> int: + """Return the configured sequence_dash_offset or 0 for root level.""" + return 0 if self._is_root_level_sequence else self._sequence_dash_offset + + @sequence_dash_offset.setter + def sequence_dash_offset(self, value: int) -> None: + """Configure how many spaces to put before each sequence item's '-'.""" + self._sequence_dash_offset = value + + def choose_scalar_style(self) -> Any: + """Select how to quote scalars if needed.""" + style = super().choose_scalar_style() + if ( + style == "" # noqa: PLC1901 + and self.event.value.startswith("0") + and len(self.event.value) > 1 + ): + if self.event.tag == "tag:yaml.org,2002:int" and self.event.implicit[0]: + # ensures that "0123" string does not lose its quoting + self.event.tag = "tag:yaml.org,2002:str" + self.event.implicit = (True, True, True) + return '"' + if style != "'": + # block scalar, double quoted, etc. + return style + if '"' in self.event.value: + return "'" + return self.preferred_quote + + def write_indicator( + self, + indicator: str, # ruamel.yaml typehint is wrong. This is a string. + need_whitespace: bool, + whitespace: bool = False, # noqa: FBT002 + indention: bool = False, # (sic) ruamel.yaml has this typo in their API # noqa: FBT002 + ) -> None: + """Make sure that flow maps get whitespace by the curly braces.""" + # We try to go with one whitespace by the curly braces and adjust accordingly + # to what min_spaces_inside and max_spaces_inside are set to. + # This assumes min_spaces_inside <= max_spaces_inside + spaces_inside = min( + max(1, self.min_spaces_inside), + self.max_spaces_inside if self.max_spaces_inside != -1 else 1, + ) + # If this is the end of the flow mapping that isn't on a new line: + if ( + indicator == "}" + and (self.column or 0) > (self.indent or 0) + and not self._in_empty_flow_map + ): + indicator = (" " * spaces_inside) + "}" + super().write_indicator(indicator, need_whitespace, whitespace, indention) + # if it is the start of a flow mapping, and it's not time + # to wrap the lines, insert a space. 
+ if indicator == "{" and self.column < self.best_width: + if self.check_empty_mapping(): + self._in_empty_flow_map = True + else: + self.column += 1 + self.stream.write(" " * spaces_inside) + self._in_empty_flow_map = False + + # "/n/n" results in one blank line (end the previous line, then newline). + # So, "/n/n/n" or more is too many new lines. Clean it up. + _re_repeat_blank_lines: Pattern[str] = re.compile(r"\n{3,}") + + @staticmethod + def add_octothorpe_protection(string: str) -> str: + """Modify strings to protect "#" from full-line-comment post-processing.""" + try: + if "#" in string: + # # is \uFF03 (fullwidth number sign) + # ﹟ is \uFE5F (small number sign) + string = string.replace("#", "\uFF03#\uFE5F") + # this is safe even if this sequence is present + # because it gets reversed in post-processing + except (ValueError, TypeError): + # probably not really a string. Whatever. + pass + return string + + @staticmethod + def drop_octothorpe_protection(string: str) -> str: + """Remove string protection of "#" after full-line-comment post-processing.""" + try: + if "\uFF03#\uFE5F" in string: + # # is \uFF03 (fullwidth number sign) + # ﹟ is \uFE5F (small number sign) + string = string.replace("\uFF03#\uFE5F", "#") + except (ValueError, TypeError): + # probably not really a string. Whatever. + pass + return string + + def analyze_scalar(self, scalar: str) -> ScalarAnalysis: + """Determine quoting and other requirements for string. + + And protect "#" from full-line-comment post-processing. + """ + analysis: ScalarAnalysis = super().analyze_scalar(scalar) + if analysis.empty: + return analysis + analysis.scalar = self.add_octothorpe_protection(analysis.scalar) + return analysis + + # comment is a CommentToken, not Any (Any is ruamel.yaml's lazy type hint). + def write_comment( + self, + comment: CommentToken, + pre: bool = False, # noqa: FBT002 + ) -> None: + """Clean up extra new lines and spaces in comments. + + ruamel.yaml treats new or empty lines as comments. + See: https://stackoverflow.com/questions/42708668/removing-all-blank-lines-but-not-comments-in-ruamel-yaml/42712747#42712747 + """ + value: str = comment.value + if ( + pre + and not value.strip() + and not isinstance( + self.event, + ( + ruamel.yaml.events.CollectionEndEvent, + ruamel.yaml.events.DocumentEndEvent, + ruamel.yaml.events.StreamEndEvent, + ), + ) + ): + # drop pure whitespace pre comments + # does not apply to End events since they consume one of the newlines. + value = "" + elif pre: + # preserve content in pre comment with at least one newline, + # but no extra blank lines. + value = self._re_repeat_blank_lines.sub("\n", value) + else: + # single blank lines in post comments + value = self._re_repeat_blank_lines.sub("\n\n", value) + comment.value = value + + # make sure that the eol comment only has one space before it. + if comment.column > self.column + 1 and not pre: + comment.column = self.column + 1 + + return super().write_comment(comment, pre) + + def write_version_directive(self, version_text: Any) -> None: + """Skip writing '%YAML 1.1'.""" + if version_text == "1.1": + return + super().write_version_directive(version_text) + + +# pylint: disable=too-many-instance-attributes +class FormattedYAML(YAML): + """A YAML loader/dumper that handles ansible content better by default.""" + + def __init__( + self, + *, + typ: str | None = None, + pure: bool = False, + output: Any = None, + plug_ins: list[str] | None = None, + ): + """Return a configured ``ruamel.yaml.YAML`` instance. 
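+
+        A minimal, illustrative round-trip (the variable names below are
+        placeholders, not part of this module):
+
+        .. code:: python
+
+            yaml = FormattedYAML()
+            data = yaml.loads(original_text)  # original_text: file contents as str
+            new_text = yaml.dumps(data)       # reformatted per the settings below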
+ + Some config defaults get extracted from the yamllint config. + + ``ruamel.yaml.YAML`` uses attributes to configure how it dumps yaml files. + Some of these settings can be confusing, so here are examples of how different + settings will affect the dumped yaml. + + This example does not indent any sequences: + + .. code:: python + + yaml.explicit_start=True + yaml.map_indent=2 + yaml.sequence_indent=2 + yaml.sequence_dash_offset=0 + + .. code:: yaml + + --- + - name: A playbook + tasks: + - name: Task + + This example indents all sequences including the root-level: + + .. code:: python + + yaml.explicit_start=True + yaml.map_indent=2 + yaml.sequence_indent=4 + yaml.sequence_dash_offset=2 + # yaml.Emitter defaults to ruamel.yaml.emitter.Emitter + + .. code:: yaml + + --- + - name: Playbook + tasks: + - name: Task + + This example indents all sequences except at the root-level: + + .. code:: python + + yaml.explicit_start=True + yaml.map_indent=2 + yaml.sequence_indent=4 + yaml.sequence_dash_offset=2 + yaml.Emitter = FormattedEmitter # custom Emitter prevents root-level indents + + .. code:: yaml + + --- + - name: Playbook + tasks: + - name: Task + """ + # Default to reading/dumping YAML 1.1 (ruamel.yaml defaults to 1.2) + self._yaml_version_default: tuple[int, int] = (1, 1) + self._yaml_version: str | tuple[int, int] = self._yaml_version_default + + super().__init__(typ=typ, pure=pure, output=output, plug_ins=plug_ins) + + # NB: We ignore some mypy issues because ruamel.yaml typehints are not great. + + config = self._defaults_from_yamllint_config() + + # these settings are derived from yamllint config + self.explicit_start: bool = config["explicit_start"] # type: ignore[assignment] + self.explicit_end: bool = config["explicit_end"] # type: ignore[assignment] + self.width: int = config["width"] # type: ignore[assignment] + indent_sequences: bool = cast(bool, config["indent_sequences"]) + preferred_quote: str = cast(str, config["preferred_quote"]) # either ' or " + + min_spaces_inside: int = cast(int, config["min_spaces_inside"]) + max_spaces_inside: int = cast(int, config["max_spaces_inside"]) + + self.default_flow_style = False + self.compact_seq_seq = True # type: ignore[assignment] # dash after dash + self.compact_seq_map = True # type: ignore[assignment] # key after dash + + # Do not use yaml.indent() as it obscures the purpose of these vars: + self.map_indent = 2 + self.sequence_indent = 4 if indent_sequences else 2 + self.sequence_dash_offset = self.sequence_indent - 2 + + # If someone doesn't want our FormattedEmitter, they can change it. + self.Emitter = FormattedEmitter + + # ignore invalid preferred_quote setting + if preferred_quote in ['"', "'"]: + FormattedEmitter.preferred_quote = preferred_quote + # NB: default_style affects preferred_quote as well. + # self.default_style ∈ None (default), '', '"', "'", '|', '>' + + # spaces inside braces for flow mappings + FormattedEmitter.min_spaces_inside = min_spaces_inside + FormattedEmitter.max_spaces_inside = max_spaces_inside + + # We need a custom constructor to preserve Octal formatting in YAML 1.1 + self.Constructor = CustomConstructor + self.Representer.add_representer(OctalIntYAML11, OctalIntYAML11.represent_octal) + + # We should preserve_quotes loads all strings as a str subclass that carries + # a quote attribute. Will the str subclasses cause problems in transforms? + # Are there any other gotchas to this? + # + # This will only preserve quotes for strings read from the file. 
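+        # (so a value spelled 'single quoted' in the source file would keep its
+        # single quotes when dumped back out)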
+ # anything modified by the transform will use no quotes, preferred_quote, + # or the quote that results in the least amount of escaping. + + # If needed, we can use this to change null representation to be explicit + # (see https://stackoverflow.com/a/44314840/1134951) + # self.Representer.add_representer( + + @staticmethod + def _defaults_from_yamllint_config() -> dict[str, bool | int | str]: + """Extract FormattedYAML-relevant settings from yamllint config if possible.""" + config = { + "explicit_start": True, + "explicit_end": False, + "width": 160, + "indent_sequences": True, + "preferred_quote": '"', + "min_spaces_inside": 0, + "max_spaces_inside": 1, + } + for rule, rule_config in load_yamllint_config().rules.items(): + if not rule_config: + # rule disabled + continue + + # refactor this if ... elif ... elif ... else monstrosity using match/case (PEP 634) once python 3.10 is mandatory + if rule == "document-start": + config["explicit_start"] = rule_config["present"] + elif rule == "document-end": + config["explicit_end"] = rule_config["present"] + elif rule == "line-length": + config["width"] = rule_config["max"] + elif rule == "braces": + min_spaces_inside = rule_config["min-spaces-inside"] + if min_spaces_inside: + config["min_spaces_inside"] = int(min_spaces_inside) + max_spaces_inside = rule_config["max-spaces-inside"] + if max_spaces_inside: + config["max_spaces_inside"] = int(max_spaces_inside) + elif rule == "indentation": + indent_sequences = rule_config["indent-sequences"] + # one of: bool, "whatever", "consistent" + # so, we use True for "whatever" and "consistent" + config["indent_sequences"] = bool(indent_sequences) + elif rule == "quoted-strings": + quote_type = rule_config["quote-type"] + # one of: single, double, any + if quote_type == "single": + config["preferred_quote"] = "'" + elif quote_type == "double": + config["preferred_quote"] = '"' + + return cast(dict[str, Union[bool, int, str]], config) + + @property # type: ignore[override] + def version(self) -> str | tuple[int, int]: + """Return the YAML version used to parse or dump. + + Ansible uses PyYAML which only supports YAML 1.1. ruamel.yaml defaults to 1.2. + So, we have to make sure we dump yaml files using YAML 1.1. + We can relax the version requirement once ansible uses a version of PyYAML + that includes this PR: https://github.com/yaml/pyyaml/pull/555 + """ + return self._yaml_version + + @version.setter + def version(self, value: str | tuple[int, int] | None) -> None: + """Ensure that yaml version uses our default value. + + The yaml Reader updates this value based on the ``%YAML`` directive in files. + So, if a file does not include the directive, it sets this to None. + But, None effectively resets the parsing version to YAML 1.2 (ruamel's default). + """ + self._yaml_version = value if value is not None else self._yaml_version_default + + def loads(self, stream: str) -> Any: + """Load YAML content from a string while avoiding known ruamel.yaml issues.""" + if not isinstance(stream, str): + msg = f"expected a str but got {type(stream)}" + raise NotImplementedError(msg) + # As ruamel drops comments for any document that is not a mapping or sequence, + # we need to avoid using it to reformat those documents. 
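+        # (for example, a document whose entire body is the scalar "true" plus a
+        # comment would otherwise come back with that comment dropped)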
+ # https://sourceforge.net/p/ruamel-yaml/tickets/460/ + + text, preamble_comment = self._pre_process_yaml(stream) + data = self.load(stream=text) + if preamble_comment is not None and isinstance( + data, + (CommentedMap, CommentedSeq), + ): + data.preamble_comment = preamble_comment # type: ignore[union-attr] + # Because data can validly also be None for empty documents, we cannot + # really annotate the return type here, so we need to remember to + # never save None or scalar data types when reformatting. + return data + + def dumps(self, data: Any) -> str: + """Dump YAML document to string (including its preamble_comment).""" + preamble_comment: str | None = getattr(data, "preamble_comment", None) + self._prevent_wrapping_flow_style(data) + with StringIO() as stream: + if preamble_comment: + stream.write(preamble_comment) + self.dump(data, stream) + text = stream.getvalue() + return self._post_process_yaml(text) + + def _prevent_wrapping_flow_style(self, data: Any) -> None: + if not isinstance(data, (CommentedMap, CommentedSeq)): + return + for key, value, parent_path in nested_items_path(data): + if not isinstance(value, (CommentedMap, CommentedSeq)): + continue + fa: Format = value.fa # pylint: disable=invalid-name + if fa.flow_style(): + predicted_indent = self._predict_indent_length(parent_path, key) + predicted_width = len(str(value)) + if predicted_indent + predicted_width > self.width: + # this flow-style map will probably get line-wrapped, + # so, switch it to block style to avoid the line wrap. + fa.set_block_style() + + def _predict_indent_length(self, parent_path: list[str | int], key: Any) -> int: + indent = 0 + + # each parent_key type tells us what the indent is for the next level. + for parent_key in parent_path: + if isinstance(parent_key, int) and indent == 0: + # root level is a sequence + indent += self.sequence_dash_offset + elif isinstance(parent_key, int): + # next level is a sequence + indent += cast(int, self.sequence_indent) + elif isinstance(parent_key, str): + # next level is a map + indent += cast(int, self.map_indent) + + if isinstance(key, int) and indent == 0: + # flow map is an item in a root-level sequence + indent += self.sequence_dash_offset + elif isinstance(key, int) and indent > 0: + # flow map is in a sequence + indent += cast(int, self.sequence_indent) + elif isinstance(key, str): + # flow map is in a map + indent += len(key + ": ") + + return indent + + # ruamel.yaml only preserves empty (no whitespace) blank lines + # (ie "/n/n" becomes "/n/n" but "/n /n" becomes "/n"). + # So, we need to identify whitespace-only lines to drop spaces before reading. + _whitespace_only_lines_re = re.compile(r"^ +$", re.MULTILINE) + + def _pre_process_yaml(self, text: str) -> tuple[str, str | None]: + """Handle known issues with ruamel.yaml loading. + + Preserve blank lines despite extra whitespace. + Preserve any preamble (aka header) comments before "---". + + For more on preamble comments, see: https://stackoverflow.com/questions/70286108/python-ruamel-yaml-package-how-to-get-header-comment-lines/70287507#70287507 + """ + text = self._whitespace_only_lines_re.sub("", text) + + # I investigated extending ruamel.yaml to capture preamble comments. 
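+        # (preamble comments being the "#" lines that sit above the "---"
+        # document start marker at the very top of a file)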
+        # preamble comment goes from:
+        #   DocumentStartToken.comment -> DocumentStartEvent.comment
+        # Then, in the composer:
+        #   once in composer.current_event
+        #   discards DocumentStartEvent
+        #   move DocumentStartEvent to composer.last_event
+        #   all document nodes get composed (events get used)
+        #   discard DocumentEndEvent
+        #   move DocumentEndEvent to composer.last_event
+        # So, there's no convenient way to extend the composer
+        # to somehow capture the comments and pass them on.
+
+        preamble_comments = []
+        if "\n---\n" not in text and "\n--- " not in text:
+            # nothing is before the document start mark,
+            # so there are no comments to preserve.
+            return text, None
+        for line in text.splitlines(True):
+            # We only need to capture the preamble comments. No need to remove them.
+            # lines might also include directives.
+            if line.lstrip().startswith("#") or line == "\n":
+                preamble_comments.append(line)
+            elif line.startswith("---"):
+                break
+
+        return text, "".join(preamble_comments) or None
+
+    @staticmethod
+    def _post_process_yaml(text: str) -> str:
+        """Handle known issues with ruamel.yaml dumping.
+
+        Make sure there's only one newline at the end of the file.
+
+        Fix the indent of full-line comments to match the indent of the next line.
+        See: https://stackoverflow.com/questions/71354698/how-can-i-use-the-ruamel-yaml-rtsc-mode/71355688#71355688
+        Also, remove the "#" protection from strings; that protection prevents them
+        from being mistakenly identified as full-line comments in post-processing.
+
+        Make sure null list items don't end in a space.
+        """
+        text = text.rstrip("\n") + "\n"
+
+        lines = text.splitlines(keepends=True)
+        full_line_comments: list[tuple[int, str]] = []
+        for i, line in enumerate(lines):
+            stripped = line.lstrip()
+            if not stripped:
+                # blank line. Move on.
+                continue
+
+            space_length = len(line) - len(stripped)
+
+            if stripped.startswith("#"):
+                # got a full line comment
+
+                # allow some full line comments to match the previous indent
+                if i > 0 and not full_line_comments and space_length:
+                    prev = lines[i - 1]
+                    prev_space_length = len(prev) - len(prev.lstrip())
+                    if prev_space_length == space_length:
+                        # if the indent matches the previous line's indent, skip it.
+                        continue
+
+                full_line_comments.append((i, stripped))
+            elif full_line_comments:
+                # end of full line comments so adjust to match indent of this line
+                spaces = " " * space_length
+                for index, comment in full_line_comments:
+                    lines[index] = spaces + comment
+                full_line_comments.clear()
+
+            cleaned = line.strip()
+            if not cleaned.startswith("#") and cleaned.endswith("-"):
+                # got an empty list item. drop any trailing spaces.
+                lines[i] = line.rstrip() + "\n"
+
+        text = "".join(
+            FormattedEmitter.drop_octothorpe_protection(line) for line in lines
+        )
+        return text
+
+
+def clean_json(
+    obj: Any,
+    func: Callable[[str], Any] = lambda key: key.startswith("__")
+    if isinstance(key, str)
+    else False,
+) -> Any:
+    """Remove all keys matching the condition from a nested JSON-like object.
+
+    :param obj: a JSON-like object to clean; it is also returned, for chaining.
+    :param func: a callable that takes a key as argument and returns True for each key to delete
+    """
+    if isinstance(obj, dict):
+        for key in list(obj.keys()):
+            if func(key):
+                del obj[key]
+            else:
+                clean_json(obj[key], func)
+    elif isinstance(obj, list):
+        for i in reversed(range(len(obj))):
+            if func(obj[i]):
+                del obj[i]
+            else:
+                clean_json(obj[i], func)
+    else:
+        # neither a dict nor a list, do nothing
+        pass
+    return obj
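+
+
+# A hypothetical illustration of clean_json with its default predicate: keys
+# beginning with "__" are removed recursively, everything else is preserved.
+#
+#     clean_json({"name": "demo", "__line__": 3, "tasks": [{"__file__": "x"}]})
+#     -> {"name": "demo", "tasks": [{}]}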