summaryrefslogtreecommitdiffstats
path: root/src/ansiblelint/rules/ignore_errors.py
blob: 29f04088d9f25c4841db402542610fbf440192b4 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
"""IgnoreErrorsRule used with ansible-lint."""

from __future__ import annotations

import sys
from typing import TYPE_CHECKING

from ansiblelint.rules import AnsibleLintRule

if TYPE_CHECKING:
    from ansiblelint.file_utils import Lintable
    from ansiblelint.utils import Task


class IgnoreErrorsRule(AnsibleLintRule):
    """Use failed_when and specify error conditions instead of using ignore_errors."""

    id = "ignore-errors"
    description = (
        "Instead of ignoring all errors, ignore the errors only when using ``{{ ansible_check_mode }}``, "
        "register the errors using ``register``, "
        "or use ``failed_when:`` and specify acceptable error conditions "
        "to reduce the risk of ignoring important failures."
    )
    severity = "LOW"
    tags = ["unpredictability"]
    version_added = "v5.0.7"

    def matchtask(
        self,
        task: Task,
        file: Lintable | None = None,
    ) -> bool | str:
        """Report a match when a task sets ignore_errors without mitigation.

        A task is acceptable when ignore_errors is falsy, is the
        check-mode expression, or when the outcome is captured via
        ``register`` (presumably handled later, e.g. by failed_when).
        """
        ignored = task.get("ignore_errors")
        # Falsy values (absent, False, empty) are not a problem.
        if not ignored:
            return False
        # Skipping failures only in check mode is an accepted idiom.
        if ignored == "{{ ansible_check_mode }}":
            return False
        # Registered output suggests the error is inspected downstream.
        return not task.get("register")


if "pytest" in sys.modules:
    import pytest

    if TYPE_CHECKING:
        from ansiblelint.testing import RunFromText

    IGNORE_ERRORS_TRUE = """
- hosts: all
  tasks:
    - name: Run apt-get update
      command: apt-get update
      ignore_errors: true
"""

    IGNORE_ERRORS_FALSE = """
- hosts: all
  tasks:
    - name: Run apt-get update
      command: apt-get update
      ignore_errors: false
"""

    IGNORE_ERRORS_CHECK_MODE = """
- hosts: all
  tasks:
    - name: Run apt-get update
      command: apt-get update
      ignore_errors: "{{ ansible_check_mode }}"
"""

    IGNORE_ERRORS_REGISTER = """
- hosts: all
  tasks:
    - name: Run apt-get update
      command: apt-get update
      ignore_errors: true
      register: ignore_errors_register
"""

    FAILED_WHEN = """
- hosts: all
  tasks:
    - name: Disable apport
      become: 'yes'
      lineinfile:
        line: "enabled=0"
        dest: /etc/default/apport
        mode: 0644
        state: present
      register: default_apport
      failed_when: default_apport.rc !=0 and not default_apport.rc == 257
"""

    # All tests run against the same rule; share one parametrize marker.
    _with_rule = pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )

    @_with_rule
    def test_ignore_errors_true(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_TRUE)
        assert len(results) == 1

    @_with_rule
    def test_ignore_errors_false(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors: false, oddly enough."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_FALSE)
        assert len(results) == 0

    @_with_rule
    def test_ignore_errors_check_mode(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors: "{{ ansible_check_mode }}"."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_CHECK_MODE)
        assert len(results) == 0

    @_with_rule
    def test_ignore_errors_register(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors: but output is registered and managed."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_REGISTER)
        assert len(results) == 0

    @_with_rule
    def test_failed_when(rule_runner: RunFromText) -> None:
        """Instead of ignore_errors, this task uses failed_when."""
        results = rule_runner.run_playbook(FAILED_WHEN)
        assert len(results) == 0