summaryrefslogtreecommitdiffstats
path: root/tests
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-15 18:05:20 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-15 18:05:20 +0000
commitc86df75ab11643fa4649cfe6ed5c4692d4ee342b (patch)
treede847f47ec2669e74b9a3459319579346b7c99df /tests
parentInitial commit. (diff)
downloadpre-commit-c56a389ed17c1ea9f9035b703ff94f6b26511d21.tar.xz
pre-commit-c56a389ed17c1ea9f9035b703ff94f6b26511d21.zip
Adding upstream version 3.6.2.upstream/3.6.2
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--tests/__init__.py0
-rw-r--r--tests/all_languages_test.py7
-rw-r--r--tests/clientlib_test.py481
-rw-r--r--tests/color_test.py61
-rw-r--r--tests/commands/__init__.py0
-rw-r--r--tests/commands/autoupdate_test.py532
-rw-r--r--tests/commands/clean_test.py35
-rw-r--r--tests/commands/gc_test.py164
-rw-r--r--tests/commands/hook_impl_test.py381
-rw-r--r--tests/commands/init_templatedir_test.py142
-rw-r--r--tests/commands/install_uninstall_test.py1104
-rw-r--r--tests/commands/migrate_config_test.py177
-rw-r--r--tests/commands/run_test.py1219
-rw-r--r--tests/commands/sample_config_test.py21
-rw-r--r--tests/commands/try_repo_test.py155
-rw-r--r--tests/commands/validate_config_test.py64
-rw-r--r--tests/commands/validate_manifest_test.py18
-rw-r--r--tests/conftest.py252
-rw-r--r--tests/envcontext_test.py103
-rw-r--r--tests/error_handler_test.py220
-rw-r--r--tests/git_test.py284
-rw-r--r--tests/lang_base_test.py166
-rw-r--r--tests/languages/__init__.py0
-rw-r--r--tests/languages/conda_test.py72
-rw-r--r--tests/languages/coursier_test.py45
-rw-r--r--tests/languages/dart_test.py62
-rw-r--r--tests/languages/docker_image_test.py27
-rw-r--r--tests/languages/docker_test.py197
-rw-r--r--tests/languages/dotnet_test.py154
-rw-r--r--tests/languages/fail_test.py14
-rw-r--r--tests/languages/golang_test.py167
-rw-r--r--tests/languages/haskell_test.py50
-rw-r--r--tests/languages/lua_test.py58
-rw-r--r--tests/languages/node_test.py152
-rw-r--r--tests/languages/perl_test.py69
-rw-r--r--tests/languages/pygrep_test.py144
-rw-r--r--tests/languages/python_test.py286
-rw-r--r--tests/languages/r_test.py223
-rw-r--r--tests/languages/ruby_test.py139
-rw-r--r--tests/languages/rust_test.py106
-rw-r--r--tests/languages/script_test.py14
-rw-r--r--tests/languages/swift_test.py31
-rw-r--r--tests/languages/system_test.py9
-rw-r--r--tests/logging_handler_test.py23
-rw-r--r--tests/main_test.py224
-rw-r--r--tests/meta_hooks/__init__.py0
-rw-r--r--tests/meta_hooks/check_hooks_apply_test.py140
-rw-r--r--tests/meta_hooks/check_useless_excludes_test.py139
-rw-r--r--tests/meta_hooks/identity_test.py8
-rw-r--r--tests/output_test.py11
-rw-r--r--tests/parse_shebang_test.py154
-rw-r--r--tests/prefix_test.py46
-rw-r--r--tests/repository_test.py533
-rw-r--r--tests/staged_files_only_test.py449
-rw-r--r--tests/store_test.py272
-rw-r--r--tests/util_test.py108
-rw-r--r--tests/xargs_test.py251
57 files changed, 9963 insertions, 0 deletions
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/all_languages_test.py b/tests/all_languages_test.py
new file mode 100644
index 0000000..98c9121
--- /dev/null
+++ b/tests/all_languages_test.py
@@ -0,0 +1,7 @@
+from __future__ import annotations
+
+from pre_commit.all_languages import languages
+
+
+def test_python_venv_is_an_alias_to_python():
+ assert languages['python_venv'] is languages['python']
diff --git a/tests/clientlib_test.py b/tests/clientlib_test.py
new file mode 100644
index 0000000..eaa8a04
--- /dev/null
+++ b/tests/clientlib_test.py
@@ -0,0 +1,481 @@
+from __future__ import annotations
+
+import logging
+import re
+
+import cfgv
+import pytest
+
+import pre_commit.constants as C
+from pre_commit.clientlib import check_type_tag
+from pre_commit.clientlib import CONFIG_HOOK_DICT
+from pre_commit.clientlib import CONFIG_REPO_DICT
+from pre_commit.clientlib import CONFIG_SCHEMA
+from pre_commit.clientlib import DEFAULT_LANGUAGE_VERSION
+from pre_commit.clientlib import MANIFEST_HOOK_DICT
+from pre_commit.clientlib import MANIFEST_SCHEMA
+from pre_commit.clientlib import META_HOOK_DICT
+from pre_commit.clientlib import OptionalSensibleRegexAtHook
+from pre_commit.clientlib import OptionalSensibleRegexAtTop
+from pre_commit.clientlib import parse_version
+from testing.fixtures import sample_local_config
+
+
+def is_valid_according_to_schema(obj, obj_schema):
+ try:
+ cfgv.validate(obj, obj_schema)
+ return True
+ except cfgv.ValidationError:
+ return False
+
+
+@pytest.mark.parametrize('value', ('definitely-not-a-tag', 'fiel'))
+def test_check_type_tag_failures(value):
+ with pytest.raises(cfgv.ValidationError):
+ check_type_tag(value)
+
+
+def test_check_type_tag_success():
+ check_type_tag('file')
+
+
+@pytest.mark.parametrize(
+ 'cfg',
+ (
+ {
+ 'repos': [{
+ 'repo': 'git@github.com:pre-commit/pre-commit-hooks',
+ 'rev': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
+ 'hooks': [{'id': 'pyflakes', 'files': '\\.py$'}],
+ }],
+ },
+ {
+ 'repos': [{
+ 'repo': 'git@github.com:pre-commit/pre-commit-hooks',
+ 'rev': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
+ 'hooks': [
+ {
+ 'id': 'pyflakes',
+ 'files': '\\.py$',
+ 'args': ['foo', 'bar', 'baz'],
+ },
+ ],
+ }],
+ },
+ ),
+)
+def test_config_valid(cfg):
+ assert is_valid_according_to_schema(cfg, CONFIG_SCHEMA)
+
+
+def test_invalid_config_wrong_type():
+ cfg = {
+ 'repos': [{
+ 'repo': 'git@github.com:pre-commit/pre-commit-hooks',
+ 'rev': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
+ 'hooks': [
+ {
+ 'id': 'pyflakes',
+ 'files': '\\.py$',
+ # Exclude pattern must be a string
+ 'exclude': 0,
+ 'args': ['foo', 'bar', 'baz'],
+ },
+ ],
+ }],
+ }
+ assert not is_valid_according_to_schema(cfg, CONFIG_SCHEMA)
+
+
+def test_local_hooks_with_rev_fails():
+ config_obj = {'repos': [dict(sample_local_config(), rev='foo')]}
+ with pytest.raises(cfgv.ValidationError):
+ cfgv.validate(config_obj, CONFIG_SCHEMA)
+
+
+def test_config_with_local_hooks_definition_passes():
+ config_obj = {'repos': [sample_local_config()]}
+ cfgv.validate(config_obj, CONFIG_SCHEMA)
+
+
+def test_config_schema_does_not_contain_defaults():
+ """Due to the way our merging works, if this schema has any defaults they
+ will clobber potentially useful values in the backing manifest. #227
+ """
+ for item in CONFIG_HOOK_DICT.items:
+ assert not isinstance(item, cfgv.Optional)
+
+
+def test_ci_map_key_allowed_at_top_level(caplog):
+ cfg = {
+ 'ci': {'skip': ['foo']},
+ 'repos': [{'repo': 'meta', 'hooks': [{'id': 'identity'}]}],
+ }
+ cfgv.validate(cfg, CONFIG_SCHEMA)
+ assert not caplog.record_tuples
+
+
+def test_ci_key_must_be_map():
+ with pytest.raises(cfgv.ValidationError):
+ cfgv.validate({'ci': 'invalid', 'repos': []}, CONFIG_SCHEMA)
+
+
+@pytest.mark.parametrize(
+ 'rev',
+ (
+ 'v0.12.4',
+ 'b27f281',
+ 'b27f281eb9398fc8504415d7fbdabf119ea8c5e1',
+ '19.10b0',
+ '4.3.21-2',
+ ),
+)
+def test_warn_mutable_rev_ok(caplog, rev):
+ config_obj = {
+ 'repo': 'https://gitlab.com/pycqa/flake8',
+ 'rev': rev,
+ 'hooks': [{'id': 'flake8'}],
+ }
+ cfgv.validate(config_obj, CONFIG_REPO_DICT)
+
+ assert caplog.record_tuples == []
+
+
+@pytest.mark.parametrize(
+ 'rev',
+ (
+ '',
+ 'HEAD',
+ 'stable',
+ 'master',
+ 'some_branch_name',
+ ),
+)
+def test_warn_mutable_rev_invalid(caplog, rev):
+ config_obj = {
+ 'repo': 'https://gitlab.com/pycqa/flake8',
+ 'rev': rev,
+ 'hooks': [{'id': 'flake8'}],
+ }
+ cfgv.validate(config_obj, CONFIG_REPO_DICT)
+
+ assert caplog.record_tuples == [
+ (
+ 'pre_commit',
+ logging.WARNING,
+ "The 'rev' field of repo 'https://gitlab.com/pycqa/flake8' "
+ 'appears to be a mutable reference (moving tag / branch). '
+ 'Mutable references are never updated after first install and are '
+ 'not supported. '
+ 'See https://pre-commit.com/#using-the-latest-version-for-a-repository ' # noqa: E501
+ 'for more details. '
+ 'Hint: `pre-commit autoupdate` often fixes this.',
+ ),
+ ]
+
+
+def test_warn_mutable_rev_conditional():
+ config_obj = {
+ 'repo': 'meta',
+ 'rev': '3.7.7',
+ 'hooks': [{'id': 'flake8'}],
+ }
+
+ with pytest.raises(cfgv.ValidationError):
+ cfgv.validate(config_obj, CONFIG_REPO_DICT)
+
+
+@pytest.mark.parametrize(
+ 'validator_cls',
+ (
+ OptionalSensibleRegexAtHook,
+ OptionalSensibleRegexAtTop,
+ ),
+)
+def test_sensible_regex_validators_dont_pass_none(validator_cls):
+ validator = validator_cls('files', cfgv.check_string)
+ with pytest.raises(cfgv.ValidationError) as excinfo:
+ validator.check({'files': None})
+
+ assert str(excinfo.value) == (
+ '\n'
+ '==> At key: files'
+ '\n'
+ '=====> Expected string got NoneType'
+ )
+
+
+@pytest.mark.parametrize(
+ ('regex', 'warning'),
+ (
+ (
+ r'dir/*.py',
+ "The 'files' field in hook 'flake8' is a regex, not a glob -- "
+ "matching '/*' probably isn't what you want here",
+ ),
+ (
+ r'dir[\/].*\.py',
+ r"pre-commit normalizes slashes in the 'files' field in hook "
+ r"'flake8' to forward slashes, so you can use / instead of [\/]",
+ ),
+ (
+ r'dir[/\\].*\.py',
+ r"pre-commit normalizes slashes in the 'files' field in hook "
+ r"'flake8' to forward slashes, so you can use / instead of [/\\]",
+ ),
+ (
+ r'dir[\\/].*\.py',
+ r"pre-commit normalizes slashes in the 'files' field in hook "
+ r"'flake8' to forward slashes, so you can use / instead of [\\/]",
+ ),
+ ),
+)
+def test_validate_optional_sensible_regex_at_hook(caplog, regex, warning):
+ config_obj = {
+ 'id': 'flake8',
+ 'files': regex,
+ }
+ cfgv.validate(config_obj, CONFIG_HOOK_DICT)
+
+ assert caplog.record_tuples == [('pre_commit', logging.WARNING, warning)]
+
+
+def test_validate_optional_sensible_regex_at_local_hook(caplog):
+ config_obj = sample_local_config()
+ config_obj['hooks'][0]['files'] = 'dir/*.py'
+
+ cfgv.validate(config_obj, CONFIG_REPO_DICT)
+
+ assert caplog.record_tuples == [
+ (
+ 'pre_commit',
+ logging.WARNING,
+ "The 'files' field in hook 'do_not_commit' is a regex, not a glob "
+ "-- matching '/*' probably isn't what you want here",
+ ),
+ ]
+
+
+@pytest.mark.parametrize(
+ ('regex', 'warning'),
+ (
+ (
+ r'dir/*.py',
+ "The top-level 'files' field is a regex, not a glob -- "
+ "matching '/*' probably isn't what you want here",
+ ),
+ (
+ r'dir[\/].*\.py',
+ r"pre-commit normalizes the slashes in the top-level 'files' "
+ r'field to forward slashes, so you can use / instead of [\/]',
+ ),
+ (
+ r'dir[/\\].*\.py',
+ r"pre-commit normalizes the slashes in the top-level 'files' "
+ r'field to forward slashes, so you can use / instead of [/\\]',
+ ),
+ (
+ r'dir[\\/].*\.py',
+ r"pre-commit normalizes the slashes in the top-level 'files' "
+ r'field to forward slashes, so you can use / instead of [\\/]',
+ ),
+ ),
+)
+def test_validate_optional_sensible_regex_at_top_level(caplog, regex, warning):
+ config_obj = {
+ 'files': regex,
+ 'repos': [],
+ }
+ cfgv.validate(config_obj, CONFIG_SCHEMA)
+
+ assert caplog.record_tuples == [('pre_commit', logging.WARNING, warning)]
+
+
+@pytest.mark.parametrize(
+ 'manifest_obj',
+ (
+ [{
+ 'id': 'a',
+ 'name': 'b',
+ 'entry': 'c',
+ 'language': 'python',
+ 'files': r'\.py$',
+ }],
+ [{
+ 'id': 'a',
+ 'name': 'b',
+ 'entry': 'c',
+ 'language': 'python',
+ 'language_version': 'python3.4',
+ 'files': r'\.py$',
+ }],
+ # A regression in 0.13.5: always_run and files are permissible
+ [{
+ 'id': 'a',
+ 'name': 'b',
+ 'entry': 'c',
+ 'language': 'python',
+ 'files': '',
+ 'always_run': True,
+ }],
+ ),
+)
+def test_valid_manifests(manifest_obj):
+ assert is_valid_according_to_schema(manifest_obj, MANIFEST_SCHEMA)
+
+
+@pytest.mark.parametrize(
+ 'config_repo',
+ (
+ # i-dont-exist isn't a valid hook
+ {'repo': 'meta', 'hooks': [{'id': 'i-dont-exist'}]},
+ # invalid to set a language for a meta hook
+ {'repo': 'meta', 'hooks': [{'id': 'identity', 'language': 'python'}]},
+ # name override must be string
+ {'repo': 'meta', 'hooks': [{'id': 'identity', 'name': False}]},
+ pytest.param(
+ {
+ 'repo': 'meta',
+ 'hooks': [{'id': 'identity', 'entry': 'echo hi'}],
+ },
+ id='cannot override entry for meta hooks',
+ ),
+ ),
+)
+def test_meta_hook_invalid(config_repo):
+ with pytest.raises(cfgv.ValidationError):
+ cfgv.validate(config_repo, CONFIG_REPO_DICT)
+
+
+def test_meta_check_hooks_apply_only_at_top_level():
+ cfg = {'id': 'check-hooks-apply'}
+ cfg = cfgv.apply_defaults(cfg, META_HOOK_DICT)
+
+ files_re = re.compile(cfg['files'])
+ assert files_re.search('.pre-commit-config.yaml')
+ assert not files_re.search('foo/.pre-commit-config.yaml')
+
+
+@pytest.mark.parametrize(
+ 'mapping',
+ (
+ # invalid language key
+ {'pony': '1.0'},
+ # not a string for version
+ {'python': 3},
+ ),
+)
+def test_default_language_version_invalid(mapping):
+ with pytest.raises(cfgv.ValidationError):
+ cfgv.validate(mapping, DEFAULT_LANGUAGE_VERSION)
+
+
+def test_parse_version():
+ assert parse_version('0.0') == parse_version('0.0')
+ assert parse_version('0.1') > parse_version('0.0')
+ assert parse_version('2.1') >= parse_version('2')
+
+
+def test_minimum_pre_commit_version_failing():
+ cfg = {'repos': [], 'minimum_pre_commit_version': '999'}
+ with pytest.raises(cfgv.ValidationError) as excinfo:
+ cfgv.validate(cfg, CONFIG_SCHEMA)
+ assert str(excinfo.value) == (
+ f'\n'
+ f'==> At Config()\n'
+ f'==> At key: minimum_pre_commit_version\n'
+ f'=====> pre-commit version 999 is required but version {C.VERSION} '
+ f'is installed. Perhaps run `pip install --upgrade pre-commit`.'
+ )
+
+
+def test_minimum_pre_commit_version_failing_in_config():
+ cfg = {'repos': [sample_local_config()]}
+ cfg['repos'][0]['hooks'][0]['minimum_pre_commit_version'] = '999'
+ with pytest.raises(cfgv.ValidationError) as excinfo:
+ cfgv.validate(cfg, CONFIG_SCHEMA)
+ assert str(excinfo.value) == (
+ f'\n'
+ f'==> At Config()\n'
+ f'==> At key: repos\n'
+ f"==> At Repository(repo='local')\n"
+ f'==> At key: hooks\n'
+ f"==> At Hook(id='do_not_commit')\n"
+ f'==> At key: minimum_pre_commit_version\n'
+ f'=====> pre-commit version 999 is required but version {C.VERSION} '
+ f'is installed. Perhaps run `pip install --upgrade pre-commit`.'
+ )
+
+
+def test_minimum_pre_commit_version_failing_before_other_error():
+ cfg = {'repos': 5, 'minimum_pre_commit_version': '999'}
+ with pytest.raises(cfgv.ValidationError) as excinfo:
+ cfgv.validate(cfg, CONFIG_SCHEMA)
+ assert str(excinfo.value) == (
+ f'\n'
+ f'==> At Config()\n'
+ f'==> At key: minimum_pre_commit_version\n'
+ f'=====> pre-commit version 999 is required but version {C.VERSION} '
+ f'is installed. Perhaps run `pip install --upgrade pre-commit`.'
+ )
+
+
+def test_minimum_pre_commit_version_passing():
+ cfg = {'repos': [], 'minimum_pre_commit_version': '0'}
+ cfgv.validate(cfg, CONFIG_SCHEMA)
+
+
+@pytest.mark.parametrize('schema', (CONFIG_SCHEMA, CONFIG_REPO_DICT))
+def test_warn_additional(schema):
+ allowed_keys = {item.key for item in schema.items if hasattr(item, 'key')}
+ warn_additional, = (
+ x for x in schema.items if isinstance(x, cfgv.WarnAdditionalKeys)
+ )
+ assert allowed_keys == set(warn_additional.keys)
+
+
+def test_stages_migration_for_default_stages():
+ cfg = {
+ 'default_stages': ['commit-msg', 'push', 'commit', 'merge-commit'],
+ 'repos': [],
+ }
+ cfgv.validate(cfg, CONFIG_SCHEMA)
+ cfg = cfgv.apply_defaults(cfg, CONFIG_SCHEMA)
+ assert cfg['default_stages'] == [
+ 'commit-msg', 'pre-push', 'pre-commit', 'pre-merge-commit',
+ ]
+
+
+def test_manifest_stages_defaulting():
+ dct = {
+ 'id': 'fake-hook',
+ 'name': 'fake-hook',
+ 'entry': 'fake-hook',
+ 'language': 'system',
+ 'stages': ['commit-msg', 'push', 'commit', 'merge-commit'],
+ }
+ cfgv.validate(dct, MANIFEST_HOOK_DICT)
+ dct = cfgv.apply_defaults(dct, MANIFEST_HOOK_DICT)
+ assert dct['stages'] == [
+ 'commit-msg', 'pre-push', 'pre-commit', 'pre-merge-commit',
+ ]
+
+
+def test_config_hook_stages_defaulting_missing():
+ dct = {'id': 'fake-hook'}
+ cfgv.validate(dct, CONFIG_HOOK_DICT)
+ dct = cfgv.apply_defaults(dct, CONFIG_HOOK_DICT)
+ assert dct == {'id': 'fake-hook'}
+
+
+def test_config_hook_stages_defaulting():
+ dct = {
+ 'id': 'fake-hook',
+ 'stages': ['commit-msg', 'push', 'commit', 'merge-commit'],
+ }
+ cfgv.validate(dct, CONFIG_HOOK_DICT)
+ dct = cfgv.apply_defaults(dct, CONFIG_HOOK_DICT)
+ assert dct == {
+ 'id': 'fake-hook',
+ 'stages': ['commit-msg', 'pre-push', 'pre-commit', 'pre-merge-commit'],
+ }
diff --git a/tests/color_test.py b/tests/color_test.py
new file mode 100644
index 0000000..89b4fd3
--- /dev/null
+++ b/tests/color_test.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import sys
+from unittest import mock
+
+import pytest
+
+from pre_commit import envcontext
+from pre_commit.color import format_color
+from pre_commit.color import GREEN
+from pre_commit.color import use_color
+
+
+@pytest.mark.parametrize(
+ ('in_text', 'in_color', 'in_use_color', 'expected'), (
+ ('foo', GREEN, True, f'{GREEN}foo\033[m'),
+ ('foo', GREEN, False, 'foo'),
+ ),
+)
+def test_format_color(in_text, in_color, in_use_color, expected):
+ ret = format_color(in_text, in_color, in_use_color)
+ assert ret == expected
+
+
+def test_use_color_never():
+ assert use_color('never') is False
+
+
+def test_use_color_always():
+ assert use_color('always') is True
+
+
+def test_use_color_no_tty():
+ with mock.patch.object(sys.stderr, 'isatty', return_value=False):
+ assert use_color('auto') is False
+
+
+def test_use_color_tty_with_color_support():
+ with mock.patch.object(sys.stderr, 'isatty', return_value=True):
+ with mock.patch('pre_commit.color.terminal_supports_color', True):
+ with envcontext.envcontext((('TERM', envcontext.UNSET),)):
+ assert use_color('auto') is True
+
+
+def test_use_color_tty_without_color_support():
+ with mock.patch.object(sys.stderr, 'isatty', return_value=True):
+ with mock.patch('pre_commit.color.terminal_supports_color', False):
+ with envcontext.envcontext((('TERM', envcontext.UNSET),)):
+ assert use_color('auto') is False
+
+
+def test_use_color_dumb_term():
+ with mock.patch.object(sys.stderr, 'isatty', return_value=True):
+ with mock.patch('pre_commit.color.terminal_supports_color', True):
+ with envcontext.envcontext((('TERM', 'dumb'),)):
+ assert use_color('auto') is False
+
+
+def test_use_color_raises_if_given_shenanigans():
+ with pytest.raises(ValueError):
+ use_color('herpaderp')
diff --git a/tests/commands/__init__.py b/tests/commands/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/commands/__init__.py
diff --git a/tests/commands/autoupdate_test.py b/tests/commands/autoupdate_test.py
new file mode 100644
index 0000000..71bd044
--- /dev/null
+++ b/tests/commands/autoupdate_test.py
@@ -0,0 +1,532 @@
+from __future__ import annotations
+
+import shlex
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit import envcontext
+from pre_commit import git
+from pre_commit import yaml
+from pre_commit.commands.autoupdate import _check_hooks_still_exist_at_rev
+from pre_commit.commands.autoupdate import autoupdate
+from pre_commit.commands.autoupdate import RepositoryCannotBeUpdatedError
+from pre_commit.commands.autoupdate import RevInfo
+from pre_commit.util import cmd_output
+from testing.auto_namedtuple import auto_namedtuple
+from testing.fixtures import add_config_to_repo
+from testing.fixtures import make_config_from_repo
+from testing.fixtures import make_repo
+from testing.fixtures import modify_manifest
+from testing.fixtures import read_config
+from testing.fixtures import sample_local_config
+from testing.fixtures import write_config
+from testing.util import git_commit
+
+
+@pytest.fixture
+def up_to_date(tempdir_factory):
+ yield make_repo(tempdir_factory, 'python_hooks_repo')
+
+
+@pytest.fixture
+def out_of_date(tempdir_factory):
+ path = make_repo(tempdir_factory, 'python_hooks_repo')
+ original_rev = git.head_rev(path)
+
+ git_commit(cwd=path)
+ head_rev = git.head_rev(path)
+
+ yield auto_namedtuple(
+ path=path, original_rev=original_rev, head_rev=head_rev,
+ )
+
+
+@pytest.fixture
+def tagged(out_of_date):
+ cmd_output('git', 'tag', 'v1.2.3', cwd=out_of_date.path)
+ yield out_of_date
+
+
+@pytest.fixture
+def hook_disappearing(tempdir_factory):
+ path = make_repo(tempdir_factory, 'python_hooks_repo')
+ original_rev = git.head_rev(path)
+
+ with modify_manifest(path) as manifest:
+ manifest[0]['id'] = 'bar'
+
+ yield auto_namedtuple(path=path, original_rev=original_rev)
+
+
+def test_rev_info_from_config():
+ info = RevInfo.from_config({'repo': 'repo/path', 'rev': 'v1.2.3'})
+ assert info == RevInfo('repo/path', 'v1.2.3', None)
+
+
+def test_rev_info_update_up_to_date_repo(up_to_date):
+ config = make_config_from_repo(up_to_date)
+ info = RevInfo.from_config(config)._replace(hook_ids=frozenset(('foo',)))
+ new_info = info.update(tags_only=False, freeze=False)
+ assert info == new_info
+
+
+def test_rev_info_update_out_of_date_repo(out_of_date):
+ config = make_config_from_repo(
+ out_of_date.path, rev=out_of_date.original_rev,
+ )
+ info = RevInfo.from_config(config)
+ new_info = info.update(tags_only=False, freeze=False)
+ assert new_info.rev == out_of_date.head_rev
+
+
+def test_rev_info_update_non_master_default_branch(out_of_date):
+ # change the default branch to be not-master
+ cmd_output('git', '-C', out_of_date.path, 'branch', '-m', 'dev')
+ test_rev_info_update_out_of_date_repo(out_of_date)
+
+
+def test_rev_info_update_tags_even_if_not_tags_only(tagged):
+ config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
+ info = RevInfo.from_config(config)
+ new_info = info.update(tags_only=False, freeze=False)
+ assert new_info.rev == 'v1.2.3'
+
+
+def test_rev_info_update_tags_only_does_not_pick_tip(tagged):
+ git_commit(cwd=tagged.path)
+ config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
+ info = RevInfo.from_config(config)
+ new_info = info.update(tags_only=True, freeze=False)
+ assert new_info.rev == 'v1.2.3'
+
+
+def test_rev_info_update_tags_prefers_version_tag(tagged, out_of_date):
+ cmd_output('git', 'tag', 'latest', cwd=out_of_date.path)
+ config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
+ info = RevInfo.from_config(config)
+ new_info = info.update(tags_only=True, freeze=False)
+ assert new_info.rev == 'v1.2.3'
+
+
+def test_rev_info_update_tags_non_version_tag(out_of_date):
+ cmd_output('git', 'tag', 'latest', cwd=out_of_date.path)
+ config = make_config_from_repo(
+ out_of_date.path, rev=out_of_date.original_rev,
+ )
+ info = RevInfo.from_config(config)
+ new_info = info.update(tags_only=True, freeze=False)
+ assert new_info.rev == 'latest'
+
+
+def test_rev_info_update_freeze_tag(tagged):
+ git_commit(cwd=tagged.path)
+ config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
+ info = RevInfo.from_config(config)
+ new_info = info.update(tags_only=True, freeze=True)
+ assert new_info.rev == tagged.head_rev
+ assert new_info.frozen == 'v1.2.3'
+
+
+def test_rev_info_update_does_not_freeze_if_already_sha(out_of_date):
+ config = make_config_from_repo(
+ out_of_date.path, rev=out_of_date.original_rev,
+ )
+ info = RevInfo.from_config(config)
+ new_info = info.update(tags_only=True, freeze=True)
+ assert new_info.rev == out_of_date.head_rev
+ assert new_info.frozen is None
+
+
+def test_autoupdate_up_to_date_repo(up_to_date, tmpdir):
+ contents = (
+ f'repos:\n'
+ f'- repo: {up_to_date}\n'
+ f' rev: {git.head_rev(up_to_date)}\n'
+ f' hooks:\n'
+ f' - id: foo\n'
+ )
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ cfg.write(contents)
+
+ assert autoupdate(str(cfg), freeze=False, tags_only=False) == 0
+ assert cfg.read() == contents
+
+
+def test_autoupdate_old_revision_broken(tempdir_factory, in_tmpdir):
+ """In $FUTURE_VERSION, hooks.yaml will no longer be supported. This
+ asserts that when that day comes, pre-commit will be able to autoupdate
+ despite not being able to read hooks.yaml in that repository.
+ """
+ path = make_repo(tempdir_factory, 'python_hooks_repo')
+ config = make_config_from_repo(path, check=False)
+
+ cmd_output('git', 'mv', C.MANIFEST_FILE, 'nope.yaml', cwd=path)
+ git_commit(cwd=path)
+ # Assume this is the revision the user's old repository was at
+ rev = git.head_rev(path)
+ cmd_output('git', 'mv', 'nope.yaml', C.MANIFEST_FILE, cwd=path)
+ git_commit(cwd=path)
+ update_rev = git.head_rev(path)
+
+ config['rev'] = rev
+ write_config('.', config)
+ with open(C.CONFIG_FILE) as f:
+ before = f.read()
+ assert autoupdate(C.CONFIG_FILE, freeze=False, tags_only=False) == 0
+ with open(C.CONFIG_FILE) as f:
+ after = f.read()
+ assert before != after
+ assert update_rev in after
+
+
+def test_autoupdate_out_of_date_repo(out_of_date, tmpdir):
+ fmt = (
+ 'repos:\n'
+ '- repo: {}\n'
+ ' rev: {}\n'
+ ' hooks:\n'
+ ' - id: foo\n'
+ )
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ cfg.write(fmt.format(out_of_date.path, out_of_date.original_rev))
+
+ assert autoupdate(str(cfg), freeze=False, tags_only=False) == 0
+ assert cfg.read() == fmt.format(out_of_date.path, out_of_date.head_rev)
+
+
+def test_autoupdate_with_core_useBuiltinFSMonitor(out_of_date, tmpdir):
+ # force the setting on "globally" for git
+ home = tmpdir.join('fakehome').ensure_dir()
+ home.join('.gitconfig').write('[core]\nuseBuiltinFSMonitor = true\n')
+ with envcontext.envcontext((('HOME', str(home)),)):
+ test_autoupdate_out_of_date_repo(out_of_date, tmpdir)
+
+
+def test_autoupdate_pure_yaml(out_of_date, tmpdir):
+ with mock.patch.object(yaml, 'Dumper', yaml.yaml.SafeDumper):
+ test_autoupdate_out_of_date_repo(out_of_date, tmpdir)
+
+
+def test_autoupdate_only_one_to_update(up_to_date, out_of_date, tmpdir):
+ fmt = (
+ 'repos:\n'
+ '- repo: {}\n'
+ ' rev: {}\n'
+ ' hooks:\n'
+ ' - id: foo\n'
+ '- repo: {}\n'
+ ' rev: {}\n'
+ ' hooks:\n'
+ ' - id: foo\n'
+ )
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ before = fmt.format(
+ up_to_date, git.head_rev(up_to_date),
+ out_of_date.path, out_of_date.original_rev,
+ )
+ cfg.write(before)
+
+ assert autoupdate(str(cfg), freeze=False, tags_only=False) == 0
+ assert cfg.read() == fmt.format(
+ up_to_date, git.head_rev(up_to_date),
+ out_of_date.path, out_of_date.head_rev,
+ )
+
+
+def test_autoupdate_out_of_date_repo_with_correct_repo_name(
+ out_of_date, in_tmpdir,
+):
+ stale_config = make_config_from_repo(
+ out_of_date.path, rev=out_of_date.original_rev, check=False,
+ )
+ local_config = sample_local_config()
+ config = {'repos': [stale_config, local_config]}
+ write_config('.', config)
+
+ with open(C.CONFIG_FILE) as f:
+ before = f.read()
+ repo_name = f'file://{out_of_date.path}'
+ ret = autoupdate(
+ C.CONFIG_FILE, freeze=False, tags_only=False,
+ repos=(repo_name,),
+ )
+ with open(C.CONFIG_FILE) as f:
+ after = f.read()
+ assert ret == 0
+ assert before != after
+ assert out_of_date.head_rev in after
+ assert 'local' in after
+
+
+def test_autoupdate_out_of_date_repo_with_wrong_repo_name(
+ out_of_date, in_tmpdir,
+):
+ config = make_config_from_repo(
+ out_of_date.path, rev=out_of_date.original_rev, check=False,
+ )
+ write_config('.', config)
+
+ with open(C.CONFIG_FILE) as f:
+ before = f.read()
+ # It will not update it, because the name doesn't match
+ ret = autoupdate(
+ C.CONFIG_FILE, freeze=False, tags_only=False,
+ repos=('dne',),
+ )
+ with open(C.CONFIG_FILE) as f:
+ after = f.read()
+ assert ret == 0
+ assert before == after
+
+
+def test_does_not_reformat(tmpdir, out_of_date):
+ fmt = (
+ 'repos:\n'
+ '- repo: {}\n'
+ ' rev: {} # definitely the version I want!\n'
+ ' hooks:\n'
+ ' - id: foo\n'
+ ' # These args are because reasons!\n'
+ ' args: [foo, bar, baz]\n'
+ )
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ cfg.write(fmt.format(out_of_date.path, out_of_date.original_rev))
+
+ assert autoupdate(str(cfg), freeze=False, tags_only=False) == 0
+ expected = fmt.format(out_of_date.path, out_of_date.head_rev)
+ assert cfg.read() == expected
+
+
+def test_does_not_change_mixed_endlines_read(up_to_date, tmpdir):
+ fmt = (
+ 'repos:\n'
+ '- repo: {}\n'
+ ' rev: {} # definitely the version I want!\r\n'
+ ' hooks:\r\n'
+ ' - id: foo\n'
+ ' # These args are because reasons!\r\n'
+ ' args: [foo, bar, baz]\r\n'
+ )
+ cfg = tmpdir.join(C.CONFIG_FILE)
+
+ expected = fmt.format(up_to_date, git.head_rev(up_to_date)).encode()
+ cfg.write_binary(expected)
+
+ assert autoupdate(str(cfg), freeze=False, tags_only=False) == 0
+ assert cfg.read_binary() == expected
+
+
+def test_does_not_change_mixed_endlines_write(tmpdir, out_of_date):
+ fmt = (
+ 'repos:\n'
+ '- repo: {}\n'
+ ' rev: {} # definitely the version I want!\r\n'
+ ' hooks:\r\n'
+ ' - id: foo\n'
+ ' # These args are because reasons!\r\n'
+ ' args: [foo, bar, baz]\r\n'
+ )
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ cfg.write_binary(
+ fmt.format(out_of_date.path, out_of_date.original_rev).encode(),
+ )
+
+ assert autoupdate(str(cfg), freeze=False, tags_only=False) == 0
+ expected = fmt.format(out_of_date.path, out_of_date.head_rev).encode()
+ assert cfg.read_binary() == expected
+
+
+def test_loses_formatting_when_not_detectable(out_of_date, tmpdir):
+ """A best-effort attempt is made at updating rev without rewriting
+ formatting. When the original formatting cannot be detected, this
+ is abandoned.
+ """
+ config = (
+ 'repos: [\n'
+ ' {{\n'
+ ' repo: {}, rev: {},\n'
+ ' hooks: [\n'
+ ' # A comment!\n'
+ ' {{id: foo}},\n'
+ ' ],\n'
+ ' }}\n'
+ ']\n'.format(
+ shlex.quote(out_of_date.path), out_of_date.original_rev,
+ )
+ )
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ cfg.write(config)
+
+ assert autoupdate(str(cfg), freeze=False, tags_only=False) == 0
+ expected = (
+ f'repos:\n'
+ f'- repo: {out_of_date.path}\n'
+ f' rev: {out_of_date.head_rev}\n'
+ f' hooks:\n'
+ f' - id: foo\n'
+ )
+ assert cfg.read() == expected
+
+
+def test_autoupdate_tagged_repo(tagged, in_tmpdir):
+ config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
+ write_config('.', config)
+
+ assert autoupdate(C.CONFIG_FILE, freeze=False, tags_only=False) == 0
+ with open(C.CONFIG_FILE) as f:
+ assert 'v1.2.3' in f.read()
+
+
+def test_autoupdate_freeze(tagged, in_tmpdir):
+ config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
+ write_config('.', config)
+
+ assert autoupdate(C.CONFIG_FILE, freeze=True, tags_only=False) == 0
+ with open(C.CONFIG_FILE) as f:
+ expected = f'rev: {tagged.head_rev} # frozen: v1.2.3'
+ assert expected in f.read()
+
+ # if we un-freeze it should remove the frozen comment
+ assert autoupdate(C.CONFIG_FILE, freeze=False, tags_only=False) == 0
+ with open(C.CONFIG_FILE) as f:
+ assert 'rev: v1.2.3\n' in f.read()
+
+
+def test_autoupdate_tags_only(tagged, in_tmpdir):
+ # add some commits after the tag
+ git_commit(cwd=tagged.path)
+
+ config = make_config_from_repo(tagged.path, rev=tagged.original_rev)
+ write_config('.', config)
+
+ assert autoupdate(C.CONFIG_FILE, freeze=False, tags_only=True) == 0
+ with open(C.CONFIG_FILE) as f:
+ assert 'v1.2.3' in f.read()
+
+
+def test_autoupdate_latest_no_config(out_of_date, in_tmpdir):
+ config = make_config_from_repo(
+ out_of_date.path, rev=out_of_date.original_rev,
+ )
+ write_config('.', config)
+
+ cmd_output('git', 'rm', '-r', ':/', cwd=out_of_date.path)
+ git_commit(cwd=out_of_date.path)
+
+ assert autoupdate(C.CONFIG_FILE, freeze=False, tags_only=False) == 1
+ with open(C.CONFIG_FILE) as f:
+ assert out_of_date.original_rev in f.read()
+
+
+def test_hook_disppearing_repo_raises(hook_disappearing):
+ config = make_config_from_repo(
+ hook_disappearing.path,
+ rev=hook_disappearing.original_rev,
+ hooks=[{'id': 'foo'}],
+ )
+ info = RevInfo.from_config(config).update(tags_only=False, freeze=False)
+ with pytest.raises(RepositoryCannotBeUpdatedError):
+ _check_hooks_still_exist_at_rev(config, info)
+
+
+def test_autoupdate_hook_disappearing_repo(hook_disappearing, tmpdir):
+ contents = (
+ f'repos:\n'
+ f'- repo: {hook_disappearing.path}\n'
+ f' rev: {hook_disappearing.original_rev}\n'
+ f' hooks:\n'
+ f' - id: foo\n'
+ )
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ cfg.write(contents)
+
+ assert autoupdate(str(cfg), freeze=False, tags_only=False) == 1
+ assert cfg.read() == contents
+
+
+def test_autoupdate_local_hooks(in_git_dir):
+ config = sample_local_config()
+ add_config_to_repo('.', config)
+ assert autoupdate(C.CONFIG_FILE, freeze=False, tags_only=False) == 0
+ new_config_written = read_config('.')
+ assert len(new_config_written['repos']) == 1
+ assert new_config_written['repos'][0] == config
+
+
+def test_autoupdate_local_hooks_with_out_of_date_repo(
+ out_of_date, in_tmpdir,
+):
+ stale_config = make_config_from_repo(
+ out_of_date.path, rev=out_of_date.original_rev, check=False,
+ )
+ local_config = sample_local_config()
+ config = {'repos': [local_config, stale_config]}
+ write_config('.', config)
+ assert autoupdate(C.CONFIG_FILE, freeze=False, tags_only=False) == 0
+ new_config_written = read_config('.')
+ assert len(new_config_written['repos']) == 2
+ assert new_config_written['repos'][0] == local_config
+
+
+def test_autoupdate_meta_hooks(tmpdir):
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ cfg.write(
+ 'repos:\n'
+ '- repo: meta\n'
+ ' hooks:\n'
+ ' - id: check-useless-excludes\n',
+ )
+ assert autoupdate(str(cfg), freeze=False, tags_only=True) == 0
+ assert cfg.read() == (
+ 'repos:\n'
+ '- repo: meta\n'
+ ' hooks:\n'
+ ' - id: check-useless-excludes\n'
+ )
+
+
+def test_updates_old_format_to_new_format(tmpdir, capsys):
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ cfg.write(
+ '- repo: local\n'
+ ' hooks:\n'
+ ' - id: foo\n'
+ ' name: foo\n'
+ ' entry: ./bin/foo.sh\n'
+ ' language: script\n',
+ )
+ assert autoupdate(str(cfg), freeze=False, tags_only=True) == 0
+ contents = cfg.read()
+ assert contents == (
+ 'repos:\n'
+ '- repo: local\n'
+ ' hooks:\n'
+ ' - id: foo\n'
+ ' name: foo\n'
+ ' entry: ./bin/foo.sh\n'
+ ' language: script\n'
+ )
+ out, _ = capsys.readouterr()
+ assert out == 'Configuration has been migrated.\n'
+
+
+def test_maintains_rev_quoting_style(tmpdir, out_of_date):
+ fmt = (
+ 'repos:\n'
+ '- repo: {path}\n'
+ ' rev: "{rev}"\n'
+ ' hooks:\n'
+ ' - id: foo\n'
+ '- repo: {path}\n'
+ " rev: '{rev}'\n"
+ ' hooks:\n'
+ ' - id: foo\n'
+ )
+ cfg = tmpdir.join(C.CONFIG_FILE)
+ cfg.write(fmt.format(path=out_of_date.path, rev=out_of_date.original_rev))
+
+ assert autoupdate(str(cfg), freeze=False, tags_only=False) == 0
+ expected = fmt.format(path=out_of_date.path, rev=out_of_date.head_rev)
+ assert cfg.read() == expected
diff --git a/tests/commands/clean_test.py b/tests/commands/clean_test.py
new file mode 100644
index 0000000..dd8e4a5
--- /dev/null
+++ b/tests/commands/clean_test.py
@@ -0,0 +1,35 @@
+from __future__ import annotations
+
+import os.path
+from unittest import mock
+
+import pytest
+
+from pre_commit.commands.clean import clean
+
+
@pytest.fixture(autouse=True)
def fake_old_dir(tempdir_factory):
    """Redirect `~/.pre-commit` expansion to a throwaway directory."""
    old_dir = tempdir_factory.get()

    def _expanduser(path, *args, **kwargs):
        # `clean` only ever expands the legacy cache location
        assert path == '~/.pre-commit'
        return old_dir

    with mock.patch.object(os.path, 'expanduser', side_effect=_expanduser):
        yield old_dir
+
+
def test_clean(store, fake_old_dir):
    """`clean` removes both the store and the legacy ~/.pre-commit dir."""
    for directory in (fake_old_dir, store.directory):
        assert os.path.exists(directory)
    clean(store)
    for directory in (fake_old_dir, store.directory):
        assert not os.path.exists(directory)
+
+
def test_clean_idempotent(store):
    """Running `clean` twice is safe even once the store is gone."""
    for _ in range(2):
        clean(store)
        assert not os.path.exists(store.directory)
diff --git a/tests/commands/gc_test.py b/tests/commands/gc_test.py
new file mode 100644
index 0000000..95113ed
--- /dev/null
+++ b/tests/commands/gc_test.py
@@ -0,0 +1,164 @@
+from __future__ import annotations
+
+import os
+
+import pre_commit.constants as C
+from pre_commit import git
+from pre_commit.clientlib import load_config
+from pre_commit.commands.autoupdate import autoupdate
+from pre_commit.commands.gc import gc
+from pre_commit.commands.install_uninstall import install_hooks
+from pre_commit.repository import all_hooks
+from testing.fixtures import make_config_from_repo
+from testing.fixtures import make_repo
+from testing.fixtures import modify_config
+from testing.fixtures import sample_local_config
+from testing.fixtures import sample_meta_config
+from testing.fixtures import write_config
+from testing.util import git_commit
+
+
def _repo_count(store):
    """Number of repos currently cached in the store."""
    return len(store.select_all_repos())
+
+
def _config_count(store):
    """Number of configs currently tracked by the store."""
    return len(store.select_all_configs())
+
+
def _remove_config_assert_cleared(store, cap_out):
    """Delete the config and check gc drops the config and its repo."""
    os.remove(C.CONFIG_FILE)
    assert gc(store) == 0
    assert _config_count(store) == 0
    assert _repo_count(store) == 0
    assert cap_out.get().splitlines()[-1] == '1 repo(s) removed.'
+
+
def test_gc(tempdir_factory, store, in_git_dir, cap_out):
    """After autoupdate, the clone of the stale rev is gc'd away."""
    path = make_repo(tempdir_factory, 'script_hooks_repo')
    old_rev = git.head_rev(path)
    git_commit(cwd=path)

    write_config('.', make_config_from_repo(path, rev=old_rev))
    store.mark_config_used(C.CONFIG_FILE)

    # update will clone both the old and new repo, making the old one gc-able
    assert not install_hooks(C.CONFIG_FILE, store)
    assert not autoupdate(C.CONFIG_FILE, freeze=False, tags_only=False)
    assert not install_hooks(C.CONFIG_FILE, store)

    assert _config_count(store) == 1
    assert _repo_count(store) == 2
    assert not gc(store)
    # only the clone at the updated rev should survive
    assert _config_count(store) == 1
    assert _repo_count(store) == 1
    assert cap_out.get().splitlines()[-1] == '1 repo(s) removed.'

    _remove_config_assert_cleared(store, cap_out)
+
+
def test_gc_repo_not_cloned(tempdir_factory, store, in_git_dir, cap_out):
    """A configured-but-never-cloned repo leaves nothing for gc to remove."""
    path = make_repo(tempdir_factory, 'script_hooks_repo')
    write_config('.', make_config_from_repo(path))
    store.mark_config_used(C.CONFIG_FILE)

    assert (_config_count(store), _repo_count(store)) == (1, 0)
    assert gc(store) == 0
    assert (_config_count(store), _repo_count(store)) == (1, 0)
    assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
+
+
def test_gc_meta_repo_does_not_crash(store, in_git_dir, cap_out):
    """gc must tolerate `repo: meta` entries, which have no clone."""
    write_config('.', sample_meta_config())
    store.mark_config_used(C.CONFIG_FILE)
    assert gc(store) == 0
    assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
+
+
def test_gc_local_repo_does_not_crash(store, in_git_dir, cap_out):
    """gc must tolerate `repo: local` entries, which have no clone."""
    write_config('.', sample_local_config())
    store.mark_config_used(C.CONFIG_FILE)
    assert gc(store) == 0
    assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
+
+
def test_gc_unused_local_repo_with_env(store, in_git_dir, cap_out):
    """A local hook's created environment is kept while its config exists."""
    config = {
        'repo': 'local',
        'hooks': [{
            'id': 'flake8', 'name': 'flake8', 'entry': 'flake8',
            # a `language: python` local hook will create an environment
            'types': ['python'], 'language': 'python',
        }],
    }
    write_config('.', config)
    store.mark_config_used(C.CONFIG_FILE)

    # this causes the repositories to be created
    all_hooks(load_config(C.CONFIG_FILE), store)

    assert _config_count(store) == 1
    assert _repo_count(store) == 1
    assert not gc(store)
    # still referenced by the config -- nothing to collect
    assert _config_count(store) == 1
    assert _repo_count(store) == 1
    assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'

    _remove_config_assert_cleared(store, cap_out)
+
+
def test_gc_config_with_missing_hook(
        tempdir_factory, store, in_git_dir, cap_out,
):
    """gc must not crash when the config names a hook the repo lacks."""
    path = make_repo(tempdir_factory, 'script_hooks_repo')
    write_config('.', make_config_from_repo(path))
    store.mark_config_used(C.CONFIG_FILE)
    # to trigger a clone
    all_hooks(load_config(C.CONFIG_FILE), store)

    with modify_config() as config:
        # add a hook which does not exist, make sure we don't crash
        config['repos'][0]['hooks'].append({'id': 'does-not-exist'})

    assert _config_count(store) == 1
    assert _repo_count(store) == 1
    assert not gc(store)
    assert _config_count(store) == 1
    assert _repo_count(store) == 1
    assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'

    _remove_config_assert_cleared(store, cap_out)
+
+
def test_gc_deletes_invalid_configs(store, in_git_dir, cap_out):
    """Configs that no longer validate are dropped from the store by gc."""
    write_config('.', {'i am': 'invalid'})
    store.mark_config_used(C.CONFIG_FILE)

    assert _config_count(store) == 1
    assert gc(store) == 0
    assert _config_count(store) == 0
    assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
+
+
def test_invalid_manifest_gcd(tempdir_factory, store, in_git_dir, cap_out):
    """A clone whose manifest is unreadable is treated as collectable."""
    # clean up repos from old pre-commit versions
    path = make_repo(tempdir_factory, 'script_hooks_repo')
    write_config('.', make_config_from_repo(path))
    store.mark_config_used(C.CONFIG_FILE)

    # trigger a clone
    install_hooks(C.CONFIG_FILE, store)

    # we'll "break" the manifest to simulate an old version clone
    (_, _, path), = store.select_all_repos()
    os.remove(os.path.join(path, C.MANIFEST_FILE))

    assert _config_count(store) == 1
    assert _repo_count(store) == 1
    assert not gc(store)
    assert _config_count(store) == 1
    assert _repo_count(store) == 0
    assert cap_out.get().splitlines()[-1] == '1 repo(s) removed.'
diff --git a/tests/commands/hook_impl_test.py b/tests/commands/hook_impl_test.py
new file mode 100644
index 0000000..d757e85
--- /dev/null
+++ b/tests/commands/hook_impl_test.py
@@ -0,0 +1,381 @@
+from __future__ import annotations
+
+import subprocess
+import sys
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit import git
+from pre_commit.commands import hook_impl
+from pre_commit.envcontext import envcontext
+from pre_commit.util import cmd_output
+from pre_commit.util import make_executable
+from testing.fixtures import git_dir
+from testing.fixtures import sample_local_config
+from testing.fixtures import write_config
+from testing.util import cwd
+from testing.util import git_commit
+
+
def test_validate_config_file_exists(tmpdir):
    """An existing config passes validation without raising."""
    config_file = tmpdir.join(C.CONFIG_FILE).ensure()
    hook_impl._validate_config(0, config_file, True)
+
+
def test_validate_config_missing(capsys):
    """A missing config aborts with exit code 1 and remediation advice."""
    with pytest.raises(SystemExit) as excinfo:
        hook_impl._validate_config(123, 'DNE.yaml', False)
    assert excinfo.value.args == (1,)
    out, _ = capsys.readouterr()
    assert out == (
        'No DNE.yaml file was found\n'
        '- To temporarily silence this, run '
        '`PRE_COMMIT_ALLOW_NO_CONFIG=1 git ...`\n'
        '- To permanently silence this, install pre-commit with the '
        '--allow-missing-config option\n'
        '- To uninstall pre-commit run `pre-commit uninstall`\n'
    )
+
+
def test_validate_config_skip_missing_config(capsys):
    """With skip_on_missing_config the caller's retcode is preserved."""
    with pytest.raises(SystemExit) as excinfo:
        hook_impl._validate_config(123, 'DNE.yaml', True)
    assert excinfo.value.args == (123,)
    out, _ = capsys.readouterr()
    assert out == '`DNE.yaml` config file not found. Skipping `pre-commit`.\n'
+
+
def test_validate_config_skip_via_env_variable(capsys):
    """PRE_COMMIT_ALLOW_NO_CONFIG=1 also skips the missing-config error."""
    with pytest.raises(SystemExit) as excinfo:
        with envcontext((('PRE_COMMIT_ALLOW_NO_CONFIG', '1'),)):
            hook_impl._validate_config(0, 'DNE.yaml', False)
    assert excinfo.value.args == (0,)
    out, _ = capsys.readouterr()
    assert out == '`DNE.yaml` config file not found. Skipping `pre-commit`.\n'
+
+
def test_run_legacy_does_not_exist(tmpdir):
    """No legacy hook present: success and empty stdin passthrough."""
    assert hook_impl._run_legacy('pre-commit', tmpdir, ()) == (0, b'')
+
+
def test_run_legacy_executes_legacy_script(tmpdir, capfd):
    """A `.legacy` hook script is executed with the original arguments."""
    legacy = tmpdir.join('pre-commit.legacy')
    legacy.write('#!/usr/bin/env bash\necho hi "$@"\nexit 1\n')
    make_executable(legacy)
    result = hook_impl._run_legacy('pre-commit', tmpdir, ('arg1', 'arg2'))
    assert capfd.readouterr().out.strip() == 'hi arg1 arg2'
    assert result == (1, b'')
+
+
def test_run_legacy_pre_push_returns_stdin(tmpdir):
    """pre-push consumes stdin and hands it back for the real hook."""
    with mock.patch.object(sys.stdin.buffer, 'read', return_value=b'stdin'):
        assert hook_impl._run_legacy('pre-push', tmpdir, ()) == (0, b'stdin')
+
+
def test_run_legacy_recursive(tmpdir):
    """A legacy hook that re-invokes pre-commit must abort, not recurse."""
    hook = tmpdir.join('pre-commit.legacy').ensure()
    make_executable(hook)

    # simulate a call being recursive
    def call(*_, **__):
        return hook_impl._run_legacy('pre-commit', tmpdir, ())

    with mock.patch.object(subprocess, 'run', call):
        with pytest.raises(SystemExit):
            call()
+
+
@pytest.mark.parametrize(
    ('hook_type', 'args'),
    (
        ('pre-commit', []),
        ('pre-merge-commit', []),
        ('pre-push', ['branch_name', 'remote_name']),
        ('commit-msg', ['.git/COMMIT_EDITMSG']),
        ('post-commit', []),
        ('post-merge', ['1']),
        ('pre-rebase', ['main', 'topic']),
        ('pre-rebase', ['main']),
        ('post-checkout', ['old_head', 'new_head', '1']),
        ('post-rewrite', ['amend']),
        # multiple choices for commit-editmsg
        ('prepare-commit-msg', ['.git/COMMIT_EDITMSG']),
        ('prepare-commit-msg', ['.git/COMMIT_EDITMSG', 'message']),
        ('prepare-commit-msg', ['.git/COMMIT_EDITMSG', 'commit', 'deadbeef']),
    ),
)
def test_check_args_length_ok(hook_type, args):
    """Each git hook's legal argument counts pass validation silently."""
    hook_impl._check_args_length(hook_type, args)
+
+
def test_check_args_length_error_too_many_plural():
    """Extra args produce a plural 'arguments' error message."""
    with pytest.raises(SystemExit) as excinfo:
        hook_impl._check_args_length('pre-commit', ['run', '--all-files'])
    assert excinfo.value.args == (
        'hook-impl for pre-commit expected 0 arguments but got 2: '
        "['run', '--all-files']",
    )
+
+
def test_check_args_length_error_too_many_singular():
    """A missing required arg produces a singular 'argument' message."""
    with pytest.raises(SystemExit) as excinfo:
        hook_impl._check_args_length('commit-msg', [])
    expected = 'hook-impl for commit-msg expected 1 argument but got 0: []'
    assert excinfo.value.args == (expected,)
+
+
def test_check_args_length_prepare_commit_msg_error():
    """prepare-commit-msg accepts 1-3 args; zero is an error."""
    with pytest.raises(SystemExit) as excinfo:
        hook_impl._check_args_length('prepare-commit-msg', [])
    assert excinfo.value.args == (
        'hook-impl for prepare-commit-msg expected 1, 2, or 3 arguments '
        'but got 0: []',
    )
+
+
def test_check_args_length_pre_rebase_error():
    """pre-rebase accepts 1 or 2 args; zero is an error."""
    with pytest.raises(SystemExit) as excinfo:
        hook_impl._check_args_length('pre-rebase', [])
    expected = 'hook-impl for pre-rebase expected 1 or 2 arguments but got 0: []'  # noqa: E501
    assert excinfo.value.args == (expected,)
+
+
def test_run_ns_pre_commit():
    """pre-commit maps to its hook stage with no extra namespace fields."""
    ns = hook_impl._run_ns('pre-commit', True, (), b'')
    assert ns is not None
    assert (ns.hook_stage, ns.color) == ('pre-commit', True)
+
+
def test_run_ns_pre_rebase():
    """pre-rebase always gets the upstream; the branch only when given."""
    cases = (
        (('main', 'topic'), 'topic'),
        (('main',), None),
    )
    for args, expected_branch in cases:
        ns = hook_impl._run_ns('pre-rebase', True, args, b'')
        assert ns is not None
        assert (ns.hook_stage, ns.color) == ('pre-rebase', True)
        assert ns.pre_rebase_upstream == 'main'
        assert ns.pre_rebase_branch == expected_branch
+
+
def test_run_ns_commit_msg():
    """commit-msg forwards the message filename."""
    ns = hook_impl._run_ns('commit-msg', False, ('.git/COMMIT_MSG',), b'')
    assert ns is not None
    assert (ns.hook_stage, ns.color) == ('commit-msg', False)
    assert ns.commit_msg_filename == '.git/COMMIT_MSG'
+
+
def test_run_ns_prepare_commit_msg_one_arg():
    """One arg: only the message filename is populated."""
    args = ('.git/COMMIT_MSG',)
    ns = hook_impl._run_ns('prepare-commit-msg', False, args, b'')
    assert ns is not None
    assert (ns.hook_stage, ns.color) == ('prepare-commit-msg', False)
    assert ns.commit_msg_filename == '.git/COMMIT_MSG'
+
+
def test_run_ns_prepare_commit_msg_two_arg():
    """Two args: the commit-message source is populated as well."""
    args = ('.git/COMMIT_MSG', 'message')
    ns = hook_impl._run_ns('prepare-commit-msg', False, args, b'')
    assert ns is not None
    assert (ns.hook_stage, ns.color) == ('prepare-commit-msg', False)
    assert ns.commit_msg_filename == '.git/COMMIT_MSG'
    assert ns.prepare_commit_message_source == 'message'
+
+
def test_run_ns_prepare_commit_msg_three_arg():
    """Three args: source and commit object name are both populated."""
    args = ('.git/COMMIT_MSG', 'message', 'HEAD')
    ns = hook_impl._run_ns('prepare-commit-msg', False, args, b'')
    assert ns is not None
    assert (ns.hook_stage, ns.color) == ('prepare-commit-msg', False)
    assert ns.commit_msg_filename == '.git/COMMIT_MSG'
    assert ns.prepare_commit_message_source == 'message'
    assert ns.commit_object_name == 'HEAD'
+
+
def test_run_ns_post_commit():
    """post-commit needs no extra arguments."""
    ns = hook_impl._run_ns('post-commit', True, (), b'')
    assert ns is not None
    assert (ns.hook_stage, ns.color) == ('post-commit', True)
+
+
def test_run_ns_post_merge():
    """post-merge forwards the squash-merge flag."""
    ns = hook_impl._run_ns('post-merge', True, ('1',), b'')
    assert ns is not None
    assert (ns.hook_stage, ns.color) == ('post-merge', True)
    assert ns.is_squash_merge == '1'
+
+
def test_run_ns_post_rewrite():
    """post-rewrite forwards the rewrite command (amend / rebase)."""
    ns = hook_impl._run_ns('post-rewrite', True, ('amend',), b'')
    assert ns is not None
    assert (ns.hook_stage, ns.color) == ('post-rewrite', True)
    assert ns.rewrite_command == 'amend'
+
+
def test_run_ns_post_checkout():
    """post-checkout forwards the from/to refs and the checkout type."""
    ns = hook_impl._run_ns('post-checkout', True, ('a', 'b', 'c'), b'')
    assert ns is not None
    assert (ns.hook_stage, ns.color) == ('post-checkout', True)
    assert (ns.from_ref, ns.to_ref, ns.checkout_type) == ('a', 'b', 'c')
+
+
@pytest.fixture
def push_example(tempdir_factory):
    """Build a source repo and a clone that is one commit ahead.

    Returns ``(src_path, src_head_rev, clone_path, clone_head_rev)``.
    """
    src = git_dir(tempdir_factory)
    git_commit(cwd=src)
    src_head = git.head_rev(src)

    clone = tempdir_factory.get()
    cmd_output('git', 'clone', src, clone)
    git_commit(cwd=clone)
    clone_head = git.head_rev(clone)
    return (src, src_head, clone, clone_head)
+
+
def test_run_ns_pre_push_updating_branch(push_example):
    """Pushing new commits to an existing branch fills from/to refs."""
    src, src_head, clone, clone_head = push_example

    with cwd(clone):
        args = ('origin', src)
        stdin = f'HEAD {clone_head} refs/heads/b {src_head}\n'.encode()
        ns = hook_impl._run_ns('pre-push', False, args, stdin)

    assert ns is not None
    assert ns.hook_stage == 'pre-push'
    assert ns.color is False
    assert ns.remote_name == 'origin'
    assert ns.remote_url == src
    assert ns.from_ref == src_head
    assert ns.to_ref == clone_head
    # only the pushed range is checked, not the whole tree
    assert ns.all_files is False
+
+
def test_run_ns_pre_push_new_branch(push_example):
    """A brand-new ref (zero remote sha) still diffs against known revs."""
    src, src_head, clone, clone_head = push_example

    with cwd(clone):
        stdin = f'HEAD {clone_head} refs/heads/b {hook_impl.Z40}\n'.encode()
        ns = hook_impl._run_ns('pre-push', False, ('origin', src), stdin)

    assert ns is not None
    assert (ns.from_ref, ns.to_ref) == (src_head, clone_head)
+
+
def test_run_ns_pre_push_new_branch_existing_rev(push_example):
    """A new branch pointing at an already-pushed rev has nothing to run."""
    src, src_head, clone, _ = push_example

    with cwd(clone):
        stdin = f'HEAD {src_head} refs/heads/b2 {hook_impl.Z40}\n'.encode()
        ns = hook_impl._run_ns('pre-push', False, ('origin', src), stdin)

    assert ns is None
+
+
def test_run_ns_pre_push_ref_with_whitespace(push_example):
    """Local refs containing spaces are parsed without being split apart."""
    src, src_head, clone, _ = push_example

    with cwd(clone):
        stdin = (
            f'HEAD^{{/ }} {src_head} refs/heads/b2 {hook_impl.Z40}\n'.encode()
        )
        ns = hook_impl._run_ns('pre-push', False, ('origin', src), stdin)

    assert ns is None
+
+
def test_pushing_orphan_branch(push_example):
    """An orphan branch shares no history, so all files are checked."""
    src, src_head, clone, _ = push_example

    cmd_output('git', 'checkout', '--orphan', 'b2', cwd=clone)
    git_commit(cwd=clone, msg='something else to get unique hash')
    clone_rev = git.head_rev(clone)

    with cwd(clone):
        args = ('origin', src)
        stdin = f'HEAD {clone_rev} refs/heads/b2 {hook_impl.Z40}\n'.encode()
        ns = hook_impl._run_ns('pre-push', False, args, stdin)

    assert ns is not None
    assert ns.all_files is True
+
+
def test_run_ns_pre_push_deleting_branch(push_example):
    """Deleting a remote branch pushes no commits, so nothing runs."""
    src, src_head, clone, _ = push_example

    with cwd(clone):
        stdin = f'(delete) {hook_impl.Z40} refs/heads/b {src_head}'.encode()
        ns = hook_impl._run_ns('pre-push', False, ('origin', src), stdin)

    assert ns is None
+
+
def test_hook_impl_main_noop_pre_push(cap_out, store, push_example):
    """A delete-only push runs no hooks and produces no output."""
    src, src_head, clone, _ = push_example

    stdin = f'(delete) {hook_impl.Z40} refs/heads/b {src_head}'.encode()
    with mock.patch.object(sys.stdin.buffer, 'read', return_value=stdin):
        with cwd(clone):
            write_config('.', sample_local_config())
            ret = hook_impl.hook_impl(
                store,
                config=C.CONFIG_FILE,
                color=False,
                hook_type='pre-push',
                hook_dir='.git/hooks',
                skip_on_missing_config=False,
                args=('origin', src),
            )
    assert ret == 0
    assert cap_out.get() == ''
+
+
def test_hook_impl_main_runs_hooks(cap_out, tempdir_factory, store):
    """The full hook-impl entry point runs the configured hooks."""
    with cwd(git_dir(tempdir_factory)):
        write_config('.', sample_local_config())
        ret = hook_impl.hook_impl(
            store,
            config=C.CONFIG_FILE,
            color=False,
            hook_type='pre-commit',
            hook_dir='.git/hooks',
            skip_on_missing_config=False,
            args=(),
        )
        assert ret == 0
        expected = '''\
Block if "DO NOT COMMIT" is found....................(no files to check)Skipped
'''
        assert cap_out.get() == expected
diff --git a/tests/commands/init_templatedir_test.py b/tests/commands/init_templatedir_test.py
new file mode 100644
index 0000000..28f29b7
--- /dev/null
+++ b/tests/commands/init_templatedir_test.py
@@ -0,0 +1,142 @@
+from __future__ import annotations
+
+import os.path
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit.commands.init_templatedir import init_templatedir
+from pre_commit.envcontext import envcontext
+from pre_commit.util import cmd_output
+from testing.fixtures import git_dir
+from testing.fixtures import make_consuming_repo
+from testing.util import cmd_output_mocked_pre_commit_home
+from testing.util import cwd
+from testing.util import git_commit
+
+
def test_init_templatedir(tmpdir, tempdir_factory, store, cap_out):
    """Hooks installed into a template dir run in repos cloned from it."""
    target = str(tmpdir.join('tmpl'))
    init_templatedir(C.CONFIG_FILE, store, target, hook_types=['pre-commit'])
    lines = cap_out.get().splitlines()
    assert lines[0].startswith('pre-commit installed at ')
    # init.templateDir is not configured here, so warnings are emitted
    assert lines[1] == (
        '[WARNING] `init.templateDir` not set to the target directory'
    )
    assert lines[2].startswith(
        '[WARNING] maybe `git config --global init.templateDir',
    )

    with envcontext((('GIT_TEMPLATE_DIR', target),)):
        path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')

    with cwd(path):
        retcode, output = git_commit(
            fn=cmd_output_mocked_pre_commit_home,
            tempdir_factory=tempdir_factory,
        )
        assert retcode == 0
        assert 'Bash hook....' in output
+
+
def test_init_templatedir_already_set(tmpdir, tempdir_factory, store, cap_out):
    """No warning is printed when init.templateDir already matches."""
    target = str(tmpdir.join('tmpl'))
    repo_path = git_dir(tempdir_factory)
    with cwd(repo_path):
        cmd_output('git', 'config', 'init.templateDir', target)
        init_templatedir(
            C.CONFIG_FILE, store, target, hook_types=['pre-commit'],
        )

    lines = cap_out.get().splitlines()
    assert len(lines) == 1
    assert lines[0].startswith('pre-commit installed at')
+
+
def test_init_templatedir_not_set(tmpdir, store, cap_out):
    """An unset init.templateDir produces the mismatch warning."""
    # set HOME to ignore the current `.gitconfig`
    with envcontext((('HOME', str(tmpdir)),)):
        with tmpdir.join('tmpl').ensure_dir().as_cwd():
            # we have not set init.templateDir so this should produce a warning
            init_templatedir(
                C.CONFIG_FILE, store, '.', hook_types=['pre-commit'],
            )

    lines = cap_out.get().splitlines()
    assert len(lines) == 3
    assert lines[1] == (
        '[WARNING] `init.templateDir` not set to the target directory'
    )
+
+
def test_init_templatedir_expanduser(tmpdir, tempdir_factory, store, cap_out):
    """A `~`-style templateDir value is expanded before comparison."""
    target = str(tmpdir.join('tmpl'))
    repo_path = git_dir(tempdir_factory)
    with cwd(repo_path):
        cmd_output('git', 'config', 'init.templateDir', '~/templatedir')
        with mock.patch.object(os.path, 'expanduser', return_value=target):
            init_templatedir(
                C.CONFIG_FILE, store, target, hook_types=['pre-commit'],
            )

    lines = cap_out.get().splitlines()
    assert len(lines) == 1
    assert lines[0].startswith('pre-commit installed at')
+
+
def test_init_templatedir_hookspath_set(tmpdir, tempdir_factory, store):
    """core.hooksPath is honored when writing into the template dir."""
    target = tmpdir.join('tmpl')
    repo_path = git_dir(tempdir_factory)
    with cwd(repo_path):
        cmd_output('git', 'config', '--local', 'core.hooksPath', 'hooks')
        init_templatedir(
            C.CONFIG_FILE, store, target, hook_types=['pre-commit'],
        )
    assert target.join('hooks/pre-commit').exists()
+
+
@pytest.mark.parametrize(
    ('skip', 'commit_retcode', 'commit_output_snippet'),
    (
        (True, 0, 'Skipping `pre-commit`.'),
        (False, 1, f'No {C.CONFIG_FILE} file was found'),
    ),
)
def test_init_templatedir_skip_on_missing_config(
    tmpdir,
    tempdir_factory,
    store,
    cap_out,
    skip,
    commit_retcode,
    commit_output_snippet,
):
    """skip_on_missing_config decides whether a config-less repo commits."""
    target = str(tmpdir.join('tmpl'))
    init_git_dir = git_dir(tempdir_factory)
    with cwd(init_git_dir):
        cmd_output('git', 'config', 'init.templateDir', target)
        init_templatedir(
            C.CONFIG_FILE,
            store,
            target,
            hook_types=['pre-commit'],
            skip_on_missing_config=skip,
        )

    lines = cap_out.get().splitlines()
    assert len(lines) == 1
    assert lines[0].startswith('pre-commit installed at')

    # a fresh repo cloned from the template has the hook but no config
    with envcontext((('GIT_TEMPLATE_DIR', target),)):
        verify_git_dir = git_dir(tempdir_factory)

    with cwd(verify_git_dir):
        retcode, output = git_commit(
            fn=cmd_output_mocked_pre_commit_home,
            tempdir_factory=tempdir_factory,
            check=False,
        )

        assert retcode == commit_retcode
        assert commit_output_snippet in output
diff --git a/tests/commands/install_uninstall_test.py b/tests/commands/install_uninstall_test.py
new file mode 100644
index 0000000..9eb0e74
--- /dev/null
+++ b/tests/commands/install_uninstall_test.py
@@ -0,0 +1,1104 @@
+from __future__ import annotations
+
+import os.path
+import re
+
+import re_assert
+
+import pre_commit.constants as C
+from pre_commit import git
+from pre_commit.commands.install_uninstall import _hook_types
+from pre_commit.commands.install_uninstall import CURRENT_HASH
+from pre_commit.commands.install_uninstall import install
+from pre_commit.commands.install_uninstall import install_hooks
+from pre_commit.commands.install_uninstall import is_our_script
+from pre_commit.commands.install_uninstall import PRIOR_HASHES
+from pre_commit.commands.install_uninstall import uninstall
+from pre_commit.parse_shebang import find_executable
+from pre_commit.util import cmd_output
+from pre_commit.util import make_executable
+from pre_commit.util import resource_text
+from testing.fixtures import add_config_to_repo
+from testing.fixtures import git_dir
+from testing.fixtures import make_consuming_repo
+from testing.fixtures import remove_config_from_repo
+from testing.fixtures import write_config
+from testing.util import cmd_output_mocked_pre_commit_home
+from testing.util import cwd
+from testing.util import git_commit
+
+
def test_hook_types_explicitly_listed():
    """An explicit hook type list wins over any configuration."""
    assert _hook_types(os.devnull, ['pre-push']) == ['pre-push']
+
+
def test_hook_types_default_value_when_not_specified():
    """With no CLI value and no config, pre-commit is the default."""
    assert _hook_types(os.devnull, None) == ['pre-commit']
+
+
def test_hook_types_configured(tmpdir):
    """default_install_hook_types from the config supplies the default."""
    cfg = tmpdir.join('t.cfg')
    cfg.write('default_install_hook_types: [pre-push]\nrepos: []\n')
    assert _hook_types(str(cfg), None) == ['pre-push']
+
+
def test_hook_types_configured_nonsense(tmpdir):
    """An (unwise) empty configured hook type list is respected as-is."""
    cfg = tmpdir.join('t.cfg')
    cfg.write('default_install_hook_types: []\nrepos: []\n')
    # hopefully the user doesn't do this, but the code allows it!
    assert _hook_types(str(cfg), None) == []
+
+
def test_hook_types_configuration_has_error(tmpdir):
    """An unparseable config falls back to the pre-commit default."""
    cfg = tmpdir.join('t.cfg')
    cfg.write('[')
    assert _hook_types(str(cfg), None) == ['pre-commit']
+
+
def test_is_not_script():
    """An arbitrary file is not recognized as a pre-commit shim."""
    assert is_our_script('setup.py') is False
+
+
def test_is_script():
    """The shipped hook template is recognized as ours."""
    assert is_our_script('pre_commit/resources/hook-tmpl')
+
+
def test_is_previous_pre_commit(tmpdir):
    """Hooks written by older pre-commit versions are still recognized."""
    hook = tmpdir.join('foo')
    hook.write(f'{PRIOR_HASHES[0].decode()}\n')
    assert is_our_script(hook.strpath)
+
+
def test_install_pre_commit(in_git_dir, store):
    """install writes an executable hook for each requested type."""
    for hook_type in ('pre-commit', 'pre-push'):
        assert not install(C.CONFIG_FILE, store, hook_types=[hook_type])
        hook = in_git_dir.join(f'.git/hooks/{hook_type}')
        assert os.access(hook.strpath, os.X_OK)
+
+
def test_install_hooks_directory_not_present(in_git_dir, store):
    """install must create .git/hooks when the git client didn't."""
    # Simulate some git clients which don't make .git/hooks #234
    if in_git_dir.join('.git/hooks').exists():  # pragma: no cover (odd git)
        in_git_dir.join('.git/hooks').remove()
    install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
    assert in_git_dir.join('.git/hooks/pre-commit').exists()
+
+
def test_install_multiple_hooks_at_once(in_git_dir, store):
    """Several hook types install and uninstall together."""
    hook_types = ['pre-commit', 'pre-push']
    install(C.CONFIG_FILE, store, hook_types=hook_types)
    for hook_type in hook_types:
        assert in_git_dir.join(f'.git/hooks/{hook_type}').exists()
    uninstall(C.CONFIG_FILE, hook_types=hook_types)
    for hook_type in hook_types:
        assert not in_git_dir.join(f'.git/hooks/{hook_type}').exists()
+
+
def test_install_refuses_core_hookspath(in_git_dir, store):
    """install fails when core.hooksPath is set (hook would be ignored)."""
    cmd_output('git', 'config', '--local', 'core.hooksPath', 'hooks')
    assert install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
+
+
def test_install_hooks_dead_symlink(in_git_dir, store):
    """A dangling pre-existing symlink is replaced rather than crashing."""
    hook = in_git_dir.join('.git/hooks').ensure_dir().join('pre-commit')
    os.symlink('/fake/baz', hook.strpath)
    install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
    assert hook.exists()
+
+
def test_uninstall_does_not_blow_up_when_not_there(in_git_dir):
    """Uninstalling when nothing is installed succeeds quietly."""
    assert uninstall(C.CONFIG_FILE, hook_types=['pre-commit']) == 0
+
+
def test_uninstall(in_git_dir, store):
    """uninstall removes the hook that install created."""
    hook = in_git_dir.join('.git/hooks/pre-commit')
    assert not hook.exists()
    install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
    assert hook.exists()
    uninstall(C.CONFIG_FILE, hook_types=['pre-commit'])
    assert not hook.exists()
+
+
def _get_commit_output(tempdir_factory, touch_file='foo', **kwargs):
    """Stage `touch_file` (creating it) and commit; return (retcode, out)."""
    with open(touch_file, 'a'):
        pass
    cmd_output('git', 'add', touch_file)
    return git_commit(
        fn=cmd_output_mocked_pre_commit_home,
        check=False,
        tempdir_factory=tempdir_factory,
        **kwargs,
    )
+
+
# osx does this different :(
# regex fragment matching git's summary line for a single empty new file
FILES_CHANGED = (
    r'('
    r' 1 file changed, 0 insertions\(\+\), 0 deletions\(-\)\n'
    r'|'
    r' 0 files changed\n'
    r')'
)


# expected full output of a successful hook run followed by the commit summary
NORMAL_PRE_COMMIT_RUN = re_assert.Matches(
    fr'^\[INFO\] Initializing environment for .+\.\n'
    fr'Bash hook\.+Passed\n'
    fr'\[master [a-f0-9]{{7}}\] commit!\n'
    fr'{FILES_CHANGED}'
    fr' create mode 100644 foo\n$',
)
+
+
def test_install_pre_commit_and_run(tempdir_factory, store):
    """An installed hook runs and passes on commit."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0

        retcode, output = _get_commit_output(tempdir_factory)
        assert retcode == 0
        NORMAL_PRE_COMMIT_RUN.assert_matches(output)
+
+
def test_install_pre_commit_and_run_custom_path(tempdir_factory, store):
    """install works with a non-default config filename."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        cmd_output('git', 'mv', C.CONFIG_FILE, 'custom.yaml')
        git_commit(cwd=path)
        assert install('custom.yaml', store, hook_types=['pre-commit']) == 0

        retcode, output = _get_commit_output(tempdir_factory)
        assert retcode == 0
        NORMAL_PRE_COMMIT_RUN.assert_matches(output)
+
+
def test_install_in_submodule_and_run(tempdir_factory, store):
    """Hooks installed inside a git submodule run on commit."""
    src_path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    parent_path = git_dir(tempdir_factory)
    cmd_output('git', 'submodule', 'add', src_path, 'sub', cwd=parent_path)
    git_commit(cwd=parent_path)

    sub_pth = os.path.join(parent_path, 'sub')
    with cwd(sub_pth):
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0
        ret, output = _get_commit_output(tempdir_factory)
        assert ret == 0
        NORMAL_PRE_COMMIT_RUN.assert_matches(output)
+
+
def test_install_in_worktree_and_run(tempdir_factory, store):
    """Hooks installed inside a linked git worktree run on commit."""
    src_path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    path = tempdir_factory.get()
    # rename the primary branch so the new worktree can own `master`
    cmd_output('git', '-C', src_path, 'branch', '-m', 'notmaster')
    cmd_output('git', '-C', src_path, 'worktree', 'add', path, '-b', 'master')

    with cwd(path):
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0
        ret, output = _get_commit_output(tempdir_factory)
        assert ret == 0
        NORMAL_PRE_COMMIT_RUN.assert_matches(output)
+
+
def test_commit_am(tempdir_factory, store):
    """Regression test for #322: commit succeeds with an unstaged change."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        # Make an unstaged change
        open('unstaged', 'w').close()
        cmd_output('git', 'add', '.')
        git_commit(cwd=path)
        with open('unstaged', 'w') as foo_file:
            foo_file.write('Oh hai')

        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0

        ret, output = _get_commit_output(tempdir_factory)
        assert ret == 0
+
+
def test_unicode_merge_commit_message(tempdir_factory, store):
    """A non-ascii merge message must not crash the hook (regression)."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0
        cmd_output('git', 'checkout', 'master', '-b', 'foo')
        git_commit('-n', cwd=path)
        cmd_output('git', 'checkout', 'master')
        cmd_output('git', 'merge', 'foo', '--no-ff', '--no-commit', '-m', 'β˜ƒ')
        # Used to crash
        git_commit(
            '--no-edit',
            msg=None,
            fn=cmd_output_mocked_pre_commit_home,
            tempdir_factory=tempdir_factory,
        )
+
+
def test_install_idempotent(tempdir_factory, store):
    """Installing twice behaves the same as installing once."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        for _ in range(2):
            assert install(
                C.CONFIG_FILE, store, hook_types=['pre-commit'],
            ) == 0

        retcode, output = _get_commit_output(tempdir_factory)
        assert retcode == 0
        NORMAL_PRE_COMMIT_RUN.assert_matches(output)
+
+
def _path_without_us():
    """Return a PATH with every directory containing `pre-commit` removed."""
    # Choose a path which *probably* doesn't include us
    env = dict(os.environ)
    exe = find_executable('pre-commit', env=env)
    while exe:
        parts = env['PATH'].split(os.pathsep)
        after = [
            x for x in parts
            if x.lower().rstrip(os.sep) != os.path.dirname(exe).lower()
        ]
        # each pass must shrink PATH, otherwise we would loop forever
        if parts == after:
            raise AssertionError(exe, parts)
        env['PATH'] = os.pathsep.join(after)
        exe = find_executable('pre-commit', env=env)
    return env['PATH']
+
+
def test_environment_not_sourced(tempdir_factory, store):
    """Without pre-commit on PATH the hook fails with a helpful message."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        assert not install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
        # simulate deleting the virtualenv by rewriting the exe
        hook = os.path.join(path, '.git/hooks/pre-commit')
        with open(hook) as f:
            src = f.read()
        src = re.sub('\nINSTALL_PYTHON=.*\n', '\nINSTALL_PYTHON="/dne"\n', src)
        with open(hook, 'w') as f:
            f.write(src)

        # Use a specific homedir to ignore --user installs
        homedir = tempdir_factory.get()
        env = {
            'HOME': homedir,
            'PATH': _path_without_us(),
            # Git needs this to make a commit
            'GIT_AUTHOR_NAME': os.environ['GIT_AUTHOR_NAME'],
            'GIT_COMMITTER_NAME': os.environ['GIT_COMMITTER_NAME'],
            'GIT_AUTHOR_EMAIL': os.environ['GIT_AUTHOR_EMAIL'],
            'GIT_COMMITTER_EMAIL': os.environ['GIT_COMMITTER_EMAIL'],
        }
        if os.name == 'nt' and 'PATHEXT' in os.environ:  # pragma: no cover
            env['PATHEXT'] = os.environ['PATHEXT']

        ret, out = git_commit(env=env, check=False)
        assert ret == 1
        assert out == (
            '`pre-commit` not found. '
            'Did you forget to activate your virtualenv?\n'
        )
+
+
# Expected full commit output when the `failing_hook` hook fails.
FAILING_PRE_COMMIT_RUN = re_assert.Matches(
    r'^\[INFO\] Initializing environment for .+\.\n'
    r'Failing hook\.+Failed\n'
    r'- hook id: failing_hook\n'
    r'- exit code: 1\n'
    r'\n'
    r'Fail\n'
    r'foo\n'
    r'\n$',
)
+
+
def test_failing_hooks_returns_nonzero(tempdir_factory, store):
    """A failing hook makes the commit exit with status 1."""
    repo = make_consuming_repo(tempdir_factory, 'failing_hook_repo')
    with cwd(repo):
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0

        retcode, out = _get_commit_output(tempdir_factory)
        assert retcode == 1
        FAILING_PRE_COMMIT_RUN.assert_matches(out)
+
+
# Expected commit output when only the legacy (non-pre-commit) hook runs.
EXISTING_COMMIT_RUN = re_assert.Matches(
    fr'^legacy hook\n'
    fr'\[master [a-f0-9]{{7}}\] commit!\n'
    fr'{FILES_CHANGED}'
    fr' create mode 100644 baz\n$',
)
+
+
def _write_legacy_hook(path):
    """Write an executable non-pre-commit `pre-commit` hook script into `path`."""
    hooks_dir = os.path.join(path, '.git/hooks')
    os.makedirs(hooks_dir, exist_ok=True)
    hook_path = os.path.join(hooks_dir, 'pre-commit')
    with open(hook_path, 'w') as f:
        f.write('#!/usr/bin/env bash\necho legacy hook\n')
    make_executable(hook_path)
+
+
def test_install_existing_hooks_no_overwrite(tempdir_factory, store):
    """Installing without `--overwrite` preserves an existing legacy hook:
    both the legacy hook and pre-commit run on commit."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        _write_legacy_hook(path)

        # Make sure we installed the "old" hook correctly
        ret, output = _get_commit_output(tempdir_factory, touch_file='baz')
        assert ret == 0
        EXISTING_COMMIT_RUN.assert_matches(output)

        # Now install pre-commit (no-overwrite)
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0

        # We should run both the legacy and pre-commit hooks
        ret, output = _get_commit_output(tempdir_factory)
        assert ret == 0
        legacy = 'legacy hook\n'
        assert output.startswith(legacy)
        NORMAL_PRE_COMMIT_RUN.assert_matches(output.removeprefix(legacy))
+
+
def test_legacy_overwriting_legacy_hook(tempdir_factory, store):
    """Re-installing on top of a re-written legacy hook must succeed.

    This previously crashed on windows (see #1010).
    """
    repo = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(repo):
        for _ in range(2):
            _write_legacy_hook(repo)
            assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0
+
+
def test_install_existing_hook_no_overwrite_idempotent(tempdir_factory, store):
    """Installing twice over a legacy hook still runs both the legacy hook
    and pre-commit exactly once each."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        _write_legacy_hook(path)

        # Install twice
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0

        # We should run both the legacy and pre-commit hooks
        ret, output = _get_commit_output(tempdir_factory)
        assert ret == 0
        legacy = 'legacy hook\n'
        assert output.startswith(legacy)
        NORMAL_PRE_COMMIT_RUN.assert_matches(output.removeprefix(legacy))
+
+
def test_install_with_existing_non_utf8_script(tmpdir, store):
    """Install succeeds even when the pre-existing hook file is not valid UTF-8."""
    cmd_output('git', 'init', str(tmpdir))
    tmpdir.join('.git/hooks').ensure_dir()
    # deliberately invalid UTF-8 bytes in the middle of the script
    tmpdir.join('.git/hooks/pre-commit').write_binary(
        b'#!/usr/bin/env bash\n'
        b'# garbage: \xa0\xef\x12\xf2\n'
        b'echo legacy hook\n',
    )

    with tmpdir.as_cwd():
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0
+
+
# Expected output when a pre-existing (legacy) hook fails: its "fail!" output
# comes first, then pre-commit's own hooks still run.
FAIL_OLD_HOOK = re_assert.Matches(
    r'fail!\n'
    r'\[INFO\] Initializing environment for .+\.\n'
    r'Bash hook\.+Passed\n',
)
+
+
def test_failing_existing_hook_returns_1(tempdir_factory, store):
    """A failing legacy hook makes the commit fail even if pre-commit's own
    hooks pass."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        # Write out a failing "old" hook
        os.makedirs(os.path.join(path, '.git/hooks'), exist_ok=True)
        with open(os.path.join(path, '.git/hooks/pre-commit'), 'w') as f:
            f.write('#!/usr/bin/env bash\necho "fail!"\nexit 1\n')
            make_executable(f.name)

        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0

        # We should get a failure from the legacy hook
        ret, output = _get_commit_output(tempdir_factory)
        assert ret == 1
        FAIL_OLD_HOOK.assert_matches(output)
+
+
def test_install_overwrite_no_existing_hooks(tempdir_factory, store):
    """`--overwrite` works fine when no previous hook exists."""
    repo = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(repo):
        ret = install(
            C.CONFIG_FILE, store, hook_types=['pre-commit'], overwrite=True,
        )
        assert not ret

        retcode, out = _get_commit_output(tempdir_factory)
        assert retcode == 0
        NORMAL_PRE_COMMIT_RUN.assert_matches(out)
+
+
def test_install_overwrite(tempdir_factory, store):
    """`--overwrite` replaces a legacy hook: only pre-commit runs afterwards."""
    repo = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(repo):
        _write_legacy_hook(repo)
        ret = install(
            C.CONFIG_FILE, store, hook_types=['pre-commit'], overwrite=True,
        )
        assert not ret

        retcode, out = _get_commit_output(tempdir_factory)
        assert retcode == 0
        # no "legacy hook" prefix expected -- the old hook was overwritten
        NORMAL_PRE_COMMIT_RUN.assert_matches(out)
+
+
def test_uninstall_restores_legacy_hooks(tempdir_factory, store):
    """Uninstalling pre-commit puts back the legacy hook it displaced."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        _write_legacy_hook(path)

        # Now install and uninstall pre-commit
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0
        assert uninstall(C.CONFIG_FILE, hook_types=['pre-commit']) == 0

        # Make sure we installed the "old" hook correctly
        ret, output = _get_commit_output(tempdir_factory, touch_file='baz')
        assert ret == 0
        EXISTING_COMMIT_RUN.assert_matches(output)
+
+
def test_replace_old_commit_script(tempdir_factory, store):
    """A hook generated by an older pre-commit version (identified by a prior
    template hash) is replaced, not treated as a legacy hook."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        # Install a script that looks like our old script
        pre_commit_contents = resource_text('hook-tmpl')
        new_contents = pre_commit_contents.replace(
            CURRENT_HASH.decode(), PRIOR_HASHES[-1].decode(),
        )

        os.makedirs(os.path.join(path, '.git/hooks'), exist_ok=True)
        with open(os.path.join(path, '.git/hooks/pre-commit'), 'w') as f:
            f.write(new_contents)
            make_executable(f.name)

        # Install normally
        assert install(C.CONFIG_FILE, store, hook_types=['pre-commit']) == 0

        # a normal run -- no "legacy hook" output expected
        ret, output = _get_commit_output(tempdir_factory)
        assert ret == 0
        NORMAL_PRE_COMMIT_RUN.assert_matches(output)
+
+
def test_uninstall_doesnt_remove_not_our_hooks(in_git_dir):
    """`uninstall` leaves hooks pre-commit did not install untouched."""
    hook = in_git_dir.join('.git/hooks').ensure_dir().join('pre-commit')
    hook.write('#!/usr/bin/env bash\necho 1\n')
    make_executable(hook.strpath)

    assert uninstall(C.CONFIG_FILE, hook_types=['pre-commit']) == 0

    assert hook.exists()
+
+
# Expected commit output when hook environments were already installed
# (note: no `[INFO] Initializing environment` lines).
PRE_INSTALLED = re_assert.Matches(
    fr'Bash hook\.+Passed\n'
    fr'\[master [a-f0-9]{{7}}\] commit!\n'
    fr'{FILES_CHANGED}'
    fr' create mode 100644 foo\n$',
)
+
+
def test_installs_hooks_with_hooks_True(tempdir_factory, store):
    """`install(..., hooks=True)` pre-installs hook environments, so the first
    commit shows no environment-initialization output."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        install(C.CONFIG_FILE, store, hook_types=['pre-commit'], hooks=True)
        ret, output = _get_commit_output(
            tempdir_factory, pre_commit_home=store.directory,
        )

        assert ret == 0
        PRE_INSTALLED.assert_matches(output)
+
+
def test_install_hooks_command(tempdir_factory, store):
    """The standalone `install-hooks` command pre-installs environments like
    `install --install-hooks` does."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
        install_hooks(C.CONFIG_FILE, store)
        ret, output = _get_commit_output(
            tempdir_factory, pre_commit_home=store.directory,
        )

        assert ret == 0
        PRE_INSTALLED.assert_matches(output)
+
+
def test_installed_from_venv(tempdir_factory, store):
    """The installed hook keeps working with a minimal environment where
    pre-commit is not on PATH, via the interpreter recorded at install time."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
        # No environment so pre-commit is not on the path when running!
        # Should still pick up the python from when we installed
        ret, output = _get_commit_output(
            tempdir_factory,
            env={
                'HOME': os.path.expanduser('~'),
                'PATH': _path_without_us(),
                'TERM': os.environ.get('TERM', ''),
                # Windows needs this to import `random`
                'SYSTEMROOT': os.environ.get('SYSTEMROOT', ''),
                # Windows needs this to resolve executables
                'PATHEXT': os.environ.get('PATHEXT', ''),
                # Git needs this to make a commit
                'GIT_AUTHOR_NAME': os.environ['GIT_AUTHOR_NAME'],
                'GIT_COMMITTER_NAME': os.environ['GIT_COMMITTER_NAME'],
                'GIT_AUTHOR_EMAIL': os.environ['GIT_AUTHOR_EMAIL'],
                'GIT_COMMITTER_EMAIL': os.environ['GIT_COMMITTER_EMAIL'],
            },
        )
        assert ret == 0
        NORMAL_PRE_COMMIT_RUN.assert_matches(output)
+
+
def _get_push_output(tempdir_factory, remote='origin', opts=()):
    """Run `git push <remote> HEAD:new_branch` with a mocked pre-commit home.

    Returns ``(returncode, stdout)``; does not raise on failure.
    """
    retcode, stdout, _ = cmd_output_mocked_pre_commit_home(
        'git', 'push', remote, 'HEAD:new_branch', *opts,
        tempdir_factory=tempdir_factory,
        check=False,
    )
    return retcode, stdout
+
+
def test_pre_push_integration_failing(tempdir_factory, store):
    """A failing pre-push hook blocks the push and reports all pushed files."""
    upstream = make_consuming_repo(tempdir_factory, 'failing_hook_repo')
    path = tempdir_factory.get()
    cmd_output('git', 'clone', upstream, path)
    with cwd(path):
        install(C.CONFIG_FILE, store, hook_types=['pre-push'])
        # commit succeeds because pre-commit is only installed for pre-push
        assert _get_commit_output(tempdir_factory)[0] == 0
        assert _get_commit_output(tempdir_factory, touch_file='zzz')[0] == 0

        retc, output = _get_push_output(tempdir_factory)
        assert retc == 1
        assert 'Failing hook' in output
        assert 'Failed' in output
        assert 'foo zzz' in output  # both filenames should be printed
        assert 'hook id: failing_hook' in output
+
+
def test_pre_push_integration_accepted(tempdir_factory, store):
    """A passing pre-push hook lets the push through."""
    upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    clone = tempdir_factory.get()
    cmd_output('git', 'clone', upstream, clone)
    with cwd(clone):
        install(C.CONFIG_FILE, store, hook_types=['pre-push'])
        assert _get_commit_output(tempdir_factory)[0] == 0

        retcode, out = _get_push_output(tempdir_factory)
        assert retcode == 0
        assert 'Bash hook' in out
        assert 'Passed' in out
+
+
def test_pre_push_force_push_without_fetch(tempdir_factory, store):
    """A force push from a clone that never fetched the pushed-over commits
    still runs the pre-push hooks successfully."""
    upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    path1 = tempdir_factory.get()
    path2 = tempdir_factory.get()
    cmd_output('git', 'clone', upstream, path1)
    cmd_output('git', 'clone', upstream, path2)
    with cwd(path1):
        # first clone populates the remote branch
        assert _get_commit_output(tempdir_factory)[0] == 0
        assert _get_push_output(tempdir_factory)[0] == 0

    with cwd(path2):
        install(C.CONFIG_FILE, store, hook_types=['pre-push'])
        assert _get_commit_output(tempdir_factory, msg='force!')[0] == 0

        retc, output = _get_push_output(tempdir_factory, opts=('--force',))
        assert retc == 0
        assert 'Bash hook' in output
        assert 'Passed' in output
+
+
def test_pre_push_new_upstream(tempdir_factory, store):
    """Pushing to a brand-new remote (no shared history) runs hooks fine."""
    upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    upstream2 = git_dir(tempdir_factory)
    path = tempdir_factory.get()
    cmd_output('git', 'clone', upstream, path)
    with cwd(path):
        install(C.CONFIG_FILE, store, hook_types=['pre-push'])
        assert _get_commit_output(tempdir_factory)[0] == 0

        # swap in an empty repository as the new `origin`
        cmd_output('git', 'remote', 'rename', 'origin', 'upstream')
        cmd_output('git', 'remote', 'add', 'origin', upstream2)
        retc, output = _get_push_output(tempdir_factory)
        assert retc == 0
        assert 'Bash hook' in output
        assert 'Passed' in output
+
+
def test_pre_push_environment_variables(tempdir_factory, store):
    """Pre-push hooks receive PRE_COMMIT_REMOTE_NAME with the remote's name."""
    config = {
        'repo': 'local',
        'hooks': [
            {
                'id': 'print-remote-info',
                'name': 'print remote info',
                'entry': 'bash -c "echo remote: $PRE_COMMIT_REMOTE_NAME"',
                'language': 'system',
                'verbose': True,
            },
        ],
    }

    upstream = git_dir(tempdir_factory)
    clone = tempdir_factory.get()
    cmd_output('git', 'clone', upstream, clone)
    add_config_to_repo(clone, config)
    with cwd(clone):
        install(C.CONFIG_FILE, store, hook_types=['pre-push'])

        # rename the remote to make sure the hook sees the actual name used
        cmd_output('git', 'remote', 'rename', 'origin', 'origin2')
        retc, output = _get_push_output(tempdir_factory, remote='origin2')
        assert retc == 0
        assert '\nremote: origin2\n' in output
+
+
def test_pre_push_integration_empty_push(tempdir_factory, store):
    """Pushing when there is nothing to push succeeds without running hooks."""
    upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    clone = tempdir_factory.get()
    cmd_output('git', 'clone', upstream, clone)
    with cwd(clone):
        install(C.CONFIG_FILE, store, hook_types=['pre-push'])
        # first push creates the remote branch; second push is a no-op
        _get_push_output(tempdir_factory)
        retcode, out = _get_push_output(tempdir_factory)
        assert out == 'Everything up-to-date\n'
        assert retcode == 0
+
+
def test_pre_push_legacy(tempdir_factory, store):
    """A legacy pre-push hook still receives the stdin ref lines and runs
    before pre-commit's pre-push hooks."""
    upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    path = tempdir_factory.get()
    cmd_output('git', 'clone', upstream, path)
    with cwd(path):
        os.makedirs(os.path.join(path, '.git/hooks'), exist_ok=True)
        with open(os.path.join(path, '.git/hooks/pre-push'), 'w') as f:
            # the legacy hook verifies it is fed the standard
            # `<local ref> <local sha> <remote ref> <remote sha>` line on stdin
            f.write(
                '#!/usr/bin/env bash\n'
                'set -eu\n'
                'read lr ls rr rs\n'
                'test -n "$lr" -a -n "$ls" -a -n "$rr" -a -n "$rs"\n'
                'echo legacy\n',
            )
            make_executable(f.name)

        install(C.CONFIG_FILE, store, hook_types=['pre-push'])
        assert _get_commit_output(tempdir_factory)[0] == 0

        retc, output = _get_push_output(tempdir_factory)
        assert retc == 0
        first_line, _, third_line = output.splitlines()[:3]
        assert first_line == 'legacy'
        assert third_line.startswith('Bash hook')
        assert third_line.endswith('Passed')
+
+
def test_commit_msg_integration_failing(
        commit_msg_repo, tempdir_factory, store,
):
    """A commit-msg hook that rejects the message fails the commit."""
    install(C.CONFIG_FILE, store, hook_types=['commit-msg'])
    retc, out = _get_commit_output(tempdir_factory)
    assert retc == 1
    assert out == '''\
Must have "Signed off by:"...............................................Failed
- hook id: must-have-signoff
- exit code: 1
'''
+
+
def test_commit_msg_integration_passing(
        commit_msg_repo, tempdir_factory, store,
):
    """A commit message satisfying the commit-msg hook lets the commit pass."""
    install(C.CONFIG_FILE, store, hook_types=['commit-msg'])
    retcode, out = _get_commit_output(
        tempdir_factory, msg='Hi\nSigned off by: me, lol',
    )
    assert retcode == 0
    status_line = out.splitlines()[0]
    assert status_line.startswith('Must have "Signed off by:"...')
    assert status_line.endswith('...Passed')
+
+
def test_commit_msg_legacy(commit_msg_repo, tempdir_factory, store):
    """A legacy commit-msg hook gets the message file path as $1 and runs
    before pre-commit's commit-msg hooks."""
    hook_path = os.path.join(commit_msg_repo, '.git/hooks/commit-msg')
    os.makedirs(os.path.dirname(hook_path), exist_ok=True)
    with open(hook_path, 'w') as hook_file:
        # asserts $1 (the message file) exists, then prints a marker
        hook_file.write(
            '#!/usr/bin/env bash\n'
            'set -eu\n'
            'test -e "$1"\n'
            'echo legacy\n',
        )
    make_executable(hook_path)

    install(C.CONFIG_FILE, store, hook_types=['commit-msg'])

    msg = 'Hi\nSigned off by: asottile'
    retc, out = _get_commit_output(tempdir_factory, msg=msg)
    assert retc == 0
    first_line, second_line = out.splitlines()[:2]
    assert first_line == 'legacy'
    assert second_line.startswith('Must have "Signed off by:"...')
+
+
def test_post_commit_integration(tempdir_factory, store):
    """A post-commit hook runs after a commit once installed (and not before)."""
    path = git_dir(tempdir_factory)
    config = {
        'repos': [
            {
                'repo': 'local',
                'hooks': [{
                    'id': 'post-commit',
                    'name': 'Post commit',
                    'entry': 'touch post-commit.tmp',
                    'language': 'system',
                    'always_run': True,
                    'verbose': True,
                    'stages': ['post-commit'],
                }],
            },
        ],
    }
    write_config(path, config)
    with cwd(path):
        # before installation the hook must not fire
        _get_commit_output(tempdir_factory)
        assert not os.path.exists('post-commit.tmp')

        install(C.CONFIG_FILE, store, hook_types=['post-commit'])
        _get_commit_output(tempdir_factory)
        assert os.path.exists('post-commit.tmp')
+
+
def test_post_merge_integration(tempdir_factory, store):
    """The post-merge hook fires after a non-trivial (non-fast-forward) merge."""
    path = git_dir(tempdir_factory)
    config = {
        'repos': [
            {
                'repo': 'local',
                'hooks': [{
                    'id': 'post-merge',
                    'name': 'Post merge',
                    'entry': 'touch post-merge.tmp',
                    'language': 'system',
                    'always_run': True,
                    'verbose': True,
                    'stages': ['post-merge'],
                }],
            },
        ],
    }
    write_config(path, config)
    with cwd(path):
        # create a simple diamond of commits for a non-trivial merge
        open('init', 'a').close()
        cmd_output('git', 'add', '.')
        git_commit()

        open('master', 'a').close()
        cmd_output('git', 'add', '.')
        git_commit()

        cmd_output('git', 'checkout', '-b', 'branch', 'HEAD^')
        open('branch', 'a').close()
        cmd_output('git', 'add', '.')
        git_commit()

        cmd_output('git', 'checkout', 'master')
        install(C.CONFIG_FILE, store, hook_types=['post-merge'])
        retc, stdout, stderr = cmd_output_mocked_pre_commit_home(
            'git', 'merge', 'branch',
            tempdir_factory=tempdir_factory,
        )
        assert retc == 0
        assert os.path.exists('post-merge.tmp')
+
+
def test_pre_rebase_integration(tempdir_factory, store):
    """The pre-rebase hook fires when a branch is rebased."""
    path = git_dir(tempdir_factory)
    config = {
        'repos': [
            {
                'repo': 'local',
                'hooks': [{
                    'id': 'pre-rebase',
                    'name': 'Pre rebase',
                    'entry': 'touch pre-rebase.tmp',
                    'language': 'system',
                    'always_run': True,
                    'verbose': True,
                    'stages': ['pre-rebase'],
                }],
            },
        ],
    }
    write_config(path, config)
    with cwd(path):
        install(C.CONFIG_FILE, store, hook_types=['pre-rebase'])
        # build diverging `master` and `branch` so the rebase does real work
        open('foo', 'a').close()
        cmd_output('git', 'add', '.')
        git_commit()

        cmd_output('git', 'checkout', '-b', 'branch')
        open('bar', 'a').close()
        cmd_output('git', 'add', '.')
        git_commit()

        cmd_output('git', 'checkout', 'master')
        open('baz', 'a').close()
        cmd_output('git', 'add', '.')
        git_commit()

        cmd_output('git', 'checkout', 'branch')
        cmd_output('git', 'rebase', 'master', 'branch')
        assert os.path.exists('pre-rebase.tmp')
+
+
def test_post_rewrite_integration(tempdir_factory, store):
    """The post-rewrite hook fires on `commit --amend` but not plain commits."""
    path = git_dir(tempdir_factory)
    config = {
        'repos': [
            {
                'repo': 'local',
                'hooks': [{
                    'id': 'post-rewrite',
                    'name': 'Post rewrite',
                    'entry': 'touch post-rewrite.tmp',
                    'language': 'system',
                    'always_run': True,
                    'verbose': True,
                    'stages': ['post-rewrite'],
                }],
            },
        ],
    }
    write_config(path, config)
    with cwd(path):
        open('init', 'a').close()
        cmd_output('git', 'add', '.')
        install(C.CONFIG_FILE, store, hook_types=['post-rewrite'])
        git_commit()

        # a regular commit is not a rewrite
        assert not os.path.exists('post-rewrite.tmp')

        git_commit('--amend', '-m', 'ammended message')
        assert os.path.exists('post-rewrite.tmp')
+
+
def test_post_checkout_integration(tempdir_factory, store):
    """post-checkout hooks see PRE_COMMIT_TO_REF and do not receive the
    checked-out files as arguments."""
    path = git_dir(tempdir_factory)
    config = {
        'repos': [
            {
                'repo': 'local',
                'hooks': [{
                    'id': 'post-checkout',
                    'name': 'Post checkout',
                    'entry': 'bash -c "echo ${PRE_COMMIT_TO_REF}"',
                    'language': 'system',
                    'always_run': True,
                    'verbose': True,
                    'stages': ['post-checkout'],
                }],
            },
            {'repo': 'meta', 'hooks': [{'id': 'identity'}]},
        ],
    }
    write_config(path, config)
    with cwd(path):
        cmd_output('git', 'add', '.')
        git_commit()

        # add a file only on `feature`, it should not be passed to hooks
        cmd_output('git', 'checkout', '-b', 'feature')
        open('some_file', 'a').close()
        cmd_output('git', 'add', '.')
        git_commit()
        cmd_output('git', 'checkout', 'master')

        install(C.CONFIG_FILE, store, hook_types=['post-checkout'])
        retc, _, stderr = cmd_output('git', 'checkout', 'feature')
        assert stderr is not None
        assert retc == 0
        # PRE_COMMIT_TO_REF resolves to the checked-out revision
        assert git.head_rev(path) in stderr
        assert 'some_file' not in stderr
+
+
def test_skips_post_checkout_unstaged_changes(tempdir_factory, store):
    """The failing post-checkout hook must not fire during the internal
    stash/restore checkout that `staged_files_only` performs on commit."""
    path = git_dir(tempdir_factory)
    config = {
        'repo': 'local',
        'hooks': [{
            'id': 'fail',
            'name': 'fail',
            'entry': 'fail',
            'language': 'fail',
            'always_run': True,
            'stages': ['post-checkout'],
        }],
    }
    write_config(path, config)
    with cwd(path):
        cmd_output('git', 'add', '.')
        _get_commit_output(tempdir_factory)

        install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
        install(C.CONFIG_FILE, store, hook_types=['post-checkout'])

        # make an unstaged change so staged_files_only fires
        open('file', 'a').close()
        cmd_output('git', 'add', 'file')
        with open('file', 'w') as f:
            f.write('unstaged changes')

        retc, out = _get_commit_output(tempdir_factory, all_files=False)
        assert retc == 0
+
+
def test_prepare_commit_msg_integration_failing(
        failing_prepare_commit_msg_repo, tempdir_factory, store,
):
    """A failing prepare-commit-msg hook aborts the commit."""
    install(C.CONFIG_FILE, store, hook_types=['prepare-commit-msg'])
    retc, out = _get_commit_output(tempdir_factory)
    assert retc == 1
    assert out == '''\
Add "Signed off by:".....................................................Failed
- hook id: add-signoff
- exit code: 1
'''
+
+
def test_prepare_commit_msg_integration_passing(
        prepare_commit_msg_repo, tempdir_factory, store,
):
    """A prepare-commit-msg hook may rewrite the message before the commit."""
    install(C.CONFIG_FILE, store, hook_types=['prepare-commit-msg'])
    retcode, out = _get_commit_output(tempdir_factory, msg='Hi')
    assert retcode == 0
    status_line = out.splitlines()[0]
    assert status_line.startswith('Add "Signed off by:"...')
    assert status_line.endswith('...Passed')
    # the hook appended a sign-off to the message file
    editmsg_path = os.path.join(
        prepare_commit_msg_repo, '.git/COMMIT_EDITMSG',
    )
    with open(editmsg_path) as f:
        assert 'Signed off by: ' in f.read()
+
+
def test_prepare_commit_msg_legacy(
        prepare_commit_msg_repo, tempdir_factory, store,
):
    """A legacy prepare-commit-msg hook gets the message file as $1 and runs
    before pre-commit's own prepare-commit-msg hooks."""
    hook_path = os.path.join(
        prepare_commit_msg_repo, '.git/hooks/prepare-commit-msg',
    )
    os.makedirs(os.path.dirname(hook_path), exist_ok=True)
    with open(hook_path, 'w') as hook_file:
        # asserts $1 (the message file) exists, then prints a marker
        hook_file.write(
            '#!/usr/bin/env bash\n'
            'set -eu\n'
            'test -e "$1"\n'
            'echo legacy\n',
        )
    make_executable(hook_path)

    install(C.CONFIG_FILE, store, hook_types=['prepare-commit-msg'])

    retc, out = _get_commit_output(tempdir_factory, msg='Hi')
    assert retc == 0
    first_line, second_line = out.splitlines()[:2]
    assert first_line == 'legacy'
    assert second_line.startswith('Add "Signed off by:"...')
    commit_msg_path = os.path.join(
        prepare_commit_msg_repo, '.git/COMMIT_EDITMSG',
    )
    with open(commit_msg_path) as f:
        assert 'Signed off by: ' in f.read()
+
+
def test_pre_merge_commit_integration(tempdir_factory, store):
    """pre-merge-commit hooks run when a non-fast-forward merge creates a
    commit."""
    # merge strategy name differs across git versions, hence (ort|recursive)
    output_pattern = re_assert.Matches(
        r'^\[INFO\] Initializing environment for .+\n'
        r'Bash hook\.+Passed\n'
        r"Merge made by the '(ort|recursive)' strategy.\n"
        r' foo \| 0\n'
        r' 1 file changed, 0 insertions\(\+\), 0 deletions\(-\)\n'
        r' create mode 100644 foo\n$',
    )

    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        ret = install(C.CONFIG_FILE, store, hook_types=['pre-merge-commit'])
        assert ret == 0

        cmd_output('git', 'checkout', 'master', '-b', 'feature')
        _get_commit_output(tempdir_factory)
        cmd_output('git', 'checkout', 'master')
        ret, output, _ = cmd_output_mocked_pre_commit_home(
            'git', 'merge', '--no-ff', '--no-edit', 'feature',
            tempdir_factory=tempdir_factory,
        )
        assert ret == 0
        output_pattern.assert_matches(output)
+
+
def test_install_disallow_missing_config(tempdir_factory, store):
    """With skip_on_missing_config=False, committing without a config fails."""
    repo = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(repo):
        remove_config_from_repo(repo)
        retcode = install(
            C.CONFIG_FILE, store, hook_types=['pre-commit'],
            overwrite=True, skip_on_missing_config=False,
        )
        assert retcode == 0

        commit_ret, _ = _get_commit_output(tempdir_factory)
        assert commit_ret == 1
+
+
def test_install_allow_missing_config(tempdir_factory, store):
    """With skip_on_missing_config=True, commits succeed without a config
    and a skip notice is printed."""
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        remove_config_from_repo(path)
        ret = install(
            C.CONFIG_FILE, store, hook_types=['pre-commit'],
            overwrite=True, skip_on_missing_config=True,
        )
        assert ret == 0

        ret, output = _get_commit_output(tempdir_factory)
        assert ret == 0
        expected = (
            '`.pre-commit-config.yaml` config file not found. '
            'Skipping `pre-commit`.'
        )
        assert expected in output
+
+
def test_install_temporarily_allow_missing_config(tempdir_factory, store):
    """PRE_COMMIT_ALLOW_NO_CONFIG=1 lets a commit through even when the hook
    was installed with skip_on_missing_config=False.

    (Fixes the "mising" typo in the original test name; pytest discovers the
    test by its `test_` prefix so no caller is affected.)
    """
    path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
    with cwd(path):
        remove_config_from_repo(path)
        ret = install(
            C.CONFIG_FILE, store, hook_types=['pre-commit'],
            overwrite=True, skip_on_missing_config=False,
        )
        assert ret == 0

        # the environment variable overrides the installed hook's strictness
        env = dict(os.environ, PRE_COMMIT_ALLOW_NO_CONFIG='1')
        ret, output = _get_commit_output(tempdir_factory, env=env)
        assert ret == 0
        expected = (
            '`.pre-commit-config.yaml` config file not found. '
            'Skipping `pre-commit`.'
        )
        assert expected in output
+
+
def test_install_uninstall_default_hook_types(in_git_dir, store):
    """With hook_types=None, install/uninstall use the config's
    `default_install_hook_types` list."""
    cfg_src = 'default_install_hook_types: [pre-commit, pre-push]\nrepos: []\n'
    in_git_dir.join(C.CONFIG_FILE).write(cfg_src)

    assert not install(C.CONFIG_FILE, store, hook_types=None)
    assert os.access(in_git_dir.join('.git/hooks/pre-commit').strpath, os.X_OK)
    assert os.access(in_git_dir.join('.git/hooks/pre-push').strpath, os.X_OK)

    assert not uninstall(C.CONFIG_FILE, hook_types=None)
    assert not in_git_dir.join('.git/hooks/pre-commit').exists()
    assert not in_git_dir.join('.git/hooks/pre-push').exists()
diff --git a/tests/commands/migrate_config_test.py b/tests/commands/migrate_config_test.py
new file mode 100644
index 0000000..ba18463
--- /dev/null
+++ b/tests/commands/migrate_config_test.py
@@ -0,0 +1,177 @@
+from __future__ import annotations
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit.clientlib import InvalidConfigError
+from pre_commit.commands.migrate_config import migrate_config
+
+
def test_migrate_config_normal_format(tmpdir, capsys):
    """A top-level-list config is wrapped under a `repos:` key."""
    cfg = tmpdir.join(C.CONFIG_FILE)
    cfg.write(
        '-   repo: local\n'
        '    hooks:\n'
        '    -   id: foo\n'
        '        name: foo\n'
        '        entry: ./bin/foo.sh\n'
        '        language: script\n',
    )
    with tmpdir.as_cwd():
        assert not migrate_config(C.CONFIG_FILE)
    out, _ = capsys.readouterr()
    assert out == 'Configuration has been migrated.\n'
    contents = cfg.read()
    assert contents == (
        'repos:\n'
        '-   repo: local\n'
        '    hooks:\n'
        '    -   id: foo\n'
        '        name: foo\n'
        '        entry: ./bin/foo.sh\n'
        '        language: script\n'
    )
+
+
def test_migrate_config_document_marker(tmpdir):
    """Leading comments and the `---` document marker are preserved; the
    `repos:` key is inserted after the marker."""
    cfg = tmpdir.join(C.CONFIG_FILE)
    cfg.write(
        '# comment\n'
        '\n'
        '---\n'
        '-   repo: local\n'
        '    hooks:\n'
        '    -   id: foo\n'
        '        name: foo\n'
        '        entry: ./bin/foo.sh\n'
        '        language: script\n',
    )
    with tmpdir.as_cwd():
        assert not migrate_config(C.CONFIG_FILE)
    contents = cfg.read()
    assert contents == (
        '# comment\n'
        '\n'
        '---\n'
        'repos:\n'
        '-   repo: local\n'
        '    hooks:\n'
        '    -   id: foo\n'
        '        name: foo\n'
        '        entry: ./bin/foo.sh\n'
        '        language: script\n'
    )
+
+
def test_migrate_config_list_literal(tmpdir):
    """Flow-style (`[{...}]`) configs are wrapped and re-indented under
    `repos:` without reformatting the flow content itself."""
    cfg = tmpdir.join(C.CONFIG_FILE)
    cfg.write(
        '[{\n'
        '    repo: local,\n'
        '    hooks: [{\n'
        '        id: foo, name: foo, entry: ./bin/foo.sh,\n'
        '        language: script,\n'
        '    }]\n'
        '}]',
    )
    with tmpdir.as_cwd():
        assert not migrate_config(C.CONFIG_FILE)
    contents = cfg.read()
    assert contents == (
        'repos:\n'
        '    [{\n'
        '        repo: local,\n'
        '        hooks: [{\n'
        '            id: foo, name: foo, entry: ./bin/foo.sh,\n'
        '            language: script,\n'
        '        }]\n'
        '    }]'
    )
+
+
def test_already_migrated_configuration_noop(tmpdir, capsys):
    """An already-migrated config is reported as such and left byte-identical."""
    src = (
        'repos:\n'
        '-   repo: local\n'
        '    hooks:\n'
        '    -   id: foo\n'
        '        name: foo\n'
        '        entry: ./bin/foo.sh\n'
        '        language: script\n'
    )
    cfg = tmpdir.join(C.CONFIG_FILE)
    cfg.write(src)
    with tmpdir.as_cwd():
        assert migrate_config(C.CONFIG_FILE) == 0
    stdout, _ = capsys.readouterr()
    assert stdout == 'Configuration is already migrated.\n'
    assert cfg.read() == src
+
+
def test_migrate_config_sha_to_rev(tmpdir):
    """Every deprecated `sha:` key is renamed to `rev:` (all occurrences)."""
    contents = (
        'repos:\n'
        '-   repo: https://github.com/pre-commit/pre-commit-hooks\n'
        '    sha: v1.2.0\n'
        '    hooks: []\n'
        '-   repo: https://github.com/pre-commit/pre-commit-hooks\n'
        '    sha: v1.2.0\n'
        '    hooks: []\n'
    )
    cfg = tmpdir.join(C.CONFIG_FILE)
    cfg.write(contents)
    with tmpdir.as_cwd():
        assert not migrate_config(C.CONFIG_FILE)
    contents = cfg.read()
    assert contents == (
        'repos:\n'
        '-   repo: https://github.com/pre-commit/pre-commit-hooks\n'
        '    rev: v1.2.0\n'
        '    hooks: []\n'
        '-   repo: https://github.com/pre-commit/pre-commit-hooks\n'
        '    rev: v1.2.0\n'
        '    hooks: []\n'
    )
+
+
def test_migrate_config_language_python_venv(tmp_path):
    """`language: python_venv` is rewritten to `python`; other languages are
    left untouched."""
    src = '''\
repos:
-   repo: local
    hooks:
    -   id: example
        name: example
        entry: example
        language: python_venv
    -   id: example
        name: example
        entry: example
        language: system
'''
    expected = '''\
repos:
-   repo: local
    hooks:
    -   id: example
        name: example
        entry: example
        language: python
    -   id: example
        name: example
        entry: example
        language: system
'''
    cfg = tmp_path.joinpath('cfg.yaml')
    cfg.write_text(src)
    assert migrate_config(str(cfg)) == 0
    assert cfg.read_text() == expected
+
+
def test_migrate_config_invalid_yaml(tmpdir):
    """Unparseable yaml surfaces as InvalidConfigError naming the file."""
    cfg = tmpdir.join(C.CONFIG_FILE)
    cfg.write('[')
    with tmpdir.as_cwd(), pytest.raises(InvalidConfigError) as excinfo:
        migrate_config(C.CONFIG_FILE)
    prefix = '\n==> File .pre-commit-config.yaml\n=====> '
    assert str(excinfo.value).startswith(prefix)
diff --git a/tests/commands/run_test.py b/tests/commands/run_test.py
new file mode 100644
index 0000000..e36a3ca
--- /dev/null
+++ b/tests/commands/run_test.py
@@ -0,0 +1,1219 @@
+from __future__ import annotations
+
+import os.path
+import shlex
+import sys
+import time
+from collections.abc import MutableMapping
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit import color
+from pre_commit.commands.install_uninstall import install
+from pre_commit.commands.run import _compute_cols
+from pre_commit.commands.run import _full_msg
+from pre_commit.commands.run import _get_skips
+from pre_commit.commands.run import _has_unmerged_paths
+from pre_commit.commands.run import _start_msg
+from pre_commit.commands.run import Classifier
+from pre_commit.commands.run import filter_by_include_exclude
+from pre_commit.commands.run import run
+from pre_commit.util import cmd_output
+from pre_commit.util import make_executable
+from testing.auto_namedtuple import auto_namedtuple
+from testing.fixtures import add_config_to_repo
+from testing.fixtures import git_dir
+from testing.fixtures import make_consuming_repo
+from testing.fixtures import modify_config
+from testing.fixtures import read_config
+from testing.fixtures import sample_meta_config
+from testing.fixtures import write_config
+from testing.util import cmd_output_mocked_pre_commit_home
+from testing.util import cwd
+from testing.util import git_commit
+from testing.util import run_opts
+
+
+def test_start_msg():
+ ret = _start_msg(start='start', end_len=5, cols=15)
+ # 4 dots: 15 - 5 - 5 - 1
+ assert ret == 'start....'
+
+
+def test_full_msg():
+ ret = _full_msg(
+ start='start',
+ end_msg='end',
+ end_color='',
+ use_color=False,
+ cols=15,
+ )
+ # 6 dots: 15 - 5 - 3 - 1
+ assert ret == 'start......end\n'
+
+
+def test_full_msg_with_cjk():
+    """CJK characters render double-width, so fewer padding dots are needed."""
+    ret = _full_msg(
+        start='ε•Šγ‚μ•„',
+        end_msg='end',
+        end_color='',
+        use_color=False,
+        cols=15,
+    )
+    # 5 dots: 15 - 6 (three double-width chars) - 3 - 1
+    assert ret == 'ε•Šγ‚μ•„.....end\n'
+
+
+def test_full_msg_with_color():
+    """With color enabled, only the end message is wrapped in color codes."""
+    ret = _full_msg(
+        start='start',
+        end_msg='end',
+        end_color=color.RED,
+        use_color=True,
+        cols=15,
+    )
+    # 6 dots: 15 - 5 - 3 - 1; padding itself stays uncolored
+    assert ret == f'start......{color.RED}end{color.NORMAL}\n'
+
+
+def test_full_msg_with_postfix():
+ ret = _full_msg(
+ start='start',
+ postfix='post ',
+ end_msg='end',
+ end_color='',
+ use_color=False,
+ cols=20,
+ )
+ # 6 dots: 20 - 5 - 5 - 3 - 1
+ assert ret == 'start......post end\n'
+
+
+def test_full_msg_postfix_not_colored():
+ ret = _full_msg(
+ start='start',
+ postfix='post ',
+ end_msg='end',
+ end_color=color.RED,
+ use_color=True,
+ cols=20,
+ )
+ # 6 dots: 20 - 5 - 5 - 3 - 1
+ assert ret == f'start......post {color.RED}end{color.NORMAL}\n'
+
+
+@pytest.fixture
+def repo_with_passing_hook(tempdir_factory):
+    """Yield (and chdir into) a consuming repo whose hook always passes."""
+    git_path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
+    with cwd(git_path):
+        yield git_path
+
+
+@pytest.fixture
+def repo_with_failing_hook(tempdir_factory):
+    """Yield (and chdir into) a consuming repo whose hook always fails."""
+    git_path = make_consuming_repo(tempdir_factory, 'failing_hook_repo')
+    with cwd(git_path):
+        yield git_path
+
+
+@pytest.fixture
+def aliased_repo(tempdir_factory):
+ git_path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
+ with cwd(git_path):
+ with modify_config() as config:
+ config['repos'][0]['hooks'].append(
+ {'id': 'bash_hook', 'alias': 'foo_bash'},
+ )
+ stage_a_file()
+ yield git_path
+
+
+def stage_a_file(filename='foo.py'):
+ open(filename, 'a').close()
+ cmd_output('git', 'add', filename)
+
+
+def _do_run(cap_out, store, repo, args, environ={}, config_file=C.CONFIG_FILE):
+ with cwd(repo): # replicates `main._adjust_args_and_chdir` behaviour
+ ret = run(config_file, store, args, environ=environ)
+ printed = cap_out.get_bytes()
+ return ret, printed
+
+
+def _test_run(
+    cap_out, store, repo, opts, expected_outputs, expected_ret, stage,
+    config_file=C.CONFIG_FILE,
+):
+    """Run pre-commit with *opts* and assert exit code and output fragments.
+
+    When *stage* is true, a file is staged first so hooks have work to do.
+    """
+    if stage:
+        stage_a_file()
+    args = run_opts(**opts)
+    ret, printed = _do_run(cap_out, store, repo, args, config_file=config_file)
+
+    # include the captured output in the failure message for debuggability
+    assert ret == expected_ret, (ret, expected_ret, printed)
+    for expected_output_part in expected_outputs:
+        assert expected_output_part in printed
+
+
+def test_run_all_hooks_failing(cap_out, store, repo_with_failing_hook):
+ _test_run(
+ cap_out,
+ store,
+ repo_with_failing_hook,
+ {},
+ (
+ b'Failing hook',
+ b'Failed',
+ b'hook id: failing_hook',
+ b'Fail\nfoo.py\n',
+ ),
+ expected_ret=1,
+ stage=True,
+ )
+
+
+def test_arbitrary_bytes_hook(cap_out, store, tempdir_factory):
+ git_path = make_consuming_repo(tempdir_factory, 'arbitrary_bytes_repo')
+ with cwd(git_path):
+ _test_run(
+ cap_out, store, git_path, {}, (b'\xe2\x98\x83\xb2\n',), 1, True,
+ )
+
+
+def test_hook_that_modifies_but_returns_zero(cap_out, store, tempdir_factory):
+ git_path = make_consuming_repo(
+ tempdir_factory, 'modified_file_returns_zero_repo',
+ )
+ with cwd(git_path):
+ stage_a_file('bar.py')
+ _test_run(
+ cap_out,
+ store,
+ git_path,
+ {},
+ (
+ # The first should fail
+ b'Failed',
+ # With a modified file (default message + the hook's output)
+ b'- files were modified by this hook\n\n'
+ b'Modified: foo.py',
+ # The next hook should pass despite the first modifying
+ b'Passed',
+ # The next hook should fail
+ b'Failed',
+ # bar.py was modified, but provides no additional output
+ b'- files were modified by this hook\n',
+ ),
+ 1,
+ True,
+ )
+
+
+def test_types_hook_repository(cap_out, store, tempdir_factory):
+ git_path = make_consuming_repo(tempdir_factory, 'types_repo')
+ with cwd(git_path):
+ stage_a_file('bar.py')
+ stage_a_file('bar.notpy')
+ ret, printed = _do_run(cap_out, store, git_path, run_opts())
+ assert ret == 1
+ assert b'bar.py' in printed
+ assert b'bar.notpy' not in printed
+
+
+def test_types_or_hook_repository(cap_out, store, tempdir_factory):
+ git_path = make_consuming_repo(tempdir_factory, 'types_or_repo')
+ with cwd(git_path):
+ stage_a_file('bar.notpy')
+ stage_a_file('bar.pxd')
+ stage_a_file('bar.py')
+ ret, printed = _do_run(cap_out, store, git_path, run_opts())
+ assert ret == 1
+ assert b'bar.notpy' not in printed
+ assert b'bar.pxd' in printed
+ assert b'bar.py' in printed
+
+
+def test_exclude_types_hook_repository(cap_out, store, tempdir_factory):
+ git_path = make_consuming_repo(tempdir_factory, 'exclude_types_repo')
+ with cwd(git_path):
+ with open('exe', 'w') as exe:
+ exe.write('#!/usr/bin/env python3\n')
+ make_executable('exe')
+ cmd_output('git', 'add', 'exe')
+ stage_a_file('bar.py')
+ ret, printed = _do_run(cap_out, store, git_path, run_opts())
+ assert ret == 1
+ assert b'bar.py' in printed
+ assert b'exe' not in printed
+
+
+def test_global_exclude(cap_out, store, in_git_dir):
+ config = {
+ 'exclude': r'^foo\.py$',
+ 'repos': [{'repo': 'meta', 'hooks': [{'id': 'identity'}]}],
+ }
+ write_config('.', config)
+ open('foo.py', 'a').close()
+ open('bar.py', 'a').close()
+ cmd_output('git', 'add', '.')
+ opts = run_opts(verbose=True)
+ ret, printed = _do_run(cap_out, store, str(in_git_dir), opts)
+ assert ret == 0
+ # Does not contain foo.py since it was excluded
+ assert printed.startswith(f'identity{"." * 65}Passed\n'.encode())
+ assert printed.endswith(b'\n\n.pre-commit-config.yaml\nbar.py\n\n')
+
+
+def test_global_files(cap_out, store, in_git_dir):
+ config = {
+ 'files': r'^bar\.py$',
+ 'repos': [{'repo': 'meta', 'hooks': [{'id': 'identity'}]}],
+ }
+ write_config('.', config)
+ open('foo.py', 'a').close()
+ open('bar.py', 'a').close()
+ cmd_output('git', 'add', '.')
+ opts = run_opts(verbose=True)
+ ret, printed = _do_run(cap_out, store, str(in_git_dir), opts)
+ assert ret == 0
+ # Does not contain foo.py since it was excluded
+ assert printed.startswith(f'identity{"." * 65}Passed\n'.encode())
+ assert printed.endswith(b'\n\nbar.py\n\n')
+
+
+@pytest.mark.parametrize(
+ ('t1', 't2', 'expected'),
+ (
+ (1.234, 2., b'\n- duration: 0.77s\n'),
+ (1., 1., b'\n- duration: 0s\n'),
+ ),
+)
+def test_verbose_duration(cap_out, store, in_git_dir, t1, t2, expected):
+ write_config('.', {'repo': 'meta', 'hooks': [{'id': 'identity'}]})
+ cmd_output('git', 'add', '.')
+ opts = run_opts(verbose=True)
+ with mock.patch.object(time, 'monotonic', side_effect=(t1, t2)):
+ ret, printed = _do_run(cap_out, store, str(in_git_dir), opts)
+ assert ret == 0
+ assert expected in printed
+
+
+@pytest.mark.parametrize(
+ ('args', 'expected_out'),
+ [
+ (
+ {
+ 'show_diff_on_failure': True,
+ },
+ b'All changes made by hooks:',
+ ),
+ (
+ {
+ 'show_diff_on_failure': True,
+ 'color': True,
+ },
+ b'All changes made by hooks:',
+ ),
+ (
+ {
+ 'show_diff_on_failure': True,
+ 'all_files': True,
+ },
+ b'reproduce locally with: pre-commit run --all-files',
+ ),
+ ],
+)
+def test_show_diff_on_failure(
+ args,
+ expected_out,
+ capfd,
+ cap_out,
+ store,
+ tempdir_factory,
+):
+ git_path = make_consuming_repo(
+ tempdir_factory, 'modified_file_returns_zero_repo',
+ )
+ with cwd(git_path):
+ stage_a_file('bar.py')
+ _test_run(
+ cap_out, store, git_path, args,
+ # we're only testing the output after running
+ expected_out, 1, True,
+ )
+ out, _ = capfd.readouterr()
+ assert 'diff --git' in out
+
+
+@pytest.mark.parametrize(
+ ('options', 'outputs', 'expected_ret', 'stage'),
+ (
+ ({}, (b'Bash hook', b'Passed'), 0, True),
+ ({'verbose': True}, (b'foo.py\nHello World',), 0, True),
+ ({'hook': 'bash_hook'}, (b'Bash hook', b'Passed'), 0, True),
+ (
+ {'hook': 'nope'},
+ (b'No hook with id `nope` in stage `pre-commit`',),
+ 1,
+ True,
+ ),
+ (
+ {'hook': 'nope', 'hook_stage': 'pre-push'},
+ (b'No hook with id `nope` in stage `pre-push`',),
+ 1,
+ True,
+ ),
+ (
+ {'all_files': True, 'verbose': True},
+ (b'foo.py',),
+ 0,
+ True,
+ ),
+ (
+ {'files': ('foo.py',), 'verbose': True},
+ (b'foo.py',),
+ 0,
+ True,
+ ),
+ ({}, (b'Bash hook', b'(no files to check)', b'Skipped'), 0, False),
+ ),
+)
+def test_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ options,
+ outputs,
+ expected_ret,
+ stage,
+):
+ _test_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ options,
+ outputs,
+ expected_ret,
+ stage,
+ )
+
+
+def test_run_output_logfile(cap_out, store, tempdir_factory):
+ expected_output = (
+ b'This is STDOUT output\n',
+ b'This is STDERR output\n',
+ )
+
+ git_path = make_consuming_repo(tempdir_factory, 'logfile_repo')
+ with cwd(git_path):
+ _test_run(
+ cap_out,
+ store,
+ git_path, {},
+ expected_output,
+ expected_ret=1,
+ stage=True,
+ )
+ logfile_path = os.path.join(git_path, 'test.log')
+ assert os.path.exists(logfile_path)
+ with open(logfile_path, 'rb') as logfile:
+ logfile_content = logfile.readlines()
+
+ for expected_output_part in expected_output:
+ assert expected_output_part in logfile_content
+
+
+def test_always_run(cap_out, store, repo_with_passing_hook):
+ with modify_config() as config:
+ config['repos'][0]['hooks'][0]['always_run'] = True
+ _test_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ {},
+ (b'Bash hook', b'Passed'),
+ 0,
+ stage=False,
+ )
+
+
+def test_always_run_alt_config(cap_out, store, repo_with_passing_hook):
+ repo_root = '.'
+ config = read_config(repo_root)
+ config['repos'][0]['hooks'][0]['always_run'] = True
+ alt_config_file = 'alternate_config.yaml'
+ add_config_to_repo(repo_root, config, config_file=alt_config_file)
+
+ _test_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ {},
+ (b'Bash hook', b'Passed'),
+ 0,
+ stage=False,
+ config_file=alt_config_file,
+ )
+
+
+def test_hook_verbose_enabled(cap_out, store, repo_with_passing_hook):
+ with modify_config() as config:
+ config['repos'][0]['hooks'][0]['always_run'] = True
+ config['repos'][0]['hooks'][0]['verbose'] = True
+
+ _test_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ {},
+ (b'Hello World',),
+ 0,
+ stage=False,
+ )
+
+
+@pytest.mark.parametrize(
+ ('from_ref', 'to_ref'), (('master', ''), ('', 'master')),
+)
+def test_from_ref_to_ref_error_msg_error(
+ cap_out, store, repo_with_passing_hook, from_ref, to_ref,
+):
+ args = run_opts(from_ref=from_ref, to_ref=to_ref)
+ ret, printed = _do_run(cap_out, store, repo_with_passing_hook, args)
+ assert ret == 1
+ assert b'Specify both --from-ref and --to-ref.' in printed
+
+
+def test_all_push_options_ok(cap_out, store, repo_with_passing_hook):
+ args = run_opts(
+ from_ref='master', to_ref='master',
+ remote_branch='master',
+ local_branch='master',
+ remote_name='origin', remote_url='https://example.com/repo',
+ )
+ ret, printed = _do_run(cap_out, store, repo_with_passing_hook, args)
+ assert ret == 0
+ assert b'Specify both --from-ref and --to-ref.' not in printed
+
+
+def test_is_squash_merge(cap_out, store, repo_with_passing_hook):
+ args = run_opts(is_squash_merge='1')
+ environ: MutableMapping[str, str] = {}
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook, args, environ,
+ )
+ assert environ['PRE_COMMIT_IS_SQUASH_MERGE'] == '1'
+
+
+def test_rewrite_command(cap_out, store, repo_with_passing_hook):
+ args = run_opts(rewrite_command='amend')
+ environ: MutableMapping[str, str] = {}
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook, args, environ,
+ )
+ assert environ['PRE_COMMIT_REWRITE_COMMAND'] == 'amend'
+
+
+def test_checkout_type(cap_out, store, repo_with_passing_hook):
+ args = run_opts(from_ref='', to_ref='', checkout_type='1')
+ environ: MutableMapping[str, str] = {}
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook, args, environ,
+ )
+ assert environ['PRE_COMMIT_CHECKOUT_TYPE'] == '1'
+
+
+def test_has_unmerged_paths(in_merge_conflict):
+ assert _has_unmerged_paths() is True
+ cmd_output('git', 'add', '.')
+ assert _has_unmerged_paths() is False
+
+
+def test_merge_conflict(cap_out, store, in_merge_conflict):
+ ret, printed = _do_run(cap_out, store, in_merge_conflict, run_opts())
+ assert ret == 1
+ assert b'Unmerged files. Resolve before committing.' in printed
+
+
+def test_files_during_merge_conflict(cap_out, store, in_merge_conflict):
+ opts = run_opts(files=['placeholder'])
+ ret, printed = _do_run(cap_out, store, in_merge_conflict, opts)
+ assert ret == 0
+ assert b'Bash hook' in printed
+
+
+def test_merge_conflict_modified(cap_out, store, in_merge_conflict):
+ # Touch another file so we have unstaged non-conflicting things
+ assert os.path.exists('placeholder')
+ with open('placeholder', 'w') as placeholder_file:
+ placeholder_file.write('bar\nbaz\n')
+
+ ret, printed = _do_run(cap_out, store, in_merge_conflict, run_opts())
+ assert ret == 1
+ assert b'Unmerged files. Resolve before committing.' in printed
+
+
+def test_merge_conflict_resolved(cap_out, store, in_merge_conflict):
+ cmd_output('git', 'add', '.')
+ ret, printed = _do_run(cap_out, store, in_merge_conflict, run_opts())
+ for msg in (
+ b'Checking merge-conflict files only.', b'Bash hook', b'Passed',
+ ):
+ assert msg in printed
+
+
+def test_rebase(cap_out, store, repo_with_passing_hook):
+ args = run_opts(pre_rebase_upstream='master', pre_rebase_branch='topic')
+ environ: MutableMapping[str, str] = {}
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook, args, environ,
+ )
+ assert environ['PRE_COMMIT_PRE_REBASE_UPSTREAM'] == 'master'
+ assert environ['PRE_COMMIT_PRE_REBASE_BRANCH'] == 'topic'
+
+
+@pytest.mark.parametrize(
+    ('hooks', 'expected'),
+    (
+        # no hooks: falls back to the 80-column default
+        ([], 80),
+        # otherwise the longest hook name drives the computed width
+        ([auto_namedtuple(id='a', name='a' * 51)], 81),
+        (
+            [
+                auto_namedtuple(id='a', name='a' * 51),
+                auto_namedtuple(id='b', name='b' * 52),
+            ],
+            82,
+        ),
+    ),
+)
+def test_compute_cols(hooks, expected):
+    assert _compute_cols(hooks) == expected
+
+
+@pytest.mark.parametrize(
+    ('environ', 'expected_output'),
+    (
+        # missing or empty SKIP yields no skips
+        ({}, set()),
+        ({'SKIP': ''}, set()),
+        # empty entries and surrounding whitespace are discarded
+        ({'SKIP': ','}, set()),
+        ({'SKIP': ',foo'}, {'foo'}),
+        ({'SKIP': 'foo'}, {'foo'}),
+        ({'SKIP': 'foo,bar'}, {'foo', 'bar'}),
+        ({'SKIP': ' foo , bar'}, {'foo', 'bar'}),
+    ),
+)
+def test_get_skips(environ, expected_output):
+    """SKIP is parsed as a comma-separated, whitespace-tolerant set of ids."""
+    ret = _get_skips(environ)
+    assert ret == expected_output
+
+
+def test_skip_hook(cap_out, store, repo_with_passing_hook):
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook, run_opts(),
+ {'SKIP': 'bash_hook'},
+ )
+ for msg in (b'Bash hook', b'Skipped'):
+ assert msg in printed
+
+
+def test_skip_aliased_hook(cap_out, store, aliased_repo):
+ ret, printed = _do_run(
+ cap_out, store, aliased_repo,
+ run_opts(hook='foo_bash'),
+ {'SKIP': 'foo_bash'},
+ )
+ assert ret == 0
+ # Only the aliased hook runs and is skipped
+ for msg in (b'Bash hook', b'Skipped'):
+ assert printed.count(msg) == 1
+
+
+def test_skip_bypasses_installation(cap_out, store, repo_with_passing_hook):
+ config = {
+ 'repo': 'local',
+ 'hooks': [
+ {
+ 'id': 'skipme',
+ 'name': 'skipme',
+ 'entry': 'skipme',
+ 'language': 'python',
+ 'additional_dependencies': ['/pre-commit-does-not-exist'],
+ },
+ ],
+ }
+ add_config_to_repo(repo_with_passing_hook, config)
+
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook,
+ run_opts(all_files=True),
+ {'SKIP': 'skipme'},
+ )
+ assert ret == 0
+
+
+def test_skip_alias_bypasses_installation(
+ cap_out, store, repo_with_passing_hook,
+):
+ config = {
+ 'repo': 'local',
+ 'hooks': [
+ {
+ 'id': 'skipme',
+ 'name': 'skipme-1',
+ 'alias': 'skipme-1',
+ 'entry': 'skipme',
+ 'language': 'python',
+ 'additional_dependencies': ['/pre-commit-does-not-exist'],
+ },
+ ],
+ }
+ add_config_to_repo(repo_with_passing_hook, config)
+
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook,
+ run_opts(all_files=True),
+ {'SKIP': 'skipme-1'},
+ )
+ assert ret == 0
+
+
+def test_hook_id_not_in_non_verbose_output(
+ cap_out, store, repo_with_passing_hook,
+):
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook, run_opts(verbose=False),
+ )
+ assert b'[bash_hook]' not in printed
+
+
+def test_hook_id_in_verbose_output(cap_out, store, repo_with_passing_hook):
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook, run_opts(verbose=True),
+ )
+ assert b'- hook id: bash_hook' in printed
+
+
+def test_multiple_hooks_same_id(cap_out, store, repo_with_passing_hook):
+ with cwd(repo_with_passing_hook):
+ # Add bash hook on there again
+ with modify_config() as config:
+ config['repos'][0]['hooks'].append({'id': 'bash_hook'})
+ stage_a_file()
+
+ ret, output = _do_run(cap_out, store, repo_with_passing_hook, run_opts())
+ assert ret == 0
+ assert output.count(b'Bash hook') == 2
+
+
+def test_aliased_hook_run(cap_out, store, aliased_repo):
+ ret, output = _do_run(
+ cap_out, store, aliased_repo,
+ run_opts(verbose=True, hook='bash_hook'),
+ )
+ assert ret == 0
+ # Both hooks will run since they share the same ID
+ assert output.count(b'Bash hook') == 2
+
+ ret, output = _do_run(
+ cap_out, store, aliased_repo,
+ run_opts(verbose=True, hook='foo_bash'),
+ )
+ assert ret == 0
+ # Only the aliased hook runs
+ assert output.count(b'Bash hook') == 1
+
+
+def test_non_ascii_hook_id(repo_with_passing_hook, tempdir_factory):
+ with cwd(repo_with_passing_hook):
+ _, stdout, _ = cmd_output_mocked_pre_commit_home(
+ sys.executable, '-m', 'pre_commit.main', 'run', 'β˜ƒ',
+ check=False, tempdir_factory=tempdir_factory,
+ )
+ assert 'UnicodeDecodeError' not in stdout
+ # Doesn't actually happen, but a reasonable assertion
+ assert 'UnicodeEncodeError' not in stdout
+
+
+def test_stdout_write_bug_py26(repo_with_failing_hook, store, tempdir_factory):
+ with cwd(repo_with_failing_hook):
+ with modify_config() as config:
+ config['repos'][0]['hooks'][0]['args'] = ['β˜ƒ']
+ stage_a_file()
+
+ install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
+
+ # Have to use subprocess because pytest monkeypatches sys.stdout
+ _, out = git_commit(
+ fn=cmd_output_mocked_pre_commit_home,
+ tempdir_factory=tempdir_factory,
+ check=False,
+ )
+ assert 'UnicodeEncodeError' not in out
+ # Doesn't actually happen, but a reasonable assertion
+ assert 'UnicodeDecodeError' not in out
+
+
+def test_lots_of_files(store, tempdir_factory):
+ # windows xargs seems to have a bug, here's a regression test for
+ # our workaround
+ git_path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
+ with cwd(git_path):
+ # Override files so we run against them
+ with modify_config() as config:
+ config['repos'][0]['hooks'][0]['files'] = ''
+
+ # Write a crap ton of files
+ for i in range(400):
+ open(f'{"a" * 100}{i}', 'w').close()
+
+ cmd_output('git', 'add', '.')
+ install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
+
+ git_commit(
+ fn=cmd_output_mocked_pre_commit_home,
+ tempdir_factory=tempdir_factory,
+ )
+
+
+def test_no_textconv(cap_out, store, repo_with_passing_hook):
+ # git textconv filters can hide changes from hooks
+ with open('.gitattributes', 'w') as fp:
+ fp.write('*.jpeg diff=empty\n')
+
+ with open('.git/config', 'a') as fp:
+ fp.write('[diff "empty"]\n')
+ fp.write('textconv = "true"\n')
+
+ config = {
+ 'repo': 'local',
+ 'hooks': [
+ {
+ 'id': 'extend-jpeg',
+ 'name': 'extend-jpeg',
+ 'language': 'system',
+ 'entry': (
+ f'{shlex.quote(sys.executable)} -c "import sys; '
+ 'open(sys.argv[1], \'ab\').write(b\'\\x00\')"'
+ ),
+ 'types': ['jpeg'],
+ },
+ ],
+ }
+ add_config_to_repo(repo_with_passing_hook, config)
+
+ stage_a_file('example.jpeg')
+
+ _test_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ {},
+ (
+ b'Failed',
+ ),
+ expected_ret=1,
+ stage=False,
+ )
+
+
+def test_stages(cap_out, store, repo_with_passing_hook):
+ config = {
+ 'repo': 'local',
+ 'hooks': [
+ {
+ 'id': f'do-not-commit-{i}',
+ 'name': f'hook {i}',
+ 'entry': 'DO NOT COMMIT',
+ 'language': 'pygrep',
+ 'stages': [stage],
+ }
+ for i, stage in enumerate(('pre-commit', 'pre-push', 'manual'), 1)
+ ],
+ }
+ add_config_to_repo(repo_with_passing_hook, config)
+
+ stage_a_file()
+
+ def _run_for_stage(stage):
+ args = run_opts(hook_stage=stage)
+ ret, printed = _do_run(cap_out, store, repo_with_passing_hook, args)
+ assert not ret, (ret, printed)
+ # this test should only run one hook
+ assert printed.count(b'hook ') == 1
+ return printed
+
+ assert _run_for_stage('pre-commit').startswith(b'hook 1...')
+ assert _run_for_stage('pre-push').startswith(b'hook 2...')
+ assert _run_for_stage('manual').startswith(b'hook 3...')
+
+
+def test_commit_msg_hook(cap_out, store, commit_msg_repo):
+ filename = '.git/COMMIT_EDITMSG'
+ with open(filename, 'w') as f:
+ f.write('This is the commit message')
+
+ _test_run(
+ cap_out,
+ store,
+ commit_msg_repo,
+ {'hook_stage': 'commit-msg', 'commit_msg_filename': filename},
+ expected_outputs=[b'Must have "Signed off by:"', b'Failed'],
+ expected_ret=1,
+ stage=False,
+ )
+
+
+def test_post_checkout_hook(cap_out, store, tempdir_factory):
+ path = git_dir(tempdir_factory)
+ config = {
+ 'repo': 'meta', 'hooks': [
+ {'id': 'identity', 'stages': ['post-checkout']},
+ ],
+ }
+ add_config_to_repo(path, config)
+
+ with cwd(path):
+ _test_run(
+ cap_out,
+ store,
+ path,
+ {'hook_stage': 'post-checkout'},
+ expected_outputs=[b'identity...'],
+ expected_ret=0,
+ stage=False,
+ )
+
+
+def test_prepare_commit_msg_hook(cap_out, store, prepare_commit_msg_repo):
+ filename = '.git/COMMIT_EDITMSG'
+ with open(filename, 'w') as f:
+ f.write('This is the commit message')
+
+ _test_run(
+ cap_out,
+ store,
+ prepare_commit_msg_repo,
+ {
+ 'hook_stage': 'prepare-commit-msg',
+ 'commit_msg_filename': filename,
+ 'prepare_commit_message_source': 'commit',
+ 'commit_object_name': 'HEAD',
+ },
+ expected_outputs=[b'Add "Signed off by:"', b'Passed'],
+ expected_ret=0,
+ stage=False,
+ )
+
+ with open(filename) as f:
+ assert 'Signed off by: ' in f.read()
+
+
+def test_local_hook_passes(cap_out, store, repo_with_passing_hook):
+ config = {
+ 'repo': 'local',
+ 'hooks': [
+ {
+ 'id': 'identity-copy',
+ 'name': 'identity-copy',
+ 'entry': '{} -m pre_commit.meta_hooks.identity'.format(
+ shlex.quote(sys.executable),
+ ),
+ 'language': 'system',
+ 'files': r'\.py$',
+ },
+ {
+ 'id': 'do_not_commit',
+ 'name': 'Block if "DO NOT COMMIT" is found',
+ 'entry': 'DO NOT COMMIT',
+ 'language': 'pygrep',
+ },
+ ],
+ }
+ add_config_to_repo(repo_with_passing_hook, config)
+
+ with open('placeholder.py', 'w') as staged_file:
+ staged_file.write('"""TODO: something"""\n')
+ cmd_output('git', 'add', 'placeholder.py')
+
+ _test_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ opts={},
+ expected_outputs=[b''],
+ expected_ret=0,
+ stage=False,
+ )
+
+
+def test_local_hook_fails(cap_out, store, repo_with_passing_hook):
+ config = {
+ 'repo': 'local',
+ 'hooks': [{
+ 'id': 'no-todo',
+ 'name': 'No TODO',
+ 'entry': 'sh -c "! grep -iI todo $@" --',
+ 'language': 'system',
+ }],
+ }
+ add_config_to_repo(repo_with_passing_hook, config)
+
+ with open('placeholder.py', 'w') as staged_file:
+ staged_file.write('"""TODO: something"""\n')
+ cmd_output('git', 'add', 'placeholder.py')
+
+ _test_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ opts={},
+ expected_outputs=[b''],
+ expected_ret=1,
+ stage=False,
+ )
+
+
+def test_meta_hook_passes(cap_out, store, repo_with_passing_hook):
+ add_config_to_repo(repo_with_passing_hook, sample_meta_config())
+
+ _test_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ opts={},
+ expected_outputs=[b'Check for useless excludes'],
+ expected_ret=0,
+ stage=False,
+ )
+
+
+@pytest.fixture
+def modified_config_repo(repo_with_passing_hook):
+ with modify_config(repo_with_passing_hook, commit=False) as config:
+ # Some minor modification
+ config['repos'][0]['hooks'][0]['files'] = ''
+ yield repo_with_passing_hook
+
+
+def test_error_with_unstaged_config(cap_out, store, modified_config_repo):
+ args = run_opts()
+ ret, printed = _do_run(cap_out, store, modified_config_repo, args)
+ assert b'Your pre-commit configuration is unstaged.' in printed
+ assert ret == 1
+
+
+def test_commit_msg_missing_filename(cap_out, store, repo_with_passing_hook):
+ args = run_opts(hook_stage='commit-msg')
+ ret, printed = _do_run(cap_out, store, repo_with_passing_hook, args)
+ assert ret == 1
+ assert printed == (
+ b'[ERROR] `--commit-msg-filename` is required for '
+ b'`--hook-stage commit-msg`\n'
+ )
+
+
+@pytest.mark.parametrize(
+ 'opts', (run_opts(all_files=True), run_opts(files=[C.CONFIG_FILE])),
+)
+def test_no_unstaged_error_with_all_files_or_files(
+ cap_out, store, modified_config_repo, opts,
+):
+ ret, printed = _do_run(cap_out, store, modified_config_repo, opts)
+ assert b'Your pre-commit configuration is unstaged.' not in printed
+
+
+def test_files_running_subdir(repo_with_passing_hook, tempdir_factory):
+ with cwd(repo_with_passing_hook):
+ os.mkdir('subdir')
+ open('subdir/foo.py', 'w').close()
+ cmd_output('git', 'add', 'subdir/foo.py')
+
+ with cwd('subdir'):
+ # Use subprocess to demonstrate behaviour in main
+ _, stdout, _ = cmd_output_mocked_pre_commit_home(
+ sys.executable, '-m', 'pre_commit.main', 'run', '-v',
+ # Files relative to where we are (#339)
+ '--files', 'foo.py',
+ tempdir_factory=tempdir_factory,
+ )
+ assert 'subdir/foo.py' in stdout
+
+
+@pytest.mark.parametrize(
+ ('pass_filenames', 'hook_args', 'expected_out'),
+ (
+ (True, [], b'foo.py'),
+ (False, [], b''),
+ (True, ['some', 'args'], b'some args foo.py'),
+ (False, ['some', 'args'], b'some args'),
+ ),
+)
+def test_pass_filenames(
+ cap_out, store, repo_with_passing_hook,
+ pass_filenames, hook_args, expected_out,
+):
+ with modify_config() as config:
+ config['repos'][0]['hooks'][0]['pass_filenames'] = pass_filenames
+ config['repos'][0]['hooks'][0]['args'] = hook_args
+ stage_a_file()
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook, run_opts(verbose=True),
+ )
+ assert expected_out + b'\nHello World' in printed
+ assert (b'foo.py' in printed) == pass_filenames
+
+
+def test_fail_fast(cap_out, store, repo_with_failing_hook):
+    """Top-level `fail_fast: true` stops after the first failing hook."""
+    with modify_config() as config:
+        # More than one hook
+        config['fail_fast'] = True
+        config['repos'][0]['hooks'] *= 2
+    stage_a_file()
+
+    ret, printed = _do_run(cap_out, store, repo_with_failing_hook, run_opts())
+    # it should have only run one hook
+    assert printed.count(b'Failing hook') == 1
+
+
+def test_fail_fast_per_hook(cap_out, store, repo_with_failing_hook):
+    """Per-hook `fail_fast: true` also stops the run at that hook."""
+    with modify_config() as config:
+        # More than one hook
+        config['repos'][0]['hooks'] *= 2
+        config['repos'][0]['hooks'][0]['fail_fast'] = True
+    stage_a_file()
+
+    ret, printed = _do_run(cap_out, store, repo_with_failing_hook, run_opts())
+    # it should have only run one hook
+    assert printed.count(b'Failing hook') == 1
+
+
+def test_classifier_removes_dne():
+ classifier = Classifier(('this_file_does_not_exist',))
+ assert classifier.filenames == []
+
+
+def test_classifier_normalizes_filenames_on_windows_to_forward_slashes(tmpdir):
+ with tmpdir.as_cwd():
+ tmpdir.join('a/b/c').ensure()
+ with mock.patch.object(os, 'altsep', '/'):
+ with mock.patch.object(os, 'sep', '\\'):
+ classifier = Classifier.from_config((r'a\b\c',), '', '^$')
+ assert classifier.filenames == ['a/b/c']
+
+
+def test_classifier_does_not_normalize_backslashes_non_windows(tmpdir):
+ with mock.patch.object(os.path, 'lexists', return_value=True):
+ with mock.patch.object(os, 'altsep', None):
+ with mock.patch.object(os, 'sep', '/'):
+ classifier = Classifier.from_config((r'a/b\c',), '', '^$')
+ assert classifier.filenames == [r'a/b\c']
+
+
+def test_classifier_empty_types_or(tmpdir):
+ tmpdir.join('bar').ensure()
+ os.symlink(tmpdir.join('bar'), tmpdir.join('foo'))
+ with tmpdir.as_cwd():
+ classifier = Classifier(('foo', 'bar'))
+ for_symlink = classifier.by_types(
+ classifier.filenames,
+ types=['symlink'],
+ types_or=[],
+ exclude_types=[],
+ )
+ for_file = classifier.by_types(
+ classifier.filenames,
+ types=['file'],
+ types_or=[],
+ exclude_types=[],
+ )
+ assert tuple(for_symlink) == ('foo',)
+ assert tuple(for_file) == ('bar',)
+
+
+@pytest.fixture
+def some_filenames():
+    """A small filename sample: one yaml file plus two python files."""
+    return (
+        '.pre-commit-hooks.yaml',
+        'pre_commit/git.py',
+        'pre_commit/main.py',
+    )
+
+
+def test_include_exclude_base_case(some_filenames):
+ ret = filter_by_include_exclude(some_filenames, '', '^$')
+ assert tuple(ret) == (
+ '.pre-commit-hooks.yaml',
+ 'pre_commit/git.py',
+ 'pre_commit/main.py',
+ )
+
+
+def test_matches_broken_symlink(tmpdir):
+    """A dangling symlink is still a filterable path and must not be dropped."""
+    with tmpdir.as_cwd():
+        os.symlink('does-not-exist', 'link')
+        ret = filter_by_include_exclude({'link'}, '', '^$')
+        assert tuple(ret) == ('link',)
+
+
+def test_include_exclude_total_match(some_filenames):
+    # an include regex spanning the whole name keeps only matching files
+    ret = filter_by_include_exclude(some_filenames, r'^.*\.py$', '^$')
+    assert tuple(ret) == ('pre_commit/git.py', 'pre_commit/main.py')
+
+
+def test_include_exclude_does_search_instead_of_match(some_filenames):
+    # an unanchored pattern matches anywhere in the name (search semantics)
+    ret = filter_by_include_exclude(some_filenames, r'\.yaml$', '^$')
+    assert tuple(ret) == ('.pre-commit-hooks.yaml',)
+
+
+def test_include_exclude_exclude_removes_files(some_filenames):
+ ret = filter_by_include_exclude(some_filenames, '', r'\.py$')
+ assert tuple(ret) == ('.pre-commit-hooks.yaml',)
+
+
+def test_args_hook_only(cap_out, store, repo_with_passing_hook):
+ config = {
+ 'repo': 'local',
+ 'hooks': [
+ {
+ 'id': 'identity-copy',
+ 'name': 'identity-copy',
+ 'entry': '{} -m pre_commit.meta_hooks.identity'.format(
+ shlex.quote(sys.executable),
+ ),
+ 'language': 'system',
+ 'files': r'\.py$',
+ 'stages': ['pre-commit'],
+ },
+ {
+ 'id': 'do_not_commit',
+ 'name': 'Block if "DO NOT COMMIT" is found',
+ 'entry': 'DO NOT COMMIT',
+ 'language': 'pygrep',
+ },
+ ],
+ }
+ add_config_to_repo(repo_with_passing_hook, config)
+ stage_a_file()
+ ret, printed = _do_run(
+ cap_out,
+ store,
+ repo_with_passing_hook,
+ run_opts(hook='do_not_commit'),
+ )
+ assert b'identity-copy' not in printed
+
+
+def test_skipped_without_any_setup_for_post_checkout(in_git_dir, store):
+ environ = {'_PRE_COMMIT_SKIP_POST_CHECKOUT': '1'}
+ opts = run_opts(hook_stage='post-checkout')
+ assert run(C.CONFIG_FILE, store, opts, environ=environ) == 0
+
+
+def test_pre_commit_env_variable_set(cap_out, store, repo_with_passing_hook):
+ args = run_opts()
+ environ: MutableMapping[str, str] = {}
+ ret, printed = _do_run(
+ cap_out, store, repo_with_passing_hook, args, environ,
+ )
+ assert environ['PRE_COMMIT'] == '1'
diff --git a/tests/commands/sample_config_test.py b/tests/commands/sample_config_test.py
new file mode 100644
index 0000000..cf56e98
--- /dev/null
+++ b/tests/commands/sample_config_test.py
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+from pre_commit.commands.sample_config import sample_config
+
+
+def test_sample_config(capsys):
+ ret = sample_config()
+ assert ret == 0
+ out, _ = capsys.readouterr()
+ assert out == '''\
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.2.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-yaml
+ - id: check-added-large-files
+'''
diff --git a/tests/commands/try_repo_test.py b/tests/commands/try_repo_test.py
new file mode 100644
index 0000000..c5f891e
--- /dev/null
+++ b/tests/commands/try_repo_test.py
@@ -0,0 +1,155 @@
+from __future__ import annotations
+
+import os.path
+import re
+import time
+from unittest import mock
+
+import re_assert
+
+from pre_commit import git
+from pre_commit.commands.try_repo import try_repo
+from pre_commit.util import cmd_output
+from testing.auto_namedtuple import auto_namedtuple
+from testing.fixtures import git_dir
+from testing.fixtures import make_repo
+from testing.fixtures import modify_manifest
+from testing.util import cwd
+from testing.util import git_commit
+from testing.util import run_opts
+
+
+def try_repo_opts(repo, ref=None, **kwargs):
+ return auto_namedtuple(repo=repo, ref=ref, **run_opts(**kwargs)._asdict())
+
+
+def _get_out(cap_out):
+ out = re.sub(r'\[INFO\].+\n', '', cap_out.get())
+ start, using_config, config, rest = out.split(f'{"=" * 79}\n')
+ assert using_config == 'Using config:\n'
+ return start, config, rest
+
+
+def _add_test_file():
+ open('test-file', 'a').close()
+ cmd_output('git', 'add', '.')
+
+
+def _run_try_repo(tempdir_factory, **kwargs):
+ repo = make_repo(tempdir_factory, 'modified_file_returns_zero_repo')
+ with cwd(git_dir(tempdir_factory)):
+ _add_test_file()
+ assert not try_repo(try_repo_opts(repo, **kwargs))
+
+
+def test_try_repo_repo_only(cap_out, tempdir_factory):
+ with mock.patch.object(time, 'monotonic', return_value=0.0):
+ _run_try_repo(tempdir_factory, verbose=True)
+ start, config, rest = _get_out(cap_out)
+ assert start == ''
+ config_pattern = re_assert.Matches(
+ '^repos:\n'
+ '- repo: .+\n'
+ ' rev: .+\n'
+ ' hooks:\n'
+ ' - id: bash_hook\n'
+ ' - id: bash_hook2\n'
+ ' - id: bash_hook3\n$',
+ )
+ config_pattern.assert_matches(config)
+ assert rest == '''\
+Bash hook............................................(no files to check)Skipped
+- hook id: bash_hook
+Bash hook................................................................Passed
+- hook id: bash_hook2
+- duration: 0s
+
+test-file
+
+Bash hook............................................(no files to check)Skipped
+- hook id: bash_hook3
+'''
+
+
+def test_try_repo_with_specific_hook(cap_out, tempdir_factory):
+ _run_try_repo(tempdir_factory, hook='bash_hook', verbose=True)
+ start, config, rest = _get_out(cap_out)
+ assert start == ''
+ config_pattern = re_assert.Matches(
+ '^repos:\n'
+ '- repo: .+\n'
+ ' rev: .+\n'
+ ' hooks:\n'
+ ' - id: bash_hook\n$',
+ )
+ config_pattern.assert_matches(config)
+ assert rest == '''\
+Bash hook............................................(no files to check)Skipped
+- hook id: bash_hook
+'''
+
+
+def test_try_repo_relative_path(cap_out, tempdir_factory):
+ repo = make_repo(tempdir_factory, 'modified_file_returns_zero_repo')
+ with cwd(git_dir(tempdir_factory)):
+ _add_test_file()
+ relative_repo = os.path.relpath(repo, '.')
+ # previously crashed on cloning a relative path
+ assert not try_repo(try_repo_opts(relative_repo, hook='bash_hook'))
+
+
+def test_try_repo_bare_repo(cap_out, tempdir_factory):
+ repo = make_repo(tempdir_factory, 'modified_file_returns_zero_repo')
+ with cwd(git_dir(tempdir_factory)):
+ _add_test_file()
+ bare_repo = os.path.join(repo, '.git')
+ # previously crashed attempting modification changes
+ assert not try_repo(try_repo_opts(bare_repo, hook='bash_hook'))
+
+
+def test_try_repo_specific_revision(cap_out, tempdir_factory):
+ repo = make_repo(tempdir_factory, 'script_hooks_repo')
+ ref = git.head_rev(repo)
+ git_commit(cwd=repo)
+ with cwd(git_dir(tempdir_factory)):
+ _add_test_file()
+ assert not try_repo(try_repo_opts(repo, ref=ref))
+
+ _, config, _ = _get_out(cap_out)
+ assert ref in config
+
+
+def test_try_repo_uncommitted_changes(cap_out, tempdir_factory):
+ repo = make_repo(tempdir_factory, 'script_hooks_repo')
+ # make an uncommitted change
+ with modify_manifest(repo, commit=False) as manifest:
+ manifest[0]['name'] = 'modified name!'
+
+ with cwd(git_dir(tempdir_factory)):
+ open('test-fie', 'a').close()
+ cmd_output('git', 'add', '.')
+ assert not try_repo(try_repo_opts(repo))
+
+ start, config, rest = _get_out(cap_out)
+ assert start == '[WARNING] Creating temporary repo with uncommitted changes...\n' # noqa: E501
+ config_pattern = re_assert.Matches(
+ '^repos:\n'
+ '- repo: .+shadow-repo\n'
+ ' rev: .+\n'
+ ' hooks:\n'
+ ' - id: bash_hook\n$',
+ )
+ config_pattern.assert_matches(config)
+ assert rest == 'modified name!...........................................................Passed\n' # noqa: E501
+
+
+def test_try_repo_staged_changes(tempdir_factory):
+ repo = make_repo(tempdir_factory, 'modified_file_returns_zero_repo')
+
+ with cwd(repo):
+ open('staged-file', 'a').close()
+ open('second-staged-file', 'a').close()
+ cmd_output('git', 'add', '.')
+
+ with cwd(git_dir(tempdir_factory)):
+ assert not try_repo(try_repo_opts(repo, hook='bash_hook'))
diff --git a/tests/commands/validate_config_test.py b/tests/commands/validate_config_test.py
new file mode 100644
index 0000000..a475cd8
--- /dev/null
+++ b/tests/commands/validate_config_test.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import logging
+
+from pre_commit.commands.validate_config import validate_config
+
+
+def test_validate_config_ok():
+ assert not validate_config(('.pre-commit-config.yaml',))
+
+
+def test_validate_warn_on_unknown_keys_at_repo_level(tmpdir, caplog):
+ f = tmpdir.join('cfg.yaml')
+ f.write(
+ 'repos:\n'
+ '- repo: https://gitlab.com/pycqa/flake8\n'
+ ' rev: 3.7.7\n'
+ ' hooks:\n'
+ ' - id: flake8\n'
+ ' args: [--some-args]\n',
+ )
+ ret_val = validate_config((f.strpath,))
+ assert not ret_val
+ assert caplog.record_tuples == [
+ (
+ 'pre_commit',
+ logging.WARNING,
+ 'Unexpected key(s) present on https://gitlab.com/pycqa/flake8: '
+ 'args',
+ ),
+ ]
+
+
+def test_validate_warn_on_unknown_keys_at_top_level(tmpdir, caplog):
+ f = tmpdir.join('cfg.yaml')
+ f.write(
+ 'repos:\n'
+ '- repo: https://gitlab.com/pycqa/flake8\n'
+ ' rev: 3.7.7\n'
+ ' hooks:\n'
+ ' - id: flake8\n'
+ 'foo:\n'
+ ' id: 1.0.0\n',
+ )
+ ret_val = validate_config((f.strpath,))
+ assert not ret_val
+ assert caplog.record_tuples == [
+ (
+ 'pre_commit',
+ logging.WARNING,
+ 'Unexpected key(s) present at root: foo',
+ ),
+ ]
+
+
+def test_mains_not_ok(tmpdir):
+ not_yaml = tmpdir.join('f.notyaml')
+ not_yaml.write('{')
+ not_schema = tmpdir.join('notconfig.yaml')
+ not_schema.write('{}')
+
+ assert validate_config(('does-not-exist',))
+ assert validate_config((not_yaml.strpath,))
+ assert validate_config((not_schema.strpath,))
diff --git a/tests/commands/validate_manifest_test.py b/tests/commands/validate_manifest_test.py
new file mode 100644
index 0000000..a4bc8ac
--- /dev/null
+++ b/tests/commands/validate_manifest_test.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from pre_commit.commands.validate_manifest import validate_manifest
+
+
+def test_validate_manifest_ok():
+ assert not validate_manifest(('.pre-commit-hooks.yaml',))
+
+
+def test_not_ok(tmpdir):
+ not_yaml = tmpdir.join('f.notyaml')
+ not_yaml.write('{')
+ not_schema = tmpdir.join('notconfig.yaml')
+ not_schema.write('{}')
+
+ assert validate_manifest(('does-not-exist',))
+ assert validate_manifest((not_yaml.strpath,))
+ assert validate_manifest((not_schema.strpath,))
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..3076171
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,252 @@
+from __future__ import annotations
+
+import functools
+import io
+import logging
+import os.path
+from unittest import mock
+
+import pytest
+
+from pre_commit import output
+from pre_commit.envcontext import envcontext
+from pre_commit.logging_handler import logging_handler
+from pre_commit.store import Store
+from pre_commit.util import cmd_output
+from pre_commit.util import make_executable
+from testing.fixtures import git_dir
+from testing.fixtures import make_consuming_repo
+from testing.fixtures import write_config
+from testing.util import cwd
+from testing.util import git_commit
+
+
+@pytest.fixture
+def tempdir_factory(tmpdir):
+ class TmpdirFactory:
+ def __init__(self):
+ self.tmpdir_count = 0
+
+ def get(self):
+ path = tmpdir.join(str(self.tmpdir_count)).strpath
+ self.tmpdir_count += 1
+ os.mkdir(path)
+ return path
+
+ yield TmpdirFactory()
+
+
+@pytest.fixture
+def in_tmpdir(tempdir_factory):
+ path = tempdir_factory.get()
+ with cwd(path):
+ yield path
+
+
+@pytest.fixture
+def in_git_dir(tmpdir):
+ repo = tmpdir.join('repo').ensure_dir()
+ with repo.as_cwd():
+ cmd_output('git', 'init')
+ yield repo
+
+
+def _make_conflict():
+ cmd_output('git', 'checkout', 'origin/master', '-b', 'foo')
+ with open('conflict_file', 'w') as conflict_file:
+ conflict_file.write('herp\nderp\n')
+ cmd_output('git', 'add', 'conflict_file')
+ with open('foo_only_file', 'w') as foo_only_file:
+ foo_only_file.write('foo')
+ cmd_output('git', 'add', 'foo_only_file')
+ git_commit(msg=_make_conflict.__name__)
+ cmd_output('git', 'checkout', 'origin/master', '-b', 'bar')
+ with open('conflict_file', 'w') as conflict_file:
+ conflict_file.write('harp\nddrp\n')
+ cmd_output('git', 'add', 'conflict_file')
+ with open('bar_only_file', 'w') as bar_only_file:
+ bar_only_file.write('bar')
+ cmd_output('git', 'add', 'bar_only_file')
+ git_commit(msg=_make_conflict.__name__)
+ cmd_output('git', 'merge', 'foo', check=False)
+
+
+@pytest.fixture
+def in_merge_conflict(tempdir_factory):
+ path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
+ open(os.path.join(path, 'placeholder'), 'a').close()
+ cmd_output('git', 'add', 'placeholder', cwd=path)
+ git_commit(msg=in_merge_conflict.__name__, cwd=path)
+
+ conflict_path = tempdir_factory.get()
+ cmd_output('git', 'clone', path, conflict_path)
+ with cwd(conflict_path):
+ _make_conflict()
+ yield os.path.join(conflict_path)
+
+
+@pytest.fixture
+def in_conflicting_submodule(tempdir_factory):
+ git_dir_1 = git_dir(tempdir_factory)
+ git_dir_2 = git_dir(tempdir_factory)
+ git_commit(msg=in_conflicting_submodule.__name__, cwd=git_dir_2)
+ cmd_output('git', 'submodule', 'add', git_dir_2, 'sub', cwd=git_dir_1)
+ with cwd(os.path.join(git_dir_1, 'sub')):
+ _make_conflict()
+ yield
+
+
+@pytest.fixture
+def commit_msg_repo(tempdir_factory):
+ path = git_dir(tempdir_factory)
+ config = {
+ 'repo': 'local',
+ 'hooks': [{
+ 'id': 'must-have-signoff',
+ 'name': 'Must have "Signed off by:"',
+ 'entry': 'grep -q "Signed off by:"',
+ 'language': 'system',
+ 'stages': ['commit-msg'],
+ }],
+ }
+ write_config(path, config)
+ with cwd(path):
+ cmd_output('git', 'add', '.')
+ git_commit(msg=commit_msg_repo.__name__)
+ yield path
+
+
+@pytest.fixture
+def prepare_commit_msg_repo(tempdir_factory):
+ path = git_dir(tempdir_factory)
+ script_name = 'add_sign_off.sh'
+ config = {
+ 'repo': 'local',
+ 'hooks': [{
+ 'id': 'add-signoff',
+ 'name': 'Add "Signed off by:"',
+ 'entry': f'./{script_name}',
+ 'language': 'script',
+ 'stages': ['prepare-commit-msg'],
+ }],
+ }
+ write_config(path, config)
+ with cwd(path):
+ with open(script_name, 'w') as script_file:
+ script_file.write(
+ '#!/usr/bin/env bash\n'
+ 'set -eu\n'
+ 'echo "\nSigned off by: " >> "$1"\n',
+ )
+ make_executable(script_name)
+ cmd_output('git', 'add', '.')
+ git_commit(msg=prepare_commit_msg_repo.__name__)
+ yield path
+
+
+@pytest.fixture
+def failing_prepare_commit_msg_repo(tempdir_factory):
+ path = git_dir(tempdir_factory)
+ config = {
+ 'repo': 'local',
+ 'hooks': [{
+ 'id': 'add-signoff',
+ 'name': 'Add "Signed off by:"',
+ 'entry': 'bash -c "exit 1"',
+ 'language': 'system',
+ 'stages': ['prepare-commit-msg'],
+ }],
+ }
+ write_config(path, config)
+ with cwd(path):
+ cmd_output('git', 'add', '.')
+ git_commit(msg=failing_prepare_commit_msg_repo.__name__)
+ yield path
+
+
+@pytest.fixture(autouse=True, scope='session')
+def dont_write_to_home_directory():
+ """pre_commit.store.Store will by default write to the home directory
+ We'll mock out `Store.get_default_directory` to raise invariantly so we
+ don't construct a `Store` object that writes to our home directory.
+ """
+ class YouForgotToExplicitlyChooseAStoreDirectory(AssertionError):
+ pass
+
+ with mock.patch.object(
+ Store,
+ 'get_default_directory',
+ side_effect=YouForgotToExplicitlyChooseAStoreDirectory,
+ ):
+ yield
+
+
+@pytest.fixture(autouse=True, scope='session')
+def configure_logging():
+ with logging_handler(use_color=False):
+ yield
+
+
+@pytest.fixture
+def mock_store_dir(tempdir_factory):
+ tmpdir = tempdir_factory.get()
+ with mock.patch.object(
+ Store,
+ 'get_default_directory',
+ return_value=tmpdir,
+ ):
+ yield tmpdir
+
+
+@pytest.fixture
+def store(tempdir_factory):
+ yield Store(os.path.join(tempdir_factory.get(), '.pre-commit'))
+
+
+@pytest.fixture
+def log_info_mock():
+ with mock.patch.object(logging.getLogger('pre_commit'), 'info') as mck:
+ yield mck
+
+
+class FakeStream:
+ def __init__(self):
+ self.data = io.BytesIO()
+
+ def write(self, s):
+ self.data.write(s)
+
+ def flush(self):
+ pass
+
+
+class Fixture:
+ def __init__(self, stream):
+ self._stream = stream
+
+ def get_bytes(self):
+ """Get the output as-if no encoding occurred"""
+ data = self._stream.data.getvalue()
+ self._stream.data.seek(0)
+ self._stream.data.truncate()
+ return data.replace(b'\r\n', b'\n')
+
+ def get(self):
+ """Get the output assuming it was written as UTF-8 bytes"""
+ return self.get_bytes().decode()
+
+
+@pytest.fixture
+def cap_out():
+ stream = FakeStream()
+ write = functools.partial(output.write, stream=stream)
+ write_line_b = functools.partial(output.write_line_b, stream=stream)
+ with mock.patch.multiple(output, write=write, write_line_b=write_line_b):
+ yield Fixture(stream)
+
+
+@pytest.fixture(scope='session', autouse=True)
+def set_git_templatedir(tmpdir_factory):
+ tdir = str(tmpdir_factory.mktemp('git_template_dir'))
+ with envcontext((('GIT_TEMPLATE_DIR', tdir),)):
+ yield
diff --git a/tests/envcontext_test.py b/tests/envcontext_test.py
new file mode 100644
index 0000000..c82d326
--- /dev/null
+++ b/tests/envcontext_test.py
@@ -0,0 +1,103 @@
+from __future__ import annotations
+
+import os
+from unittest import mock
+
+import pytest
+
+from pre_commit.envcontext import envcontext
+from pre_commit.envcontext import UNSET
+from pre_commit.envcontext import Var
+
+
+def _test(*, before, patch, expected):
+ env = before.copy()
+ with envcontext(patch, _env=env):
+ assert env == expected
+ assert env == before
+
+
+def test_trivial():
+ _test(before={}, patch={}, expected={})
+
+
+def test_noop():
+ _test(before={'foo': 'bar'}, patch=(), expected={'foo': 'bar'})
+
+
+def test_adds():
+ _test(before={}, patch=[('foo', 'bar')], expected={'foo': 'bar'})
+
+
+def test_overrides():
+ _test(
+ before={'foo': 'baz'},
+ patch=[('foo', 'bar')],
+ expected={'foo': 'bar'},
+ )
+
+
+def test_unset_but_nothing_to_unset():
+ _test(before={}, patch=[('foo', UNSET)], expected={})
+
+
+def test_unset_things_to_remove():
+ _test(
+ before={'PYTHONHOME': ''},
+ patch=[('PYTHONHOME', UNSET)],
+ expected={},
+ )
+
+
+def test_templated_environment_variable_missing():
+ _test(
+ before={},
+ patch=[('PATH', ('~/bin:', Var('PATH')))],
+ expected={'PATH': '~/bin:'},
+ )
+
+
+def test_templated_environment_variable_defaults():
+ _test(
+ before={},
+ patch=[('PATH', ('~/bin:', Var('PATH', default='/bin')))],
+ expected={'PATH': '~/bin:/bin'},
+ )
+
+
+def test_templated_environment_variable_there():
+ _test(
+ before={'PATH': '/usr/local/bin:/usr/bin'},
+ patch=[('PATH', ('~/bin:', Var('PATH')))],
+ expected={'PATH': '~/bin:/usr/local/bin:/usr/bin'},
+ )
+
+
+def test_templated_environ_sources_from_previous():
+ _test(
+ before={'foo': 'bar'},
+ patch=(
+ ('foo', 'baz'),
+ ('herp', ('foo: ', Var('foo'))),
+ ),
+ expected={'foo': 'baz', 'herp': 'foo: bar'},
+ )
+
+
+def test_exception_safety():
+ class MyError(RuntimeError):
+ pass
+
+ env = {'hello': 'world'}
+ with pytest.raises(MyError):
+ with envcontext((('foo', 'bar'),), _env=env):
+ raise MyError()
+ assert env == {'hello': 'world'}
+
+
+def test_integration_os_environ():
+ with mock.patch.dict(os.environ, {'FOO': 'bar'}, clear=True):
+ assert os.environ == {'FOO': 'bar'}
+ with envcontext((('HERP', 'derp'),)):
+ assert os.environ == {'FOO': 'bar', 'HERP': 'derp'}
+ assert os.environ == {'FOO': 'bar'}
diff --git a/tests/error_handler_test.py b/tests/error_handler_test.py
new file mode 100644
index 0000000..a79d9c1
--- /dev/null
+++ b/tests/error_handler_test.py
@@ -0,0 +1,220 @@
+from __future__ import annotations
+
+import os.path
+import stat
+import sys
+from unittest import mock
+
+import pytest
+import re_assert
+
+from pre_commit import error_handler
+from pre_commit.errors import FatalError
+from pre_commit.store import Store
+from pre_commit.util import CalledProcessError
+from testing.util import cmd_output_mocked_pre_commit_home
+from testing.util import xfailif_windows
+
+
+@pytest.fixture
+def mocked_log_and_exit():
+ with mock.patch.object(error_handler, '_log_and_exit') as log_and_exit:
+ yield log_and_exit
+
+
+def test_error_handler_no_exception(mocked_log_and_exit):
+ with error_handler.error_handler():
+ pass
+ assert mocked_log_and_exit.call_count == 0
+
+
+def test_error_handler_fatal_error(mocked_log_and_exit):
+ exc = FatalError('just a test')
+ with error_handler.error_handler():
+ raise exc
+
+ mocked_log_and_exit.assert_called_once_with(
+ 'An error has occurred',
+ 1,
+ exc,
+ # Tested below
+ mock.ANY,
+ )
+
+ pattern = re_assert.Matches(
+ r'Traceback \(most recent call last\):\n'
+ r' File ".+pre_commit.error_handler.py", line \d+, in error_handler\n'
+ r' yield\n'
+ r'( \^\^\^\^\^\n)?'
+ r' File ".+tests.error_handler_test.py", line \d+, '
+ r'in test_error_handler_fatal_error\n'
+ r' raise exc\n'
+ r'( \^\^\^\^\^\^\^\^\^\n)?'
+ r'(pre_commit\.errors\.)?FatalError: just a test\n',
+ )
+ pattern.assert_matches(mocked_log_and_exit.call_args[0][3])
+
+
+def test_error_handler_uncaught_error(mocked_log_and_exit):
+ exc = ValueError('another test')
+ with error_handler.error_handler():
+ raise exc
+
+ mocked_log_and_exit.assert_called_once_with(
+ 'An unexpected error has occurred',
+ 3,
+ exc,
+ # Tested below
+ mock.ANY,
+ )
+ pattern = re_assert.Matches(
+ r'Traceback \(most recent call last\):\n'
+ r' File ".+pre_commit.error_handler.py", line \d+, in error_handler\n'
+ r' yield\n'
+ r'( \^\^\^\^\^\n)?'
+ r' File ".+tests.error_handler_test.py", line \d+, '
+ r'in test_error_handler_uncaught_error\n'
+ r' raise exc\n'
+ r'( \^\^\^\^\^\^\^\^\^\n)?'
+ r'ValueError: another test\n',
+ )
+ pattern.assert_matches(mocked_log_and_exit.call_args[0][3])
+
+
+def test_error_handler_keyboardinterrupt(mocked_log_and_exit):
+ exc = KeyboardInterrupt()
+ with error_handler.error_handler():
+ raise exc
+
+ mocked_log_and_exit.assert_called_once_with(
+ 'Interrupted (^C)',
+ 130,
+ exc,
+ # Tested below
+ mock.ANY,
+ )
+ pattern = re_assert.Matches(
+ r'Traceback \(most recent call last\):\n'
+ r' File ".+pre_commit.error_handler.py", line \d+, in error_handler\n'
+ r' yield\n'
+ r'( \^\^\^\^\^\n)?'
+ r' File ".+tests.error_handler_test.py", line \d+, '
+ r'in test_error_handler_keyboardinterrupt\n'
+ r' raise exc\n'
+ r'( \^\^\^\^\^\^\^\^\^\n)?'
+ r'KeyboardInterrupt\n',
+ )
+ pattern.assert_matches(mocked_log_and_exit.call_args[0][3])
+
+
+def test_log_and_exit(cap_out, mock_store_dir):
+ tb = (
+ 'Traceback (most recent call last):\n'
+ ' File "<stdin>", line 2, in <module>\n'
+ 'pre_commit.errors.FatalError: hai\n'
+ )
+
+ with pytest.raises(SystemExit) as excinfo:
+ error_handler._log_and_exit('msg', 1, FatalError('hai'), tb)
+ assert excinfo.value.code == 1
+
+ printed = cap_out.get()
+ log_file = os.path.join(mock_store_dir, 'pre-commit.log')
+ assert printed == f'msg: FatalError: hai\nCheck the log at {log_file}\n'
+
+ assert os.path.exists(log_file)
+ with open(log_file) as f:
+ logged = f.read()
+ pattern = re_assert.Matches(
+ r'^### version information\n'
+ r'\n'
+ r'```\n'
+ r'pre-commit version: \d+\.\d+\.\d+\n'
+ r'git --version: git version .+\n'
+ r'sys.version:\n'
+ r'( .*\n)*'
+ r'sys.executable: .*\n'
+ r'os.name: .*\n'
+ r'sys.platform: .*\n'
+ r'```\n'
+ r'\n'
+ r'### error information\n'
+ r'\n'
+ r'```\n'
+ r'msg: FatalError: hai\n'
+ r'```\n'
+ r'\n'
+ r'```\n'
+ r'Traceback \(most recent call last\):\n'
+ r' File "<stdin>", line 2, in <module>\n'
+ r'pre_commit\.errors\.FatalError: hai\n'
+ r'```\n',
+ )
+ pattern.assert_matches(logged)
+
+
+def test_error_handler_non_ascii_exception(mock_store_dir):
+ with pytest.raises(SystemExit):
+ with error_handler.error_handler():
+ raise ValueError('β˜ƒ')
+
+
+def test_error_handler_non_utf8_exception(mock_store_dir):
+ with pytest.raises(SystemExit):
+ with error_handler.error_handler():
+ raise CalledProcessError(1, ('exe',), b'error: \xa0\xe1', b'')
+
+
+def test_error_handler_non_stringable_exception(mock_store_dir):
+ class C(Exception):
+ def __str__(self):
+ raise RuntimeError('not today!')
+
+ with pytest.raises(SystemExit):
+ with error_handler.error_handler():
+ raise C()
+
+
+def test_error_handler_no_tty(tempdir_factory):
+ pre_commit_home = tempdir_factory.get()
+ ret, out, _ = cmd_output_mocked_pre_commit_home(
+ sys.executable,
+ '-c',
+ 'from pre_commit.error_handler import error_handler\n'
+ 'with error_handler():\n'
+ ' raise ValueError("\\u2603")\n',
+ check=False,
+ tempdir_factory=tempdir_factory,
+ pre_commit_home=pre_commit_home,
+ )
+ assert ret == 3
+ log_file = os.path.join(pre_commit_home, 'pre-commit.log')
+ out_lines = out.splitlines()
+ assert out_lines[-2] == 'An unexpected error has occurred: ValueError: β˜ƒ'
+ assert out_lines[-1] == f'Check the log at {log_file}'
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_error_handler_read_only_filesystem(mock_store_dir, cap_out, capsys):
+ # a better scenario would be if even the Store crash would be handled
+ # but realistically we're only targetting systems where the Store has
+ # already been set up
+ Store()
+
+ write = (stat.S_IWGRP | stat.S_IWOTH | stat.S_IWUSR)
+ os.chmod(mock_store_dir, os.stat(mock_store_dir).st_mode & ~write)
+
+ with pytest.raises(SystemExit):
+ with error_handler.error_handler():
+ raise ValueError('ohai')
+
+ output = cap_out.get()
+ assert output.startswith(
+ 'An unexpected error has occurred: ValueError: ohai\n'
+ 'Failed to write to log at ',
+ )
+
+ # our cap_out mock is imperfect so the rest of the output goes to capsys
+ out, _ = capsys.readouterr()
+ # the things that normally go to the log file will end up here
+ assert '### version information' in out
diff --git a/tests/git_test.py b/tests/git_test.py
new file mode 100644
index 0000000..93f5a1c
--- /dev/null
+++ b/tests/git_test.py
@@ -0,0 +1,284 @@
+from __future__ import annotations
+
+import os.path
+
+import pytest
+
+from pre_commit import git
+from pre_commit.error_handler import FatalError
+from pre_commit.util import cmd_output
+from testing.util import git_commit
+
+
+def test_get_root_at_root(in_git_dir):
+ expected = os.path.normcase(in_git_dir.strpath)
+ assert os.path.normcase(git.get_root()) == expected
+
+
+def test_get_root_deeper(in_git_dir):
+ expected = os.path.normcase(in_git_dir.strpath)
+ with in_git_dir.join('foo').ensure_dir().as_cwd():
+ assert os.path.normcase(git.get_root()) == expected
+
+
+def test_get_root_in_git_sub_dir(in_git_dir):
+ expected = os.path.normcase(in_git_dir.strpath)
+ with pytest.raises(FatalError):
+ with in_git_dir.join('.git/objects').ensure_dir().as_cwd():
+ assert os.path.normcase(git.get_root()) == expected
+
+
+def test_get_root_not_in_working_dir(in_git_dir):
+ expected = os.path.normcase(in_git_dir.strpath)
+ with pytest.raises(FatalError):
+ with in_git_dir.join('..').ensure_dir().as_cwd():
+ assert os.path.normcase(git.get_root()) == expected
+
+
+def test_in_exactly_dot_git(in_git_dir):
+ with in_git_dir.join('.git').as_cwd(), pytest.raises(FatalError):
+ git.get_root()
+
+
+def test_get_root_bare_worktree(tmpdir):
+ src = tmpdir.join('src').ensure_dir()
+ cmd_output('git', 'init', str(src))
+ git_commit(cwd=str(src))
+
+ bare = tmpdir.join('bare.git').ensure_dir()
+ cmd_output('git', 'clone', '--bare', str(src), str(bare))
+
+ cmd_output('git', 'worktree', 'add', 'foo', 'HEAD', cwd=bare)
+
+ with bare.join('foo').as_cwd():
+ assert git.get_root() == os.path.abspath('.')
+
+
+def test_get_git_dir(tmpdir):
+ """Regression test for #1972"""
+ src = tmpdir.join('src').ensure_dir()
+ cmd_output('git', 'init', str(src))
+ git_commit(cwd=str(src))
+
+ worktree = tmpdir.join('worktree').ensure_dir()
+ cmd_output('git', 'worktree', 'add', '../worktree', cwd=src)
+
+ with worktree.as_cwd():
+ assert git.get_git_dir() == src.ensure_dir(
+ '.git/worktrees/worktree',
+ )
+ assert git.get_git_common_dir() == src.ensure_dir('.git')
+
+
+def test_get_root_worktree_in_git(tmpdir):
+ src = tmpdir.join('src').ensure_dir()
+ cmd_output('git', 'init', str(src))
+ git_commit(cwd=str(src))
+
+ cmd_output('git', 'worktree', 'add', '.git/trees/foo', 'HEAD', cwd=src)
+
+ with src.join('.git/trees/foo').as_cwd():
+ assert git.get_root() == os.path.abspath('.')
+
+
+def test_get_staged_files_deleted(in_git_dir):
+ in_git_dir.join('test').ensure()
+ cmd_output('git', 'add', 'test')
+ git_commit()
+ cmd_output('git', 'rm', '--cached', 'test')
+ assert git.get_staged_files() == []
+
+
+def test_is_not_in_merge_conflict(in_git_dir):
+ assert git.is_in_merge_conflict() is False
+
+
+def test_is_in_merge_conflict(in_merge_conflict):
+ assert git.is_in_merge_conflict() is True
+
+
+def test_is_in_merge_conflict_submodule(in_conflicting_submodule):
+ assert git.is_in_merge_conflict() is True
+
+
+def test_cherry_pick_conflict(in_merge_conflict):
+ cmd_output('git', 'merge', '--abort')
+ foo_ref = cmd_output('git', 'rev-parse', 'foo')[1].strip()
+ cmd_output('git', 'cherry-pick', foo_ref, check=False)
+ assert git.is_in_merge_conflict() is False
+
+
+def resolve_conflict():
+ with open('conflict_file', 'w') as conflicted_file:
+ conflicted_file.write('herp\nderp\n')
+ cmd_output('git', 'add', 'conflict_file')
+
+
+def test_get_conflicted_files(in_merge_conflict):
+ resolve_conflict()
+ with open('other_file', 'w') as other_file:
+ other_file.write('oh hai')
+ cmd_output('git', 'add', 'other_file')
+
+ ret = set(git.get_conflicted_files())
+ assert ret == {'conflict_file', 'other_file'}
+
+
+def test_get_conflicted_files_in_submodule(in_conflicting_submodule):
+ resolve_conflict()
+ assert set(git.get_conflicted_files()) == {'conflict_file'}
+
+
+def test_get_conflicted_files_unstaged_files(in_merge_conflict):
+ """This case no longer occurs, but it is a useful test nonetheless"""
+ resolve_conflict()
+
+ # Make unstaged file.
+ with open('bar_only_file', 'w') as bar_only_file:
+ bar_only_file.write('new contents!\n')
+
+ ret = set(git.get_conflicted_files())
+ assert ret == {'conflict_file'}
+
+
+MERGE_MSG = b"Merge branch 'foo' into bar\n\nConflicts:\n\tconflict_file\n"
+OTHER_MERGE_MSG = MERGE_MSG + b'\tother_conflict_file\n'
+
+
+@pytest.mark.parametrize(
+ ('input', 'expected_output'),
+ (
+ (MERGE_MSG, ['conflict_file']),
+ (OTHER_MERGE_MSG, ['conflict_file', 'other_conflict_file']),
+ ),
+)
+def test_parse_merge_msg_for_conflicts(input, expected_output):
+ ret = git.parse_merge_msg_for_conflicts(input)
+ assert ret == expected_output
+
+
+def test_get_changed_files(in_git_dir):
+ git_commit()
+ in_git_dir.join('a.txt').ensure()
+ in_git_dir.join('b.txt').ensure()
+ cmd_output('git', 'add', '.')
+ git_commit()
+ files = git.get_changed_files('HEAD^', 'HEAD')
+ assert files == ['a.txt', 'b.txt']
+
+ # files changed in source but not in origin should not be returned
+ files = git.get_changed_files('HEAD', 'HEAD^')
+ assert files == []
+
+
+def test_get_changed_files_disparate_histories(in_git_dir):
+ """in modern versions of git, `...` does not fall back to full diff"""
+ git_commit()
+ in_git_dir.join('a.txt').ensure()
+ cmd_output('git', 'add', '.')
+ git_commit()
+ cmd_output('git', 'branch', '-m', 'branch1')
+
+ cmd_output('git', 'checkout', '--orphan', 'branch2')
+ cmd_output('git', 'rm', '-rf', '.')
+ in_git_dir.join('a.txt').ensure()
+ in_git_dir.join('b.txt').ensure()
+ cmd_output('git', 'add', '.')
+ git_commit()
+
+ assert git.get_changed_files('branch1', 'branch2') == ['b.txt']
+
+
+@pytest.mark.parametrize(
+ ('s', 'expected'),
+ (
+ ('foo\0bar\0', ['foo', 'bar']),
+ ('foo\0', ['foo']),
+ ('', []),
+ ('foo', ['foo']),
+ ),
+)
+def test_zsplit(s, expected):
+ assert git.zsplit(s) == expected
+
+
+@pytest.fixture
+def non_ascii_repo(in_git_dir):
+ git_commit()
+ in_git_dir.join('ΠΈΠ½Ρ‚Π΅Ρ€Π²ΡŒΡŽ').ensure()
+ cmd_output('git', 'add', '.')
+ git_commit()
+ yield in_git_dir
+
+
+def test_all_files_non_ascii(non_ascii_repo):
+ ret = git.get_all_files()
+ assert ret == ['ΠΈΠ½Ρ‚Π΅Ρ€Π²ΡŒΡŽ']
+
+
+def test_staged_files_non_ascii(non_ascii_repo):
+ non_ascii_repo.join('ΠΈΠ½Ρ‚Π΅Ρ€Π²ΡŒΡŽ').write('hi')
+ cmd_output('git', 'add', '.')
+ assert git.get_staged_files() == ['ΠΈΠ½Ρ‚Π΅Ρ€Π²ΡŒΡŽ']
+
+
+def test_changed_files_non_ascii(non_ascii_repo):
+ ret = git.get_changed_files('HEAD^', 'HEAD')
+ assert ret == ['ΠΈΠ½Ρ‚Π΅Ρ€Π²ΡŒΡŽ']
+
+
+def test_get_conflicted_files_non_ascii(in_merge_conflict):
+ open('ΠΈΠ½Ρ‚Π΅Ρ€Π²ΡŒΡŽ', 'a').close()
+ cmd_output('git', 'add', '.')
+ ret = git.get_conflicted_files()
+ assert ret == {'conflict_file', 'ΠΈΠ½Ρ‚Π΅Ρ€Π²ΡŒΡŽ'}
+
+
+def test_intent_to_add(in_git_dir):
+ in_git_dir.join('a').ensure()
+ cmd_output('git', 'add', '--intent-to-add', 'a')
+
+ assert git.intent_to_add_files() == ['a']
+
+
+def test_status_output_with_rename(in_git_dir):
+ in_git_dir.join('a').write('1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n')
+ cmd_output('git', 'add', 'a')
+ git_commit()
+ cmd_output('git', 'mv', 'a', 'b')
+ in_git_dir.join('c').ensure()
+ cmd_output('git', 'add', '--intent-to-add', 'c')
+
+ assert git.intent_to_add_files() == ['c']
+
+
+def test_no_git_env():
+ env = {
+ 'http_proxy': 'http://myproxy:80',
+ 'GIT_EXEC_PATH': '/some/git/exec/path',
+ 'GIT_SSH': '/usr/bin/ssh',
+ 'GIT_SSH_COMMAND': 'ssh -o',
+ 'GIT_DIR': '/none/shall/pass',
+ 'GIT_CONFIG_KEY_0': 'user.name',
+ 'GIT_CONFIG_VALUE_0': 'anthony',
+ 'GIT_CONFIG_KEY_1': 'user.email',
+ 'GIT_CONFIG_VALUE_1': 'asottile@example.com',
+ 'GIT_CONFIG_COUNT': '2',
+ }
+ no_git_env = git.no_git_env(env)
+ assert no_git_env == {
+ 'http_proxy': 'http://myproxy:80',
+ 'GIT_EXEC_PATH': '/some/git/exec/path',
+ 'GIT_SSH': '/usr/bin/ssh',
+ 'GIT_SSH_COMMAND': 'ssh -o',
+ 'GIT_CONFIG_KEY_0': 'user.name',
+ 'GIT_CONFIG_VALUE_0': 'anthony',
+ 'GIT_CONFIG_KEY_1': 'user.email',
+ 'GIT_CONFIG_VALUE_1': 'asottile@example.com',
+ 'GIT_CONFIG_COUNT': '2',
+ }
+
+
+def test_init_repo_no_hooks(tmpdir):
+ git.init_repo(str(tmpdir), remote='dne')
+ assert not tmpdir.join('.git/hooks').exists()
diff --git a/tests/lang_base_test.py b/tests/lang_base_test.py
new file mode 100644
index 0000000..da289ae
--- /dev/null
+++ b/tests/lang_base_test.py
@@ -0,0 +1,166 @@
+from __future__ import annotations
+
+import os.path
+import sys
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit import lang_base
+from pre_commit import parse_shebang
+from pre_commit import xargs
+from pre_commit.prefix import Prefix
+from pre_commit.util import CalledProcessError
+
+
+@pytest.fixture
+def find_exe_mck():
+ with mock.patch.object(parse_shebang, 'find_executable') as mck:
+ yield mck
+
+
+@pytest.fixture
+def homedir_mck():
+ def fake_expanduser(pth):
+ assert pth == '~'
+ return os.path.normpath('/home/me')
+
+ with mock.patch.object(os.path, 'expanduser', fake_expanduser):
+ yield
+
+
+def test_exe_exists_does_not_exist(find_exe_mck, homedir_mck):
+ find_exe_mck.return_value = None
+ assert lang_base.exe_exists('ruby') is False
+
+
+def test_exe_exists_exists(find_exe_mck, homedir_mck):
+ find_exe_mck.return_value = os.path.normpath('/usr/bin/ruby')
+ assert lang_base.exe_exists('ruby') is True
+
+
+def test_exe_exists_false_if_shim(find_exe_mck, homedir_mck):
+ find_exe_mck.return_value = os.path.normpath('/foo/shims/ruby')
+ assert lang_base.exe_exists('ruby') is False
+
+
+def test_exe_exists_false_if_homedir(find_exe_mck, homedir_mck):
+ find_exe_mck.return_value = os.path.normpath('/home/me/somedir/ruby')
+ assert lang_base.exe_exists('ruby') is False
+
+
+def test_exe_exists_commonpath_raises_ValueError(find_exe_mck, homedir_mck):
+ find_exe_mck.return_value = os.path.normpath('/usr/bin/ruby')
+ with mock.patch.object(os.path, 'commonpath', side_effect=ValueError):
+ assert lang_base.exe_exists('ruby') is True
+
+
+def test_exe_exists_true_when_homedir_is_slash(find_exe_mck):
+ find_exe_mck.return_value = os.path.normpath('/usr/bin/ruby')
+ with mock.patch.object(os.path, 'expanduser', return_value=os.sep):
+ assert lang_base.exe_exists('ruby') is True
+
+
+def test_basic_get_default_version():
+ assert lang_base.basic_get_default_version() == C.DEFAULT
+
+
+def test_basic_health_check():
+ assert lang_base.basic_health_check(Prefix('.'), 'default') is None
+
+
+def test_failed_setup_command_does_not_unicode_error():
+ script = (
+ 'import sys\n'
+ "sys.stderr.buffer.write(b'\\x81\\xfe')\n"
+ 'raise SystemExit(1)\n'
+ )
+
+ # an assertion that this does not raise `UnicodeError`
+ with pytest.raises(CalledProcessError):
+ lang_base.setup_cmd(Prefix('.'), (sys.executable, '-c', script))
+
+
+def test_environment_dir(tmp_path):
+ ret = lang_base.environment_dir(Prefix(tmp_path), 'langenv', 'default')
+ assert ret == f'{tmp_path}{os.sep}langenv-default'
+
+
+def test_assert_version_default():
+ with pytest.raises(AssertionError) as excinfo:
+ lang_base.assert_version_default('lang', '1.2.3')
+ msg, = excinfo.value.args
+ assert msg == (
+ 'for now, pre-commit requires system-installed lang -- '
+ 'you selected `language_version: 1.2.3`'
+ )
+
+
+def test_assert_no_additional_deps():
+ with pytest.raises(AssertionError) as excinfo:
+ lang_base.assert_no_additional_deps('lang', ['hmmm'])
+ msg, = excinfo.value.args
+ assert msg == (
+ 'for now, pre-commit does not support additional_dependencies for '
+ 'lang -- '
+ "you selected `additional_dependencies: ['hmmm']`"
+ )
+
+
+def test_no_env_noop(tmp_path):
+ before = os.environ.copy()
+ with lang_base.no_env(Prefix(tmp_path), '1.2.3'):
+ inside = os.environ.copy()
+ after = os.environ.copy()
+ assert before == inside == after
+
+
+@pytest.fixture
+def cpu_count_mck():
+ with mock.patch.object(xargs, 'cpu_count', return_value=4):
+ yield
+
+
+@pytest.mark.parametrize(
+ ('var', 'expected'),
+ (
+ ('PRE_COMMIT_NO_CONCURRENCY', 1),
+ ('TRAVIS', 2),
+ (None, 4),
+ ),
+)
+def test_target_concurrency(cpu_count_mck, var, expected):
+ with mock.patch.dict(os.environ, {var: '1'} if var else {}, clear=True):
+ assert lang_base.target_concurrency() == expected
+
+
+def test_shuffled_is_deterministic():
+ seq = [str(i) for i in range(10)]
+ expected = ['4', '0', '5', '1', '8', '6', '2', '3', '7', '9']
+ assert lang_base._shuffled(seq) == expected
+
+
+def test_xargs_require_serial_is_not_shuffled():
+ ret, out = lang_base.run_xargs(
+ ('echo',), [str(i) for i in range(10)],
+ require_serial=True,
+ color=False,
+ )
+ assert ret == 0
+ assert out.strip() == b'0 1 2 3 4 5 6 7 8 9'
+
+
+def test_basic_run_hook(tmp_path):
+ ret, out = lang_base.basic_run_hook(
+ Prefix(tmp_path),
+ 'echo hi',
+ ['hello'],
+ ['file', 'file', 'file'],
+ is_local=False,
+ require_serial=False,
+ color=False,
+ )
+ assert ret == 0
+ out = out.replace(b'\r\n', b'\n')
+ assert out == b'hi hello file file file\n'
diff --git a/tests/languages/__init__.py b/tests/languages/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/languages/__init__.py
diff --git a/tests/languages/conda_test.py b/tests/languages/conda_test.py
new file mode 100644
index 0000000..83aaebe
--- /dev/null
+++ b/tests/languages/conda_test.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+import os.path
+
+import pytest
+
+from pre_commit import envcontext
+from pre_commit.languages import conda
+from pre_commit.store import _make_local_repo
+from testing.language_helpers import run_language
+
+
+@pytest.mark.parametrize(
+ ('ctx', 'expected'),
+ (
+ pytest.param(
+ (
+ ('PRE_COMMIT_USE_MICROMAMBA', envcontext.UNSET),
+ ('PRE_COMMIT_USE_MAMBA', envcontext.UNSET),
+ ),
+ 'conda',
+ id='default',
+ ),
+ pytest.param(
+ (
+ ('PRE_COMMIT_USE_MICROMAMBA', '1'),
+ ('PRE_COMMIT_USE_MAMBA', ''),
+ ),
+ 'micromamba',
+ id='micromamba',
+ ),
+ pytest.param(
+ (
+ ('PRE_COMMIT_USE_MICROMAMBA', ''),
+ ('PRE_COMMIT_USE_MAMBA', '1'),
+ ),
+ 'mamba',
+ id='mamba',
+ ),
+ ),
+)
+def test_conda_exe(ctx, expected):
+ with envcontext.envcontext(ctx):
+ assert conda._conda_exe() == expected
+
+
+def test_conda_language(tmp_path):
+ environment_yml = '''\
+channels: [conda-forge, defaults]
+dependencies: [python, pip]
+'''
+ tmp_path.joinpath('environment.yml').write_text(environment_yml)
+
+ ret, out = run_language(
+ tmp_path,
+ conda,
+ 'python -c "import sys; print(sys.prefix)"',
+ )
+ assert ret == 0
+ assert os.path.basename(out.strip()) == b'conda-default'
+
+
+def test_conda_additional_deps(tmp_path):
+ _make_local_repo(tmp_path)
+
+ ret = run_language(
+ tmp_path,
+ conda,
+ 'python -c "import botocore; print(1)"',
+ deps=('botocore',),
+ )
+ assert ret == (0, b'1\n')
diff --git a/tests/languages/coursier_test.py b/tests/languages/coursier_test.py
new file mode 100644
index 0000000..dbb746c
--- /dev/null
+++ b/tests/languages/coursier_test.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import pytest
+
+from pre_commit.errors import FatalError
+from pre_commit.languages import coursier
+from testing.language_helpers import run_language
+
+
+def test_coursier_hook(tmp_path):
+ echo_java_json = '''\
+{
+ "repositories": ["central"],
+ "dependencies": ["io.get-coursier:echo:latest.stable"]
+}
+'''
+
+ channel_dir = tmp_path.joinpath('.pre-commit-channel')
+ channel_dir.mkdir()
+ channel_dir.joinpath('echo-java.json').write_text(echo_java_json)
+
+ ret = run_language(
+ tmp_path,
+ coursier,
+ 'echo-java',
+ args=('Hello', 'World', 'from', 'coursier'),
+ )
+ assert ret == (0, b'Hello World from coursier\n')
+
+
+def test_coursier_hook_additional_dependencies(tmp_path):
+ ret = run_language(
+ tmp_path,
+ coursier,
+ 'scalafmt --version',
+ deps=('scalafmt:3.6.1',),
+ )
+ assert ret == (0, b'scalafmt 3.6.1\n')
+
+
+def test_error_if_no_deps_or_channel(tmp_path):
+ with pytest.raises(FatalError) as excinfo:
+ run_language(tmp_path, coursier, 'dne')
+ msg, = excinfo.value.args
+ assert msg == 'expected .pre-commit-channel dir or additional_dependencies'
diff --git a/tests/languages/dart_test.py b/tests/languages/dart_test.py
new file mode 100644
index 0000000..5bb5aa6
--- /dev/null
+++ b/tests/languages/dart_test.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+import re_assert
+
+from pre_commit.languages import dart
+from pre_commit.store import _make_local_repo
+from testing.language_helpers import run_language
+
+
+def test_dart(tmp_path):
+ pubspec_yaml = '''\
+environment:
+ sdk: '>=2.10.0 <3.0.0'
+
+name: hello_world_dart
+
+executables:
+ hello-world-dart:
+
+dependencies:
+ ansicolor: ^2.0.1
+'''
+ hello_world_dart_dart = '''\
+import 'package:ansicolor/ansicolor.dart';
+
+void main() {
+ AnsiPen pen = new AnsiPen()..red();
+ print("hello hello " + pen("world"));
+}
+'''
+ tmp_path.joinpath('pubspec.yaml').write_text(pubspec_yaml)
+ bin_dir = tmp_path.joinpath('bin')
+ bin_dir.mkdir()
+ bin_dir.joinpath('hello-world-dart.dart').write_text(hello_world_dart_dart)
+
+ expected = (0, b'hello hello world\n')
+ assert run_language(tmp_path, dart, 'hello-world-dart') == expected
+
+
+def test_dart_additional_deps(tmp_path):
+ _make_local_repo(str(tmp_path))
+
+ ret = run_language(
+ tmp_path,
+ dart,
+ 'hello-world-dart',
+ deps=('hello_world_dart',),
+ )
+ assert ret == (0, b'hello hello world\n')
+
+
+def test_dart_additional_deps_versioned(tmp_path):
+ _make_local_repo(str(tmp_path))
+
+ ret, out = run_language(
+ tmp_path,
+ dart,
+ 'secure-random -l 4 -b 16',
+ deps=('encrypt:5.0.0',),
+ )
+ assert ret == 0
+ re_assert.Matches('^[a-f0-9]{8}\n$').assert_matches(out.decode())
diff --git a/tests/languages/docker_image_test.py b/tests/languages/docker_image_test.py
new file mode 100644
index 0000000..7993c11
--- /dev/null
+++ b/tests/languages/docker_image_test.py
@@ -0,0 +1,27 @@
+from __future__ import annotations
+
+from pre_commit.languages import docker_image
+from testing.language_helpers import run_language
+from testing.util import xfailif_windows
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_docker_image_hook_via_entrypoint(tmp_path):
+ ret = run_language(
+ tmp_path,
+ docker_image,
+ '--entrypoint echo ubuntu:22.04',
+ args=('hello hello world',),
+ )
+ assert ret == (0, b'hello hello world\n')
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_docker_image_hook_via_args(tmp_path):
+ ret = run_language(
+ tmp_path,
+ docker_image,
+ 'ubuntu:22.04 echo',
+ args=('hello hello world',),
+ )
+ assert ret == (0, b'hello hello world\n')
diff --git a/tests/languages/docker_test.py b/tests/languages/docker_test.py
new file mode 100644
index 0000000..836382a
--- /dev/null
+++ b/tests/languages/docker_test.py
@@ -0,0 +1,197 @@
+from __future__ import annotations
+
+import builtins
+import json
+import ntpath
+import os.path
+import posixpath
+from unittest import mock
+
+import pytest
+
+from pre_commit.languages import docker
+from pre_commit.util import CalledProcessError
+from testing.language_helpers import run_language
+from testing.util import xfailif_windows
+
+DOCKER_CGROUP_EXAMPLE = b'''\
+12:hugetlb:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+11:blkio:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+10:freezer:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+9:cpu,cpuacct:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+8:pids:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+7:rdma:/
+6:net_cls,net_prio:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+5:cpuset:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+4:devices:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+3:memory:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+2:perf_event:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+1:name=systemd:/docker/c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7
+0::/system.slice/containerd.service
+''' # noqa: E501
+
+# The ID should match the above cgroup example.
+CONTAINER_ID = 'c33988ec7651ebc867cb24755eaf637a6734088bc7eef59d5799293a9e5450f7' # noqa: E501
+
+NON_DOCKER_CGROUP_EXAMPLE = b'''\
+12:perf_event:/
+11:hugetlb:/
+10:devices:/
+9:blkio:/
+8:rdma:/
+7:cpuset:/
+6:cpu,cpuacct:/
+5:freezer:/
+4:memory:/
+3:pids:/
+2:net_cls,net_prio:/
+1:name=systemd:/init.scope
+0::/init.scope
+'''
+
+
+def test_docker_fallback_user():
+ def invalid_attribute():
+ raise AttributeError
+
+ with mock.patch.multiple(
+ 'os', create=True,
+ getuid=invalid_attribute,
+ getgid=invalid_attribute,
+ ):
+ assert docker.get_docker_user() == ()
+
+
+def test_in_docker_no_file():
+ with mock.patch.object(builtins, 'open', side_effect=FileNotFoundError):
+ assert docker._is_in_docker() is False
+
+
+def _mock_open(data):
+ return mock.patch.object(
+ builtins,
+ 'open',
+ new_callable=mock.mock_open,
+ read_data=data,
+ )
+
+
+def test_in_docker_docker_in_file():
+ with _mock_open(DOCKER_CGROUP_EXAMPLE):
+ assert docker._is_in_docker() is True
+
+
+def test_in_docker_docker_not_in_file():
+ with _mock_open(NON_DOCKER_CGROUP_EXAMPLE):
+ assert docker._is_in_docker() is False
+
+
+def test_get_container_id():
+ with _mock_open(DOCKER_CGROUP_EXAMPLE):
+ assert docker._get_container_id() == CONTAINER_ID
+
+
+def test_get_container_id_failure():
+ with _mock_open(b''), pytest.raises(RuntimeError):
+ docker._get_container_id()
+
+
+def test_get_docker_path_not_in_docker_returns_same():
+ with mock.patch.object(docker, '_is_in_docker', return_value=False):
+ assert docker._get_docker_path('abc') == 'abc'
+
+
+@pytest.fixture
+def in_docker():
+ with mock.patch.object(docker, '_is_in_docker', return_value=True):
+ with mock.patch.object(
+ docker, '_get_container_id', return_value=CONTAINER_ID,
+ ):
+ yield
+
+
+def _linux_commonpath():
+ return mock.patch.object(os.path, 'commonpath', posixpath.commonpath)
+
+
+def _nt_commonpath():
+ return mock.patch.object(os.path, 'commonpath', ntpath.commonpath)
+
+
+def _docker_output(out):
+ ret = (0, out, b'')
+ return mock.patch.object(docker, 'cmd_output_b', return_value=ret)
+
+
+def test_get_docker_path_in_docker_no_binds_same_path(in_docker):
+ docker_out = json.dumps([{'Mounts': []}]).encode()
+
+ with _docker_output(docker_out):
+ assert docker._get_docker_path('abc') == 'abc'
+
+
+def test_get_docker_path_in_docker_binds_path_equal(in_docker):
+ binds_list = [{'Source': '/opt/my_code', 'Destination': '/project'}]
+ docker_out = json.dumps([{'Mounts': binds_list}]).encode()
+
+ with _linux_commonpath(), _docker_output(docker_out):
+ assert docker._get_docker_path('/project') == '/opt/my_code'
+
+
+def test_get_docker_path_in_docker_binds_path_complex(in_docker):
+ binds_list = [{'Source': '/opt/my_code', 'Destination': '/project'}]
+ docker_out = json.dumps([{'Mounts': binds_list}]).encode()
+
+ with _linux_commonpath(), _docker_output(docker_out):
+ path = '/project/test/something'
+ assert docker._get_docker_path(path) == '/opt/my_code/test/something'
+
+
+def test_get_docker_path_in_docker_no_substring(in_docker):
+ binds_list = [{'Source': '/opt/my_code', 'Destination': '/project'}]
+ docker_out = json.dumps([{'Mounts': binds_list}]).encode()
+
+ with _linux_commonpath(), _docker_output(docker_out):
+ path = '/projectSuffix/test/something'
+ assert docker._get_docker_path(path) == path
+
+
+def test_get_docker_path_in_docker_binds_path_many_binds(in_docker):
+ binds_list = [
+ {'Source': '/something_random', 'Destination': '/not-related'},
+ {'Source': '/opt/my_code', 'Destination': '/project'},
+ {'Source': '/something-random-2', 'Destination': '/not-related-2'},
+ ]
+ docker_out = json.dumps([{'Mounts': binds_list}]).encode()
+
+ with _linux_commonpath(), _docker_output(docker_out):
+ assert docker._get_docker_path('/project') == '/opt/my_code'
+
+
+def test_get_docker_path_in_docker_windows(in_docker):
+ binds_list = [{'Source': r'c:\users\user', 'Destination': r'c:\folder'}]
+ docker_out = json.dumps([{'Mounts': binds_list}]).encode()
+
+ with _nt_commonpath(), _docker_output(docker_out):
+ path = r'c:\folder\test\something'
+ expected = r'c:\users\user\test\something'
+ assert docker._get_docker_path(path) == expected
+
+
+def test_get_docker_path_in_docker_docker_in_docker(in_docker):
+ # won't be able to discover "self" container in true docker-in-docker
+ err = CalledProcessError(1, (), b'', b'')
+ with mock.patch.object(docker, 'cmd_output_b', side_effect=err):
+ assert docker._get_docker_path('/project') == '/project'
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_docker_hook(tmp_path):
+ dockerfile = '''\
+FROM ubuntu:22.04
+CMD ["echo", "This is overwritten by the entry"]
+'''
+ tmp_path.joinpath('Dockerfile').write_text(dockerfile)
+
+ ret = run_language(tmp_path, docker, 'echo hello hello world')
+ assert ret == (0, b'hello hello world\n')
diff --git a/tests/languages/dotnet_test.py b/tests/languages/dotnet_test.py
new file mode 100644
index 0000000..470c03b
--- /dev/null
+++ b/tests/languages/dotnet_test.py
@@ -0,0 +1,154 @@
+from __future__ import annotations
+
+from pre_commit.languages import dotnet
+from testing.language_helpers import run_language
+
+
+def _write_program_cs(tmp_path):
+ program_cs = '''\
+using System;
+
+namespace dotnet_tests
+{
+ class Program
+ {
+ static void Main(string[] args)
+ {
+ Console.WriteLine("Hello from dotnet!");
+ }
+ }
+}
+'''
+ tmp_path.joinpath('Program.cs').write_text(program_cs)
+
+
+def _csproj(tool_name):
+ return f'''\
+<Project Sdk="Microsoft.NET.Sdk">
+ <PropertyGroup>
+ <OutputType>Exe</OutputType>
+ <TargetFramework>net6</TargetFramework>
+ <PackAsTool>true</PackAsTool>
+ <ToolCommandName>{tool_name}</ToolCommandName>
+ <PackageOutputPath>./nupkg</PackageOutputPath>
+ </PropertyGroup>
+</Project>
+'''
+
+
+def test_dotnet_csproj(tmp_path):
+ csproj = _csproj('testeroni')
+ _write_program_cs(tmp_path)
+ tmp_path.joinpath('dotnet_csproj.csproj').write_text(csproj)
+ ret = run_language(tmp_path, dotnet, 'testeroni')
+ assert ret == (0, b'Hello from dotnet!\n')
+
+
+def test_dotnet_csproj_prefix(tmp_path):
+ csproj = _csproj('testeroni.tool')
+ _write_program_cs(tmp_path)
+ tmp_path.joinpath('dotnet_hooks_csproj_prefix.csproj').write_text(csproj)
+ ret = run_language(tmp_path, dotnet, 'testeroni.tool')
+ assert ret == (0, b'Hello from dotnet!\n')
+
+
+def test_dotnet_sln(tmp_path):
+ csproj = _csproj('testeroni')
+ sln = '''\
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 15
+VisualStudioVersion = 15.0.26124.0
+MinimumVisualStudioVersion = 15.0.26124.0
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "dotnet_hooks_sln_repo", "dotnet_hooks_sln_repo.csproj", "{6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Debug|x64 = Debug|x64
+ Debug|x86 = Debug|x86
+ Release|Any CPU = Release|Any CPU
+ Release|x64 = Release|x64
+ Release|x86 = Release|x86
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Debug|x64.Build.0 = Debug|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Debug|x86.Build.0 = Debug|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Release|Any CPU.Build.0 = Release|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Release|x64.ActiveCfg = Release|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Release|x64.Build.0 = Release|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Release|x86.ActiveCfg = Release|Any CPU
+ {6568CFDB-6F6F-45A9-932C-8C7DAABC8E56}.Release|x86.Build.0 = Release|Any CPU
+ EndGlobalSection
+EndGlobal
+''' # noqa: E501
+ _write_program_cs(tmp_path)
+ tmp_path.joinpath('dotnet_hooks_sln_repo.csproj').write_text(csproj)
+ tmp_path.joinpath('dotnet_hooks_sln_repo.sln').write_text(sln)
+
+ ret = run_language(tmp_path, dotnet, 'testeroni')
+ assert ret == (0, b'Hello from dotnet!\n')
+
+
+def _setup_dotnet_combo(tmp_path):
+ sln = '''\
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 16
+VisualStudioVersion = 16.0.30114.105
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "proj1", "proj1\\proj1.csproj", "{38A939C3-DEA4-47D7-9B75-0418C4249662}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "proj2", "proj2\\proj2.csproj", "{4C9916CB-165C-4EF5-8A57-4CB6794C1EBF}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {38A939C3-DEA4-47D7-9B75-0418C4249662}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {38A939C3-DEA4-47D7-9B75-0418C4249662}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {38A939C3-DEA4-47D7-9B75-0418C4249662}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {38A939C3-DEA4-47D7-9B75-0418C4249662}.Release|Any CPU.Build.0 = Release|Any CPU
+ {4C9916CB-165C-4EF5-8A57-4CB6794C1EBF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {4C9916CB-165C-4EF5-8A57-4CB6794C1EBF}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {4C9916CB-165C-4EF5-8A57-4CB6794C1EBF}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {4C9916CB-165C-4EF5-8A57-4CB6794C1EBF}.Release|Any CPU.Build.0 = Release|Any CPU
+ EndGlobalSection
+EndGlobal
+''' # noqa: E501
+ tmp_path.joinpath('dotnet_hooks_combo_repo.sln').write_text(sln)
+
+ csproj1 = _csproj('proj1')
+ proj1 = tmp_path.joinpath('proj1')
+ proj1.mkdir()
+ proj1.joinpath('proj1.csproj').write_text(csproj1)
+ _write_program_cs(proj1)
+
+ csproj2 = _csproj('proj2')
+ proj2 = tmp_path.joinpath('proj2')
+ proj2.mkdir()
+ proj2.joinpath('proj2.csproj').write_text(csproj2)
+ _write_program_cs(proj2)
+
+
+def test_dotnet_combo_proj1(tmp_path):
+ _setup_dotnet_combo(tmp_path)
+ ret = run_language(tmp_path, dotnet, 'proj1')
+ assert ret == (0, b'Hello from dotnet!\n')
+
+
+def test_dotnet_combo_proj2(tmp_path):
+ _setup_dotnet_combo(tmp_path)
+ ret = run_language(tmp_path, dotnet, 'proj2')
+ assert ret == (0, b'Hello from dotnet!\n')
diff --git a/tests/languages/fail_test.py b/tests/languages/fail_test.py
new file mode 100644
index 0000000..7c74886
--- /dev/null
+++ b/tests/languages/fail_test.py
@@ -0,0 +1,14 @@
+from __future__ import annotations
+
+from pre_commit.languages import fail
+from testing.language_helpers import run_language
+
+
+def test_fail_hooks(tmp_path):
+ ret = run_language(
+ tmp_path,
+ fail,
+ 'watch out for',
+ file_args=('bunnies',),
+ )
+ assert ret == (1, b'watch out for\n\nbunnies\n')
diff --git a/tests/languages/golang_test.py b/tests/languages/golang_test.py
new file mode 100644
index 0000000..02e35d7
--- /dev/null
+++ b/tests/languages/golang_test.py
@@ -0,0 +1,167 @@
+from __future__ import annotations
+
+from unittest import mock
+
+import pytest
+import re_assert
+
+import pre_commit.constants as C
+from pre_commit import lang_base
+from pre_commit.commands.install_uninstall import install
+from pre_commit.envcontext import envcontext
+from pre_commit.languages import golang
+from pre_commit.store import _make_local_repo
+from pre_commit.util import cmd_output
+from testing.fixtures import add_config_to_repo
+from testing.fixtures import make_config_from_repo
+from testing.language_helpers import run_language
+from testing.util import cmd_output_mocked_pre_commit_home
+from testing.util import git_commit
+
+
+ACTUAL_GET_DEFAULT_VERSION = golang.get_default_version.__wrapped__
+
+
+@pytest.fixture
+def exe_exists_mck():
+ with mock.patch.object(lang_base, 'exe_exists') as mck:
+ yield mck
+
+
+def test_golang_default_version_system_available(exe_exists_mck):
+ exe_exists_mck.return_value = True
+ assert ACTUAL_GET_DEFAULT_VERSION() == 'system'
+
+
+def test_golang_default_version_system_not_available(exe_exists_mck):
+ exe_exists_mck.return_value = False
+ assert ACTUAL_GET_DEFAULT_VERSION() == C.DEFAULT
+
+
+ACTUAL_INFER_GO_VERSION = golang._infer_go_version.__wrapped__
+
+
+def test_golang_infer_go_version_not_default():
+ assert ACTUAL_INFER_GO_VERSION('1.19.4') == '1.19.4'
+
+
+def test_golang_infer_go_version_default():
+ version = ACTUAL_INFER_GO_VERSION(C.DEFAULT)
+
+ assert version != C.DEFAULT
+ re_assert.Matches(r'^\d+\.\d+(?:\.\d+)?$').assert_matches(version)
+
+
+def _make_hello_world(tmp_path):
+ go_mod = '''\
+module golang-hello-world
+
+go 1.18
+
+require github.com/BurntSushi/toml v1.1.0
+'''
+ go_sum = '''\
+github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
+github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+''' # noqa: E501
+ hello_world_go = '''\
+package main
+
+
+import (
+ "fmt"
+ "github.com/BurntSushi/toml"
+)
+
+type Config struct {
+ What string
+}
+
+func main() {
+ var conf Config
+ toml.Decode("What = 'world'\\n", &conf)
+ fmt.Printf("hello %v\\n", conf.What)
+}
+'''
+ tmp_path.joinpath('go.mod').write_text(go_mod)
+ tmp_path.joinpath('go.sum').write_text(go_sum)
+ mod_dir = tmp_path.joinpath('golang-hello-world')
+ mod_dir.mkdir()
+ main_file = mod_dir.joinpath('main.go')
+ main_file.write_text(hello_world_go)
+
+
+def test_golang_system(tmp_path):
+ _make_hello_world(tmp_path)
+
+ ret = run_language(tmp_path, golang, 'golang-hello-world')
+ assert ret == (0, b'hello world\n')
+
+
+def test_golang_default_version(tmp_path):
+ _make_hello_world(tmp_path)
+
+ ret = run_language(
+ tmp_path,
+ golang,
+ 'golang-hello-world',
+ version=C.DEFAULT,
+ )
+ assert ret == (0, b'hello world\n')
+
+
+def test_golang_versioned(tmp_path):
+ _make_local_repo(str(tmp_path))
+
+ ret, out = run_language(
+ tmp_path,
+ golang,
+ 'go version',
+ version='1.21.1',
+ )
+
+ assert ret == 0
+ assert out.startswith(b'go version go1.21.1')
+
+
+def test_local_golang_additional_deps(tmp_path):
+ _make_local_repo(str(tmp_path))
+
+ ret = run_language(
+ tmp_path,
+ golang,
+ 'hello',
+ deps=('golang.org/x/example/hello@latest',),
+ )
+
+ assert ret == (0, b'Hello, world!\n')
+
+
+def test_golang_hook_still_works_when_gobin_is_set(tmp_path):
+ with envcontext((('GOBIN', str(tmp_path.joinpath('gobin'))),)):
+ test_golang_system(tmp_path)
+
+
+def test_during_commit_all(tmp_path, tempdir_factory, store, in_git_dir):
+ hook_dir = tmp_path.joinpath('hook')
+ hook_dir.mkdir()
+ _make_hello_world(hook_dir)
+ hook_dir.joinpath('.pre-commit-hooks.yaml').write_text(
+ '- id: hello-world\n'
+ ' name: hello world\n'
+ ' entry: golang-hello-world\n'
+ ' language: golang\n'
+ ' always_run: true\n',
+ )
+ cmd_output('git', 'init', hook_dir)
+ cmd_output('git', 'add', '.', cwd=hook_dir)
+ git_commit(cwd=hook_dir)
+
+ add_config_to_repo(in_git_dir, make_config_from_repo(hook_dir))
+
+ assert not install(C.CONFIG_FILE, store, hook_types=['pre-commit'])
+
+ git_commit(
+ fn=cmd_output_mocked_pre_commit_home,
+ tempdir_factory=tempdir_factory,
+ )
diff --git a/tests/languages/haskell_test.py b/tests/languages/haskell_test.py
new file mode 100644
index 0000000..f888109
--- /dev/null
+++ b/tests/languages/haskell_test.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+import pytest
+
+from pre_commit.errors import FatalError
+from pre_commit.languages import haskell
+from pre_commit.util import win_exe
+from testing.language_helpers import run_language
+
+
+def test_run_example_executable(tmp_path):
+ example_cabal = '''\
+cabal-version: 2.4
+name: example
+version: 0.1.0.0
+
+executable example
+ main-is: Main.hs
+
+ build-depends: base >=4
+ default-language: Haskell2010
+'''
+ main_hs = '''\
+module Main where
+
+main :: IO ()
+main = putStrLn "Hello, Haskell!"
+'''
+ tmp_path.joinpath('example.cabal').write_text(example_cabal)
+ tmp_path.joinpath('Main.hs').write_text(main_hs)
+
+ result = run_language(tmp_path, haskell, 'example')
+ assert result == (0, b'Hello, Haskell!\n')
+
+ # should not symlink things into environments
+ exe = tmp_path.joinpath(win_exe('hs_env-default/bin/example'))
+ assert exe.is_file()
+ assert not exe.is_symlink()
+
+
+def test_run_dep(tmp_path):
+ result = run_language(tmp_path, haskell, 'hello', deps=['hello'])
+ assert result == (0, b'Hello, World!\n')
+
+
+def test_run_empty(tmp_path):
+ with pytest.raises(FatalError) as excinfo:
+ run_language(tmp_path, haskell, 'example')
+ msg, = excinfo.value.args
+ assert msg == 'Expected .cabal files or additional_dependencies'
diff --git a/tests/languages/lua_test.py b/tests/languages/lua_test.py
new file mode 100644
index 0000000..b2767b7
--- /dev/null
+++ b/tests/languages/lua_test.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import sys
+
+import pytest
+
+from pre_commit.languages import lua
+from pre_commit.util import make_executable
+from testing.language_helpers import run_language
+
+pytestmark = pytest.mark.skipif(
+ sys.platform == 'win32',
+ reason='lua is not supported on windows',
+)
+
+
+def test_lua(tmp_path): # pragma: win32 no cover
+ rockspec = '''\
+package = "hello"
+version = "dev-1"
+
+source = {
+ url = "git+ssh://git@github.com/pre-commit/pre-commit.git"
+}
+description = {}
+dependencies = {}
+build = {
+ type = "builtin",
+ modules = {},
+ install = {
+ bin = {"bin/hello-world-lua"}
+ },
+}
+'''
+ hello_world_lua = '''\
+#!/usr/bin/env lua
+print('hello world')
+'''
+ tmp_path.joinpath('hello-dev-1.rockspec').write_text(rockspec)
+ bin_dir = tmp_path.joinpath('bin')
+ bin_dir.mkdir()
+ bin_file = bin_dir.joinpath('hello-world-lua')
+ bin_file.write_text(hello_world_lua)
+ make_executable(bin_file)
+
+ expected = (0, b'hello world\n')
+ assert run_language(tmp_path, lua, 'hello-world-lua') == expected
+
+
+def test_lua_additional_dependencies(tmp_path): # pragma: win32 no cover
+ ret, out = run_language(
+ tmp_path,
+ lua,
+ 'luacheck --version',
+ deps=('luacheck',),
+ )
+ assert ret == 0
+ assert out.startswith(b'Luacheck: ')
diff --git a/tests/languages/node_test.py b/tests/languages/node_test.py
new file mode 100644
index 0000000..055cb1e
--- /dev/null
+++ b/tests/languages/node_test.py
@@ -0,0 +1,152 @@
+from __future__ import annotations
+
+import json
+import os
+import shutil
+import sys
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit import envcontext
+from pre_commit import parse_shebang
+from pre_commit.languages import node
+from pre_commit.prefix import Prefix
+from pre_commit.store import _make_local_repo
+from pre_commit.util import cmd_output
+from testing.language_helpers import run_language
+from testing.util import xfailif_windows
+
+
+ACTUAL_GET_DEFAULT_VERSION = node.get_default_version.__wrapped__
+
+
+@pytest.fixture
+def is_linux():
+ with mock.patch.object(sys, 'platform', 'linux'):
+ yield
+
+
+@pytest.fixture
+def is_win32():
+ with mock.patch.object(sys, 'platform', 'win32'):
+ yield
+
+
+@pytest.fixture
+def find_exe_mck():
+ with mock.patch.object(parse_shebang, 'find_executable') as mck:
+ yield mck
+
+
+@pytest.mark.usefixtures('is_linux')
+def test_sets_system_when_node_and_npm_are_available(find_exe_mck):
+ find_exe_mck.return_value = '/path/to/exe'
+ assert ACTUAL_GET_DEFAULT_VERSION() == 'system'
+
+
+@pytest.mark.usefixtures('is_linux')
+def test_uses_default_when_node_and_npm_are_not_available(find_exe_mck):
+ find_exe_mck.return_value = None
+ assert ACTUAL_GET_DEFAULT_VERSION() == C.DEFAULT
+
+
+@pytest.mark.usefixtures('is_win32')
+def test_sets_default_on_windows(find_exe_mck):
+ find_exe_mck.return_value = '/path/to/exe'
+ assert ACTUAL_GET_DEFAULT_VERSION() == C.DEFAULT
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_healthy_system_node(tmpdir):
+ tmpdir.join('package.json').write('{"name": "t", "version": "1.0.0"}')
+
+ prefix = Prefix(str(tmpdir))
+ node.install_environment(prefix, 'system', ())
+ assert node.health_check(prefix, 'system') is None
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_unhealthy_if_system_node_goes_missing(tmpdir):
+ bin_dir = tmpdir.join('bin').ensure_dir()
+ node_bin = bin_dir.join('node')
+ node_bin.mksymlinkto(shutil.which('node'))
+
+ prefix_dir = tmpdir.join('prefix').ensure_dir()
+ prefix_dir.join('package.json').write('{"name": "t", "version": "1.0.0"}')
+
+ path = ('PATH', (str(bin_dir), os.pathsep, envcontext.Var('PATH')))
+ with envcontext.envcontext((path,)):
+ prefix = Prefix(str(prefix_dir))
+ node.install_environment(prefix, 'system', ())
+ assert node.health_check(prefix, 'system') is None
+
+ node_bin.remove()
+ ret = node.health_check(prefix, 'system')
+ assert ret == '`node --version` returned 127'
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_installs_without_links_outside_env(tmpdir):
+ tmpdir.join('bin/main.js').ensure().write(
+ '#!/usr/bin/env node\n'
+ '_ = require("lodash"); console.log("success!")\n',
+ )
+ tmpdir.join('package.json').write(
+ json.dumps({
+ 'name': 'foo',
+ 'version': '0.0.1',
+ 'bin': {'foo': './bin/main.js'},
+ 'dependencies': {'lodash': '*'},
+ }),
+ )
+
+ prefix = Prefix(str(tmpdir))
+ node.install_environment(prefix, 'system', ())
+ assert node.health_check(prefix, 'system') is None
+
+ # this directory shouldn't exist, make sure we succeed without it existing
+ cmd_output('rm', '-rf', str(tmpdir.join('node_modules')))
+
+ with node.in_env(prefix, 'system'):
+ assert cmd_output('foo')[1] == 'success!\n'
+
+
+def _make_hello_world(tmp_path):
+ package_json = '''\
+{"name": "t", "version": "0.0.1", "bin": {"node-hello": "./bin/main.js"}}
+'''
+ tmp_path.joinpath('package.json').write_text(package_json)
+ bin_dir = tmp_path.joinpath('bin')
+ bin_dir.mkdir()
+ bin_dir.joinpath('main.js').write_text(
+ '#!/usr/bin/env node\n'
+ 'console.log("Hello World");\n',
+ )
+
+
+def test_node_hook_system(tmp_path):
+ _make_hello_world(tmp_path)
+ ret = run_language(tmp_path, node, 'node-hello')
+ assert ret == (0, b'Hello World\n')
+
+
+def test_node_with_user_config_set(tmp_path):
+ cfg = tmp_path.joinpath('cfg')
+ cfg.write_text('cache=/dne\n')
+ with envcontext.envcontext((('NPM_CONFIG_USERCONFIG', str(cfg)),)):
+ test_node_hook_system(tmp_path)
+
+
+@pytest.mark.parametrize('version', (C.DEFAULT, '18.14.0'))
+def test_node_hook_versions(tmp_path, version):
+ _make_hello_world(tmp_path)
+ ret = run_language(tmp_path, node, 'node-hello', version=version)
+ assert ret == (0, b'Hello World\n')
+
+
+def test_node_additional_deps(tmp_path):
+ _make_local_repo(str(tmp_path))
+ ret, out = run_language(tmp_path, node, 'npm ls -g', deps=('lodash',))
+ assert b' lodash@' in out
diff --git a/tests/languages/perl_test.py b/tests/languages/perl_test.py
new file mode 100644
index 0000000..042478d
--- /dev/null
+++ b/tests/languages/perl_test.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from pre_commit.languages import perl
+from pre_commit.store import _make_local_repo
+from pre_commit.util import make_executable
+from testing.language_helpers import run_language
+
+
+def test_perl_install(tmp_path):
+ makefile_pl = '''\
+use strict;
+use warnings;
+
+use ExtUtils::MakeMaker;
+
+WriteMakefile(
+ NAME => "PreCommitHello",
+ VERSION_FROM => "lib/PreCommitHello.pm",
+ EXE_FILES => [qw(bin/pre-commit-perl-hello)],
+);
+'''
+ bin_perl_hello = '''\
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+use PreCommitHello;
+
+PreCommitHello::hello();
+'''
+ lib_hello_pm = '''\
+package PreCommitHello;
+
+use strict;
+use warnings;
+
+our $VERSION = "0.1.0";
+
+sub hello {
+ print "Hello from perl-commit Perl!\n";
+}
+
+1;
+'''
+ tmp_path.joinpath('Makefile.PL').write_text(makefile_pl)
+ bin_dir = tmp_path.joinpath('bin')
+ bin_dir.mkdir()
+ exe = bin_dir.joinpath('pre-commit-perl-hello')
+ exe.write_text(bin_perl_hello)
+ make_executable(exe)
+ lib_dir = tmp_path.joinpath('lib')
+ lib_dir.mkdir()
+ lib_dir.joinpath('PreCommitHello.pm').write_text(lib_hello_pm)
+
+ ret = run_language(tmp_path, perl, 'pre-commit-perl-hello')
+ assert ret == (0, b'Hello from perl-commit Perl!\n')
+
+
+def test_perl_additional_dependencies(tmp_path):
+ _make_local_repo(str(tmp_path))
+
+ ret, out = run_language(
+ tmp_path,
+ perl,
+ 'perltidy --version',
+ deps=('SHANCOCK/Perl-Tidy-20211029.tar.gz',),
+ )
+ assert ret == 0
+ assert out.startswith(b'This is perltidy, v20211029')
diff --git a/tests/languages/pygrep_test.py b/tests/languages/pygrep_test.py
new file mode 100644
index 0000000..c6271c8
--- /dev/null
+++ b/tests/languages/pygrep_test.py
@@ -0,0 +1,144 @@
+from __future__ import annotations
+
+import pytest
+
+from pre_commit.languages import pygrep
+from testing.language_helpers import run_language
+
+
+@pytest.fixture
+def some_files(tmpdir):
+ tmpdir.join('f1').write_binary(b'foo\nbar\n')
+ tmpdir.join('f2').write_binary(b'[INFO] hi\n')
+ tmpdir.join('f3').write_binary(b"with'quotes\n")
+ tmpdir.join('f4').write_binary(b'foo\npattern\nbar\n')
+ tmpdir.join('f5').write_binary(b'[INFO] hi\npattern\nbar')
+ tmpdir.join('f6').write_binary(b"pattern\nbarwith'foo\n")
+ tmpdir.join('f7').write_binary(b"hello'hi\nworld\n")
+ tmpdir.join('f8').write_binary(b'foo\nbar\nbaz\n')
+ tmpdir.join('f9').write_binary(b'[WARN] hi\n')
+ with tmpdir.as_cwd():
+ yield
+
+
+@pytest.mark.usefixtures('some_files')
+@pytest.mark.parametrize(
+ ('pattern', 'expected_retcode', 'expected_out'),
+ (
+ ('baz', 0, ''),
+ ('foo', 1, 'f1:1:foo\n'),
+ ('bar', 1, 'f1:2:bar\n'),
+ (r'(?i)\[info\]', 1, 'f2:1:[INFO] hi\n'),
+ ("h'q", 1, "f3:1:with'quotes\n"),
+ ),
+)
+def test_main(cap_out, pattern, expected_retcode, expected_out):
+ ret = pygrep.main((pattern, 'f1', 'f2', 'f3'))
+ out = cap_out.get()
+ assert ret == expected_retcode
+ assert out == expected_out
+
+
+@pytest.mark.usefixtures('some_files')
+def test_negate_by_line_no_match(cap_out):
+ ret = pygrep.main(('pattern\nbar', 'f4', 'f5', 'f6', '--negate'))
+ out = cap_out.get()
+ assert ret == 1
+ assert out == 'f4\nf5\nf6\n'
+
+
+@pytest.mark.usefixtures('some_files')
+def test_negate_by_line_two_match(cap_out):
+ ret = pygrep.main(('foo', 'f4', 'f5', 'f6', '--negate'))
+ out = cap_out.get()
+ assert ret == 1
+ assert out == 'f5\n'
+
+
+@pytest.mark.usefixtures('some_files')
+def test_negate_by_line_all_match(cap_out):
+ ret = pygrep.main(('pattern', 'f4', 'f5', 'f6', '--negate'))
+ out = cap_out.get()
+ assert ret == 0
+ assert out == ''
+
+
+@pytest.mark.usefixtures('some_files')
+def test_negate_by_file_no_match(cap_out):
+ ret = pygrep.main(('baz', 'f4', 'f5', 'f6', '--negate', '--multiline'))
+ out = cap_out.get()
+ assert ret == 1
+ assert out == 'f4\nf5\nf6\n'
+
+
+@pytest.mark.usefixtures('some_files')
+def test_negate_by_file_one_match(cap_out):
+ ret = pygrep.main(
+ ('foo\npattern', 'f4', 'f5', 'f6', '--negate', '--multiline'),
+ )
+ out = cap_out.get()
+ assert ret == 1
+ assert out == 'f5\nf6\n'
+
+
+@pytest.mark.usefixtures('some_files')
+def test_negate_by_file_all_match(cap_out):
+ ret = pygrep.main(
+ ('pattern\nbar', 'f4', 'f5', 'f6', '--negate', '--multiline'),
+ )
+ out = cap_out.get()
+ assert ret == 0
+ assert out == ''
+
+
+@pytest.mark.usefixtures('some_files')
+def test_ignore_case(cap_out):
+ ret = pygrep.main(('--ignore-case', 'info', 'f1', 'f2', 'f3'))
+ out = cap_out.get()
+ assert ret == 1
+ assert out == 'f2:1:[INFO] hi\n'
+
+
+@pytest.mark.usefixtures('some_files')
+def test_multiline(cap_out):
+ ret = pygrep.main(('--multiline', r'foo\nbar', 'f1', 'f2', 'f3'))
+ out = cap_out.get()
+ assert ret == 1
+ assert out == 'f1:1:foo\nbar\n'
+
+
+@pytest.mark.usefixtures('some_files')
+def test_multiline_line_number(cap_out):
+ ret = pygrep.main(('--multiline', r'ar', 'f1', 'f2', 'f3'))
+ out = cap_out.get()
+ assert ret == 1
+ assert out == 'f1:2:bar\n'
+
+
+@pytest.mark.usefixtures('some_files')
+def test_multiline_dotall_flag_is_enabled(cap_out):
+ ret = pygrep.main(('--multiline', r'o.*bar', 'f1', 'f2', 'f3'))
+ out = cap_out.get()
+ assert ret == 1
+ assert out == 'f1:1:foo\nbar\n'
+
+
+@pytest.mark.usefixtures('some_files')
+def test_multiline_multiline_flag_is_enabled(cap_out):
+ ret = pygrep.main(('--multiline', r'foo$.*bar', 'f1', 'f2', 'f3'))
+ out = cap_out.get()
+ assert ret == 1
+ assert out == 'f1:1:foo\nbar\n'
+
+
+def test_grep_hook_matching(some_files, tmp_path):
+ ret = run_language(
+ tmp_path, pygrep, 'ello', file_args=('f7', 'f8', 'f9'),
+ )
+ assert ret == (1, b"f7:1:hello'hi\n")
+
+
+@pytest.mark.parametrize('regex', ('nope', "foo'bar", r'^\[INFO\]'))
+def test_grep_hook_not_matching(regex, some_files, tmp_path):
+ ret = run_language(tmp_path, pygrep, regex, file_args=('f7', 'f8', 'f9'))
+ assert ret == (0, b'')
diff --git a/tests/languages/python_test.py b/tests/languages/python_test.py
new file mode 100644
index 0000000..ab26e14
--- /dev/null
+++ b/tests/languages/python_test.py
@@ -0,0 +1,286 @@
+from __future__ import annotations
+
+import os.path
+import sys
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit.envcontext import envcontext
+from pre_commit.languages import python
+from pre_commit.prefix import Prefix
+from pre_commit.util import make_executable
+from pre_commit.util import win_exe
+from testing.language_helpers import run_language
+
+
+def test_read_pyvenv_cfg(tmpdir):
+ pyvenv_cfg = tmpdir.join('pyvenv.cfg')
+ pyvenv_cfg.write(
+ '# I am a comment\n'
+ '\n'
+ 'foo = bar\n'
+ 'version-info=123\n',
+ )
+ expected = {'foo': 'bar', 'version-info': '123'}
+ assert python._read_pyvenv_cfg(pyvenv_cfg) == expected
+
+
+def test_read_pyvenv_cfg_non_utf8(tmpdir):
+ pyvenv_cfg = tmpdir.join('pyvenv_cfg')
+ pyvenv_cfg.write_binary('hello = hello john.Ε‘\n'.encode())
+ expected = {'hello': 'hello john.Ε‘'}
+ assert python._read_pyvenv_cfg(pyvenv_cfg) == expected
+
+
+def test_norm_version_expanduser():
+ home = os.path.expanduser('~')
+ if sys.platform == 'win32': # pragma: win32 cover
+ path = r'~\python343'
+ expected_path = fr'{home}\python343'
+ else: # pragma: win32 no cover
+ path = '~/.pyenv/versions/3.4.3/bin/python'
+ expected_path = f'{home}/.pyenv/versions/3.4.3/bin/python'
+ result = python.norm_version(path)
+ assert result == expected_path
+
+
+def test_norm_version_of_default_is_sys_executable():
+ assert python.norm_version('default') is None
+
+
+@pytest.mark.parametrize('v', ('python3.9', 'python3', 'python'))
+def test_sys_executable_matches(v):
+ with mock.patch.object(sys, 'version_info', (3, 9, 10)):
+ assert python._sys_executable_matches(v)
+ assert python.norm_version(v) is None
+
+
+@pytest.mark.parametrize('v', ('notpython', 'python3.x'))
+def test_sys_executable_matches_does_not_match(v):
+ with mock.patch.object(sys, 'version_info', (3, 9, 10)):
+ assert not python._sys_executable_matches(v)
+
+
+@pytest.mark.parametrize(
+ ('exe', 'realpath', 'expected'), (
+ ('/usr/bin/python3', '/usr/bin/python3.7', 'python3'),
+ ('/usr/bin/python', '/usr/bin/python3.7', 'python3.7'),
+ ('/usr/bin/python', '/usr/bin/python', None),
+ ('/usr/bin/python3.7m', '/usr/bin/python3.7m', 'python3.7m'),
+ ('v/bin/python', 'v/bin/pypy', 'pypy'),
+ ),
+)
+def test_find_by_sys_executable(exe, realpath, expected):
+ with mock.patch.object(sys, 'executable', exe):
+ with mock.patch.object(os.path, 'realpath', return_value=realpath):
+ with mock.patch.object(python, 'find_executable', lambda x: x):
+ assert python._find_by_sys_executable() == expected
+
+
+@pytest.fixture
+def python_dir(tmpdir):
+ with tmpdir.as_cwd():
+ prefix = tmpdir.join('prefix').ensure_dir()
+ prefix.join('setup.py').write('import setuptools; setuptools.setup()')
+ prefix = Prefix(str(prefix))
+ yield prefix, tmpdir
+
+
+def test_healthy_default_creator(python_dir):
+ prefix, tmpdir = python_dir
+
+ python.install_environment(prefix, C.DEFAULT, ())
+
+ # should be healthy right after creation
+ assert python.health_check(prefix, C.DEFAULT) is None
+
+ # even if a `types.py` file exists, should still be healthy
+ tmpdir.join('types.py').ensure()
+ assert python.health_check(prefix, C.DEFAULT) is None
+
+
+def test_healthy_venv_creator(python_dir):
+ # venv creator produces slightly different pyvenv.cfg
+ prefix, tmpdir = python_dir
+
+ with envcontext((('VIRTUALENV_CREATOR', 'venv'),)):
+ python.install_environment(prefix, C.DEFAULT, ())
+
+ assert python.health_check(prefix, C.DEFAULT) is None
+
+
+def test_unhealthy_python_goes_missing(python_dir):
+ prefix, tmpdir = python_dir
+
+ python.install_environment(prefix, C.DEFAULT, ())
+
+ exe_name = win_exe('python')
+ py_exe = prefix.path(python.bin_dir('py_env-default'), exe_name)
+ os.remove(py_exe)
+
+ ret = python.health_check(prefix, C.DEFAULT)
+ assert ret == (
+ f'virtualenv python version did not match created version:\n'
+ f'- actual version: <<error retrieving version from {py_exe}>>\n'
+ f'- expected version: {python._version_info(sys.executable)}\n'
+ )
+
+
+def test_unhealthy_with_version_change(python_dir):
+ prefix, tmpdir = python_dir
+
+ python.install_environment(prefix, C.DEFAULT, ())
+
+ with open(prefix.path('py_env-default/pyvenv.cfg'), 'a+') as f:
+ f.write('version_info = 1.2.3\n')
+
+ ret = python.health_check(prefix, C.DEFAULT)
+ assert ret == (
+ f'virtualenv python version did not match created version:\n'
+ f'- actual version: {python._version_info(sys.executable)}\n'
+ f'- expected version: 1.2.3\n'
+ )
+
+
+def test_unhealthy_system_version_changes(python_dir):
+ prefix, tmpdir = python_dir
+
+ python.install_environment(prefix, C.DEFAULT, ())
+
+ with open(prefix.path('py_env-default/pyvenv.cfg'), 'a') as f:
+ f.write('base-executable = /does/not/exist\n')
+
+ ret = python.health_check(prefix, C.DEFAULT)
+ assert ret == (
+ f'base executable python version does not match created version:\n'
+ f'- base-executable version: <<error retrieving version from /does/not/exist>>\n' # noqa: E501
+ f'- expected version: {python._version_info(sys.executable)}\n'
+ )
+
+
+def test_unhealthy_old_virtualenv(python_dir):
+ prefix, tmpdir = python_dir
+
+ python.install_environment(prefix, C.DEFAULT, ())
+
+ # simulate "old" virtualenv by deleting this file
+ os.remove(prefix.path('py_env-default/pyvenv.cfg'))
+
+ ret = python.health_check(prefix, C.DEFAULT)
+ assert ret == 'pyvenv.cfg does not exist (old virtualenv?)'
+
+
+def test_unhealthy_unexpected_pyvenv(python_dir):
+ prefix, tmpdir = python_dir
+
+ python.install_environment(prefix, C.DEFAULT, ())
+
+ # simulate a buggy environment build (I don't think this is possible)
+ with open(prefix.path('py_env-default/pyvenv.cfg'), 'w'):
+ pass
+
+ ret = python.health_check(prefix, C.DEFAULT)
+ assert ret == "created virtualenv's pyvenv.cfg is missing `version_info`"
+
+
+def test_unhealthy_then_replaced(python_dir):
+ prefix, tmpdir = python_dir
+
+ python.install_environment(prefix, C.DEFAULT, ())
+
+ # simulate an exe which returns an old version
+ exe_name = win_exe('python')
+ py_exe = prefix.path(python.bin_dir('py_env-default'), exe_name)
+ os.rename(py_exe, f'{py_exe}.tmp')
+
+ with open(py_exe, 'w') as f:
+ f.write('#!/usr/bin/env bash\necho 1.2.3\n')
+ make_executable(py_exe)
+
+ # should be unhealthy due to version mismatch
+ ret = python.health_check(prefix, C.DEFAULT)
+ assert ret == (
+ f'virtualenv python version did not match created version:\n'
+ f'- actual version: 1.2.3\n'
+ f'- expected version: {python._version_info(sys.executable)}\n'
+ )
+
+ # now put the exe back and it should be healthy again
+ os.replace(f'{py_exe}.tmp', py_exe)
+
+ assert python.health_check(prefix, C.DEFAULT) is None
+
+
+def test_language_versioned_python_hook(tmp_path):
+ setup_py = '''\
+from setuptools import setup
+setup(
+ name='example',
+ py_modules=['mod'],
+ entry_points={'console_scripts': ['myexe=mod:main']},
+)
+'''
+ tmp_path.joinpath('setup.py').write_text(setup_py)
+ tmp_path.joinpath('mod.py').write_text('def main(): print("ohai")')
+
+ # we patch this to force virtualenv executing with `-p` since we can't
+ # reliably have multiple pythons available in CI
+ with mock.patch.object(
+ python,
+ '_sys_executable_matches',
+ return_value=False,
+ ):
+ assert run_language(tmp_path, python, 'myexe') == (0, b'ohai\n')
+
+
+def _make_hello_hello(tmp_path):
+ setup_py = '''\
+from setuptools import setup
+
+setup(
+ name='socks',
+ version='0.0.0',
+ py_modules=['socks'],
+ entry_points={'console_scripts': ['socks = socks:main']},
+)
+'''
+
+ main_py = '''\
+import sys
+
+def main():
+ print(repr(sys.argv[1:]))
+ print('hello hello')
+ return 0
+'''
+ tmp_path.joinpath('setup.py').write_text(setup_py)
+ tmp_path.joinpath('socks.py').write_text(main_py)
+
+
+def test_simple_python_hook(tmp_path):
+ _make_hello_hello(tmp_path)
+
+ ret = run_language(tmp_path, python, 'socks', [os.devnull])
+ assert ret == (0, f'[{os.devnull!r}]\nhello hello\n'.encode())
+
+
+def test_simple_python_hook_default_version(tmp_path):
+ # make sure that this continues to work for platforms where default
+ # language detection does not work
+ with mock.patch.object(
+ python,
+ 'get_default_version',
+ return_value=C.DEFAULT,
+ ):
+ test_simple_python_hook(tmp_path)
+
+
+def test_python_hook_weird_setup_cfg(tmp_path):
+ _make_hello_hello(tmp_path)
+ setup_cfg = '[install]\ninstall_scripts=/usr/sbin'
+ tmp_path.joinpath('setup.cfg').write_text(setup_cfg)
+
+ ret = run_language(tmp_path, python, 'socks', [os.devnull])
+ assert ret == (0, f'[{os.devnull!r}]\nhello hello\n'.encode())
diff --git a/tests/languages/r_test.py b/tests/languages/r_test.py
new file mode 100644
index 0000000..02c559c
--- /dev/null
+++ b/tests/languages/r_test.py
@@ -0,0 +1,223 @@
+from __future__ import annotations
+
+import os.path
+import shutil
+
+import pytest
+
+from pre_commit import envcontext
+from pre_commit.languages import r
+from pre_commit.prefix import Prefix
+from pre_commit.store import _make_local_repo
+from pre_commit.util import win_exe
+from testing.language_helpers import run_language
+
+
+def test_r_parsing_file_no_opts_no_args(tmp_path):
+ cmd = r._cmd_from_hook(
+ Prefix(str(tmp_path)),
+ 'Rscript some-script.R',
+ (),
+ is_local=False,
+ )
+ assert cmd == (
+ 'Rscript',
+ '--no-save', '--no-restore', '--no-site-file', '--no-environ',
+ str(tmp_path.joinpath('some-script.R')),
+ )
+
+
+def test_r_parsing_file_opts_no_args():
+ with pytest.raises(ValueError) as excinfo:
+ r._entry_validate(['Rscript', '--no-init', '/path/to/file'])
+
+ msg, = excinfo.value.args
+ assert msg == (
+ 'The only valid syntax is `Rscript -e {expr}`'
+ 'or `Rscript path/to/hook/script`'
+ )
+
+
+def test_r_parsing_file_no_opts_args(tmp_path):
+ cmd = r._cmd_from_hook(
+ Prefix(str(tmp_path)),
+ 'Rscript some-script.R',
+ ('--no-cache',),
+ is_local=False,
+ )
+ assert cmd == (
+ 'Rscript',
+ '--no-save', '--no-restore', '--no-site-file', '--no-environ',
+ str(tmp_path.joinpath('some-script.R')),
+ '--no-cache',
+ )
+
+
+def test_r_parsing_expr_no_opts_no_args1(tmp_path):
+ cmd = r._cmd_from_hook(
+ Prefix(str(tmp_path)),
+ "Rscript -e '1+1'",
+ (),
+ is_local=False,
+ )
+ assert cmd == (
+ 'Rscript',
+ '--no-save', '--no-restore', '--no-site-file', '--no-environ',
+ '-e', '1+1',
+ )
+
+
+def test_r_parsing_local_hook_path_is_not_expanded(tmp_path):
+ cmd = r._cmd_from_hook(
+ Prefix(str(tmp_path)),
+ 'Rscript path/to/thing.R',
+ (),
+ is_local=True,
+ )
+ assert cmd == (
+ 'Rscript',
+ '--no-save', '--no-restore', '--no-site-file', '--no-environ',
+ 'path/to/thing.R',
+ )
+
+
+def test_r_parsing_expr_no_opts_no_args2():
+ with pytest.raises(ValueError) as excinfo:
+ r._entry_validate(['Rscript', '-e', '1+1', '-e', 'letters'])
+ msg, = excinfo.value.args
+ assert msg == 'You can supply at most one expression.'
+
+
+def test_r_parsing_expr_opts_no_args2():
+ with pytest.raises(ValueError) as excinfo:
+ r._entry_validate(
+ ['Rscript', '--vanilla', '-e', '1+1', '-e', 'letters'],
+ )
+ msg, = excinfo.value.args
+ assert msg == (
+ 'The only valid syntax is `Rscript -e {expr}`'
+ 'or `Rscript path/to/hook/script`'
+ )
+
+
+def test_r_parsing_expr_args_in_entry2():
+ with pytest.raises(ValueError) as excinfo:
+ r._entry_validate(['Rscript', '-e', 'expr1', '--another-arg'])
+
+ msg, = excinfo.value.args
+ assert msg == 'You can supply at most one expression.'
+
+
+def test_r_parsing_expr_non_Rscirpt():
+ with pytest.raises(ValueError) as excinfo:
+ r._entry_validate(['AnotherScript', '-e', '{{}}'])
+
+ msg, = excinfo.value.args
+ assert msg == 'entry must start with `Rscript`.'
+
+
+def test_rscript_exec_relative_to_r_home():
+ expected = os.path.join('r_home_dir', 'bin', win_exe('Rscript'))
+ with envcontext.envcontext((('R_HOME', 'r_home_dir'),)):
+ assert r._rscript_exec() == expected
+
+
+def test_path_rscript_exec_no_r_home_set():
+ with envcontext.envcontext((('R_HOME', envcontext.UNSET),)):
+ assert r._rscript_exec() == 'Rscript'
+
+
+def test_r_hook(tmp_path):
+ renv_lock = '''\
+{
+ "R": {
+ "Version": "4.0.3",
+ "Repositories": [
+ {
+ "Name": "CRAN",
+ "URL": "https://cloud.r-project.org"
+ }
+ ]
+ },
+ "Packages": {
+ "renv": {
+ "Package": "renv",
+ "Version": "0.12.5",
+ "Source": "Repository",
+ "Repository": "CRAN",
+ "Hash": "5c0cdb37f063c58cdab3c7e9fbb8bd2c"
+ },
+ "rprojroot": {
+ "Package": "rprojroot",
+ "Version": "1.0",
+ "Source": "Repository",
+ "Repository": "CRAN",
+ "Hash": "86704667fe0860e4fec35afdfec137f3"
+ }
+ }
+}
+'''
+ description = '''\
+Package: gli.clu
+Title: What the Package Does (One Line, Title Case)
+Type: Package
+Version: 0.0.0.9000
+Authors@R:
+ person(given = "First",
+ family = "Last",
+ role = c("aut", "cre"),
+ email = "first.last@example.com",
+ comment = c(ORCID = "YOUR-ORCID-ID"))
+Description: What the package does (one paragraph).
+License: `use_mit_license()`, `use_gpl3_license()` or friends to
+ pick a license
+Encoding: UTF-8
+LazyData: true
+Roxygen: list(markdown = TRUE)
+RoxygenNote: 7.1.1
+Imports:
+ rprojroot
+'''
+ hello_world_r = '''\
+stopifnot(
+ packageVersion('rprojroot') == '1.0',
+ packageVersion('gli.clu') == '0.0.0.9000'
+)
+cat("Hello, World, from R!\n")
+'''
+
+ tmp_path.joinpath('renv.lock').write_text(renv_lock)
+ tmp_path.joinpath('DESCRIPTION').write_text(description)
+ tmp_path.joinpath('hello-world.R').write_text(hello_world_r)
+ renv_dir = tmp_path.joinpath('renv')
+ renv_dir.mkdir()
+ shutil.copy(
+ os.path.join(
+ os.path.dirname(__file__),
+ '../../pre_commit/resources/empty_template_activate.R',
+ ),
+ renv_dir.joinpath('activate.R'),
+ )
+
+ expected = (0, b'Hello, World, from R!\n')
+ assert run_language(tmp_path, r, 'Rscript hello-world.R') == expected
+
+
+def test_r_inline(tmp_path):
+ _make_local_repo(str(tmp_path))
+
+ cmd = '''\
+Rscript -e '
+ stopifnot(packageVersion("rprojroot") == "1.0")
+ cat(commandArgs(trailingOnly = TRUE), "from R!\n", sep=", ")
+'
+'''
+
+ ret = run_language(
+ tmp_path,
+ r,
+ cmd,
+ deps=('rprojroot@1.0',),
+ args=('hi', 'hello'),
+ )
+ assert ret == (0, b'hi, hello, from R!\n')
diff --git a/tests/languages/ruby_test.py b/tests/languages/ruby_test.py
new file mode 100644
index 0000000..6397a43
--- /dev/null
+++ b/tests/languages/ruby_test.py
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+import tarfile
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit import parse_shebang
+from pre_commit.envcontext import envcontext
+from pre_commit.languages import ruby
+from pre_commit.languages.ruby import _resource_bytesio
+from pre_commit.store import _make_local_repo
+from testing.language_helpers import run_language
+from testing.util import cwd
+from testing.util import xfailif_windows
+
+
+ACTUAL_GET_DEFAULT_VERSION = ruby.get_default_version.__wrapped__
+
+
+@pytest.fixture
+def find_exe_mck():
+ with mock.patch.object(parse_shebang, 'find_executable') as mck:
+ yield mck
+
+
+def test_uses_default_version_when_not_available(find_exe_mck):
+ find_exe_mck.return_value = None
+ assert ACTUAL_GET_DEFAULT_VERSION() == C.DEFAULT
+
+
+def test_uses_system_if_both_gem_and_ruby_are_available(find_exe_mck):
+ find_exe_mck.return_value = '/path/to/exe'
+ assert ACTUAL_GET_DEFAULT_VERSION() == 'system'
+
+
+@pytest.mark.parametrize(
+ 'filename',
+ ('rbenv.tar.gz', 'ruby-build.tar.gz', 'ruby-download.tar.gz'),
+)
+def test_archive_root_stat(filename):
+ with _resource_bytesio(filename) as f:
+ with tarfile.open(fileobj=f) as tarf:
+ root, _, _ = filename.partition('.')
+ assert oct(tarf.getmember(root).mode) == '0o755'
+
+
+def _setup_hello_world(tmp_path):
+ bin_dir = tmp_path.joinpath('bin')
+ bin_dir.mkdir()
+ bin_dir.joinpath('ruby_hook').write_text(
+ '#!/usr/bin/env ruby\n'
+ "puts 'Hello world from a ruby hook'\n",
+ )
+ gemspec = '''\
+Gem::Specification.new do |s|
+ s.name = 'ruby_hook'
+ s.version = '0.1.0'
+ s.authors = ['Anthony Sottile']
+ s.summary = 'A ruby hook!'
+ s.description = 'A ruby hook!'
+ s.files = ['bin/ruby_hook']
+ s.executables = ['ruby_hook']
+end
+'''
+ tmp_path.joinpath('ruby_hook.gemspec').write_text(gemspec)
+
+
+def test_ruby_hook_system(tmp_path):
+ assert ruby.get_default_version() == 'system'
+
+ _setup_hello_world(tmp_path)
+
+ ret = run_language(tmp_path, ruby, 'ruby_hook')
+ assert ret == (0, b'Hello world from a ruby hook\n')
+
+
+def test_ruby_with_user_install_set(tmp_path):
+ gemrc = tmp_path.joinpath('gemrc')
+ gemrc.write_text('gem: --user-install\n')
+
+ with envcontext((('GEMRC', str(gemrc)),)):
+ test_ruby_hook_system(tmp_path)
+
+
+def test_ruby_additional_deps(tmp_path):
+ _make_local_repo(tmp_path)
+
+ ret = run_language(
+ tmp_path,
+ ruby,
+ 'ruby -e',
+ args=('require "tins"',),
+ deps=('tins',),
+ )
+ assert ret == (0, b'')
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_ruby_hook_default(tmp_path):
+ _setup_hello_world(tmp_path)
+
+ out, ret = run_language(tmp_path, ruby, 'rbenv --help', version='default')
+ assert out == 0
+ assert ret.startswith(b'Usage: rbenv ')
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_ruby_hook_language_version(tmp_path):
+ _setup_hello_world(tmp_path)
+ tmp_path.joinpath('bin', 'ruby_hook').write_text(
+ '#!/usr/bin/env ruby\n'
+ 'puts RUBY_VERSION\n'
+ "puts 'Hello world from a ruby hook'\n",
+ )
+
+ ret = run_language(tmp_path, ruby, 'ruby_hook', version='3.2.0')
+ assert ret == (0, b'3.2.0\nHello world from a ruby hook\n')
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_ruby_with_bundle_disable_shared_gems(tmp_path):
+ workdir = tmp_path.joinpath('workdir')
+ workdir.mkdir()
+ # this needs a `source` or there's a deprecation warning
+ # silencing this with `BUNDLE_GEMFILE` breaks some tools (#2739)
+ workdir.joinpath('Gemfile').write_text('source ""\ngem "lol_hai"\n')
+ # this bundle config causes things to be written elsewhere
+ bundle = workdir.joinpath('.bundle')
+ bundle.mkdir()
+ bundle.joinpath('config').write_text(
+ 'BUNDLE_DISABLE_SHARED_GEMS: true\n'
+ 'BUNDLE_PATH: vendor/gem\n',
+ )
+
+ with cwd(workdir):
+ # `3.2.0` has new enough `gem` reading `.bundle`
+ test_ruby_hook_language_version(tmp_path)
diff --git a/tests/languages/rust_test.py b/tests/languages/rust_test.py
new file mode 100644
index 0000000..5c17f5b
--- /dev/null
+++ b/tests/languages/rust_test.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit import parse_shebang
+from pre_commit.languages import rust
+from pre_commit.store import _make_local_repo
+from testing.language_helpers import run_language
+
+ACTUAL_GET_DEFAULT_VERSION = rust.get_default_version.__wrapped__
+
+
+@pytest.fixture
+def cmd_output_b_mck():
+ with mock.patch.object(rust, 'cmd_output_b') as mck:
+ yield mck
+
+
+def test_sets_system_when_rust_is_available(cmd_output_b_mck):
+ cmd_output_b_mck.return_value = (0, b'', b'')
+ assert ACTUAL_GET_DEFAULT_VERSION() == 'system'
+
+
+def test_uses_default_when_rust_is_not_available(cmd_output_b_mck):
+ cmd_output_b_mck.return_value = (127, b'', b'error: not found')
+ assert ACTUAL_GET_DEFAULT_VERSION() == C.DEFAULT
+
+
+def _make_hello_world(tmp_path):
+ src_dir = tmp_path.joinpath('src')
+ src_dir.mkdir()
+ src_dir.joinpath('main.rs').write_text(
+ 'fn main() {\n'
+ ' println!("Hello, world!");\n'
+ '}\n',
+ )
+ tmp_path.joinpath('Cargo.toml').write_text(
+ '[package]\n'
+ 'name = "hello_world"\n'
+ 'version = "0.1.0"\n'
+ 'edition = "2021"\n',
+ )
+
+
+def test_installs_rust_missing_rustup(tmp_path):
+ _make_hello_world(tmp_path)
+
+ # pretend like `rustup` doesn't exist so it gets bootstrapped
+ calls = []
+ orig = parse_shebang.find_executable
+
+ def mck(exe, env=None):
+ calls.append(exe)
+ if len(calls) == 1:
+ assert exe == 'rustup'
+ return None
+ return orig(exe, env=env)
+
+ with mock.patch.object(parse_shebang, 'find_executable', side_effect=mck):
+ ret = run_language(tmp_path, rust, 'hello_world', version='1.56.0')
+ assert calls == ['rustup', 'rustup', 'cargo', 'hello_world']
+ assert ret == (0, b'Hello, world!\n')
+
+
+@pytest.mark.parametrize('version', (C.DEFAULT, '1.56.0'))
+def test_language_version_with_rustup(tmp_path, version):
+ assert parse_shebang.find_executable('rustup') is not None
+
+ _make_hello_world(tmp_path)
+
+ ret = run_language(tmp_path, rust, 'hello_world', version=version)
+ assert ret == (0, b'Hello, world!\n')
+
+
+@pytest.mark.parametrize('dep', ('cli:shellharden:4.2.0', 'cli:shellharden'))
+def test_rust_cli_additional_dependencies(tmp_path, dep):
+ _make_local_repo(str(tmp_path))
+
+ t_sh = tmp_path.joinpath('t.sh')
+ t_sh.write_text('echo $hi\n')
+
+ assert rust.get_default_version() == 'system'
+ ret = run_language(
+ tmp_path,
+ rust,
+ 'shellharden --transform',
+ deps=(dep,),
+ args=(str(t_sh),),
+ )
+ assert ret == (0, b'echo "$hi"\n')
+
+
+def test_run_lib_additional_dependencies(tmp_path):
+ _make_hello_world(tmp_path)
+
+ deps = ('shellharden:4.2.0', 'git-version')
+ ret = run_language(tmp_path, rust, 'hello_world', deps=deps)
+ assert ret == (0, b'Hello, world!\n')
+
+ bin_dir = tmp_path.joinpath('rustenv-system', 'bin')
+ assert bin_dir.is_dir()
+ assert not bin_dir.joinpath('shellharden').exists()
+ assert not bin_dir.joinpath('shellharden.exe').exists()
diff --git a/tests/languages/script_test.py b/tests/languages/script_test.py
new file mode 100644
index 0000000..a02f615
--- /dev/null
+++ b/tests/languages/script_test.py
@@ -0,0 +1,14 @@
+from __future__ import annotations
+
+from pre_commit.languages import script
+from pre_commit.util import make_executable
+from testing.language_helpers import run_language
+
+
+def test_script_language(tmp_path):
+ exe = tmp_path.joinpath('main')
+ exe.write_text('#!/usr/bin/env bash\necho hello hello world\n')
+ make_executable(exe)
+
+ expected = (0, b'hello hello world\n')
+ assert run_language(tmp_path, script, 'main') == expected
diff --git a/tests/languages/swift_test.py b/tests/languages/swift_test.py
new file mode 100644
index 0000000..e0a8ea4
--- /dev/null
+++ b/tests/languages/swift_test.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+import sys
+
+import pytest
+
+from pre_commit.languages import swift
+from testing.language_helpers import run_language
+
+
+@pytest.mark.skipif(
+ sys.platform == 'win32',
+ reason='swift is not supported on windows',
+)
+def test_swift_language(tmp_path): # pragma: win32 no cover
+ package_swift = '''\
+// swift-tools-version:5.0
+import PackageDescription
+
+let package = Package(
+ name: "swift_hooks_repo",
+ targets: [.target(name: "swift_hooks_repo")]
+)
+'''
+ tmp_path.joinpath('Package.swift').write_text(package_swift)
+ src_dir = tmp_path.joinpath('Sources/swift_hooks_repo')
+ src_dir.mkdir(parents=True)
+ src_dir.joinpath('main.swift').write_text('print("Hello, world!")\n')
+
+ expected = (0, b'Hello, world!\n')
+ assert run_language(tmp_path, swift, 'swift_hooks_repo') == expected
diff --git a/tests/languages/system_test.py b/tests/languages/system_test.py
new file mode 100644
index 0000000..dcd9cf1
--- /dev/null
+++ b/tests/languages/system_test.py
@@ -0,0 +1,9 @@
+from __future__ import annotations
+
+from pre_commit.languages import system
+from testing.language_helpers import run_language
+
+
+def test_system_language(tmp_path):
+ expected = (0, b'hello hello world\n')
+ assert run_language(tmp_path, system, 'echo hello hello world') == expected
diff --git a/tests/logging_handler_test.py b/tests/logging_handler_test.py
new file mode 100644
index 0000000..dc43a99
--- /dev/null
+++ b/tests/logging_handler_test.py
@@ -0,0 +1,23 @@
+from __future__ import annotations
+
+import logging
+
+from pre_commit import color
+from pre_commit.logging_handler import LoggingHandler
+
+
+def _log_record(message, level):
+ return logging.LogRecord('name', level, '', 1, message, {}, None)
+
+
+def test_logging_handler_color(cap_out):
+ handler = LoggingHandler(True)
+ handler.emit(_log_record('hi', logging.WARNING))
+ ret = cap_out.get()
+ assert ret == f'{color.YELLOW}[WARNING]{color.NORMAL} hi\n'
+
+
+def test_logging_handler_no_color(cap_out):
+ handler = LoggingHandler(False)
+ handler.emit(_log_record('hi', logging.WARNING))
+ assert cap_out.get() == '[WARNING] hi\n'
diff --git a/tests/main_test.py b/tests/main_test.py
new file mode 100644
index 0000000..945349f
--- /dev/null
+++ b/tests/main_test.py
@@ -0,0 +1,224 @@
+from __future__ import annotations
+
+import argparse
+import os.path
+from unittest import mock
+
+import pytest
+
+import pre_commit.constants as C
+from pre_commit import main
+from pre_commit.errors import FatalError
+from pre_commit.util import cmd_output
+from testing.auto_namedtuple import auto_namedtuple
+from testing.util import cwd
+
+
+def _args(**kwargs):
+ kwargs.setdefault('command', 'help')
+ kwargs.setdefault('config', C.CONFIG_FILE)
+ if kwargs['command'] in {'run', 'try-repo'}:
+ kwargs.setdefault('commit_msg_filename', None)
+ return argparse.Namespace(**kwargs)
+
+
+def test_adjust_args_and_chdir_not_in_git_dir(in_tmpdir):
+ with pytest.raises(FatalError):
+ main._adjust_args_and_chdir(_args())
+
+
+def test_adjust_args_and_chdir_noop(in_git_dir):
+ args = _args(command='run', files=['f1', 'f2'])
+ main._adjust_args_and_chdir(args)
+ assert os.getcwd() == in_git_dir
+ assert args.config == C.CONFIG_FILE
+ assert args.files == ['f1', 'f2']
+
+
+def test_adjust_args_and_chdir_relative_things(in_git_dir):
+ in_git_dir.join('foo/cfg.yaml').ensure()
+ with in_git_dir.join('foo').as_cwd():
+ args = _args(command='run', files=['f1', 'f2'], config='cfg.yaml')
+ main._adjust_args_and_chdir(args)
+ assert os.getcwd() == in_git_dir
+ assert args.config == os.path.join('foo', 'cfg.yaml')
+ assert args.files == [
+ os.path.join('foo', 'f1'),
+ os.path.join('foo', 'f2'),
+ ]
+
+
+def test_adjust_args_and_chdir_relative_commit_msg(in_git_dir):
+ in_git_dir.join('foo/cfg.yaml').ensure()
+ with in_git_dir.join('foo').as_cwd():
+ args = _args(command='run', files=[], commit_msg_filename='t.txt')
+ main._adjust_args_and_chdir(args)
+ assert os.getcwd() == in_git_dir
+ assert args.commit_msg_filename == os.path.join('foo', 't.txt')
+
+
+@pytest.mark.skipif(os.name != 'nt', reason='windows feature')
+def test_install_on_subst(in_git_dir, store): # pragma: posix no cover
+ assert not os.path.exists('Z:')
+ cmd_output('subst', 'Z:', str(in_git_dir))
+ try:
+ with cwd('Z:'):
+ test_adjust_args_and_chdir_noop('Z:\\')
+ finally:
+ cmd_output('subst', '/d', 'Z:')
+
+
+def test_adjust_args_and_chdir_non_relative_config(in_git_dir):
+ with in_git_dir.join('foo').ensure_dir().as_cwd():
+ args = _args()
+ main._adjust_args_and_chdir(args)
+ assert os.getcwd() == in_git_dir
+ assert args.config == C.CONFIG_FILE
+
+
+def test_adjust_args_try_repo_repo_relative(in_git_dir):
+ with in_git_dir.join('foo').ensure_dir().as_cwd():
+ args = _args(command='try-repo', repo='../foo', files=[])
+ assert args.repo is not None
+ assert os.path.exists(args.repo)
+ main._adjust_args_and_chdir(args)
+ assert os.getcwd() == in_git_dir
+ assert os.path.exists(args.repo)
+ assert args.repo == 'foo'
+
+
+FNS = (
+ 'autoupdate', 'clean', 'gc', 'hook_impl', 'install', 'install_hooks',
+ 'migrate_config', 'run', 'sample_config', 'uninstall',
+ 'validate_config', 'validate_manifest',
+)
+CMDS = tuple(fn.replace('_', '-') for fn in FNS)
+
+
+@pytest.fixture
+def mock_commands():
+ mcks = {fn: mock.patch.object(main, fn).start() for fn in FNS}
+ ret = auto_namedtuple(**mcks)
+ yield ret
+ for mck in ret:
+ mck.stop()
+
+
+@pytest.fixture
+def argparse_parse_args_spy():
+ parse_args_mock = mock.Mock()
+
+ original_parse_args = argparse.ArgumentParser.parse_args
+
+ def fake_parse_args(self, args):
+ # call our spy object
+ parse_args_mock(args)
+ return original_parse_args(self, args)
+
+ with mock.patch.object(
+ argparse.ArgumentParser, 'parse_args', fake_parse_args,
+ ):
+ yield parse_args_mock
+
+
+def assert_only_one_mock_called(mock_objs):
+ total_call_count = sum(mock_obj.call_count for mock_obj in mock_objs)
+ assert total_call_count == 1
+
+
+def test_overall_help(mock_commands):
+ with pytest.raises(SystemExit):
+ main.main(['--help'])
+
+
+def test_help_command(mock_commands, argparse_parse_args_spy):
+ with pytest.raises(SystemExit):
+ main.main(['help'])
+
+ argparse_parse_args_spy.assert_has_calls([
+ mock.call(['help']),
+ mock.call(['--help']),
+ ])
+
+
+def test_help_other_command(mock_commands, argparse_parse_args_spy):
+ with pytest.raises(SystemExit):
+ main.main(['help', 'run'])
+
+ argparse_parse_args_spy.assert_has_calls([
+ mock.call(['help', 'run']),
+ mock.call(['run', '--help']),
+ ])
+
+
+@pytest.mark.parametrize('command', CMDS)
+def test_all_cmds(command, mock_commands, mock_store_dir):
+ main.main((command,))
+ assert getattr(mock_commands, command.replace('-', '_')).call_count == 1
+ assert_only_one_mock_called(mock_commands)
+
+
+def test_try_repo(mock_store_dir):
+ with mock.patch.object(main, 'try_repo') as patch:
+ main.main(('try-repo', '.'))
+ assert patch.call_count == 1
+
+
+def test_init_templatedir(mock_store_dir):
+ with mock.patch.object(main, 'init_templatedir') as patch:
+ main.main(('init-templatedir', 'tdir'))
+
+ assert patch.call_count == 1
+ assert 'tdir' in patch.call_args[0]
+ assert patch.call_args[1]['hook_types'] is None
+ assert patch.call_args[1]['skip_on_missing_config'] is True
+
+
+def test_init_templatedir_options(mock_store_dir):
+ args = (
+ 'init-templatedir',
+ 'tdir',
+ '--hook-type',
+ 'commit-msg',
+ '--no-allow-missing-config',
+ )
+ with mock.patch.object(main, 'init_templatedir') as patch:
+ main.main(args)
+
+ assert patch.call_count == 1
+ assert 'tdir' in patch.call_args[0]
+ assert patch.call_args[1]['hook_types'] == ['commit-msg']
+ assert patch.call_args[1]['skip_on_missing_config'] is False
+
+
+def test_help_cmd_in_empty_directory(
+ in_tmpdir,
+ mock_commands,
+ argparse_parse_args_spy,
+):
+ with pytest.raises(SystemExit):
+ main.main(['help', 'run'])
+
+ argparse_parse_args_spy.assert_has_calls([
+ mock.call(['help', 'run']),
+ mock.call(['run', '--help']),
+ ])
+
+
+def test_expected_fatal_error_no_git_repo(in_tmpdir, cap_out, mock_store_dir):
+ with pytest.raises(SystemExit):
+ main.main([])
+ log_file = os.path.join(mock_store_dir, 'pre-commit.log')
+ cap_out_lines = cap_out.get().splitlines()
+ assert (
+ cap_out_lines[-2] ==
+ 'An error has occurred: FatalError: git failed. '
+ 'Is it installed, and are you in a Git repository directory?'
+ )
+ assert cap_out_lines[-1] == f'Check the log at {log_file}'
+
+
+def test_hook_stage_migration(mock_store_dir):
+ with mock.patch.object(main, 'run') as mck:
+ main.main(('run', '--hook-stage', 'commit'))
+ assert mck.call_args[0][2].hook_stage == 'pre-commit'
diff --git a/tests/meta_hooks/__init__.py b/tests/meta_hooks/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/meta_hooks/__init__.py
diff --git a/tests/meta_hooks/check_hooks_apply_test.py b/tests/meta_hooks/check_hooks_apply_test.py
new file mode 100644
index 0000000..63f9715
--- /dev/null
+++ b/tests/meta_hooks/check_hooks_apply_test.py
@@ -0,0 +1,140 @@
+from __future__ import annotations
+
+from pre_commit.meta_hooks import check_hooks_apply
+from testing.fixtures import add_config_to_repo
+
+
+def test_hook_excludes_everything(capsys, in_git_dir, mock_store_dir):
+ config = {
+ 'repos': [
+ {
+ 'repo': 'meta',
+ 'hooks': [
+ {
+ 'id': 'check-useless-excludes',
+ 'exclude': '.pre-commit-config.yaml',
+ },
+ ],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_hooks_apply.main(()) == 1
+
+ out, _ = capsys.readouterr()
+ assert 'check-useless-excludes does not apply to this repository' in out
+
+
+def test_hook_includes_nothing(capsys, in_git_dir, mock_store_dir):
+ config = {
+ 'repos': [
+ {
+ 'repo': 'meta',
+ 'hooks': [
+ {
+ 'id': 'check-useless-excludes',
+ 'files': 'foo',
+ },
+ ],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_hooks_apply.main(()) == 1
+
+ out, _ = capsys.readouterr()
+ assert 'check-useless-excludes does not apply to this repository' in out
+
+
+def test_hook_types_not_matched(capsys, in_git_dir, mock_store_dir):
+ config = {
+ 'repos': [
+ {
+ 'repo': 'meta',
+ 'hooks': [
+ {
+ 'id': 'check-useless-excludes',
+ 'types': ['python'],
+ },
+ ],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_hooks_apply.main(()) == 1
+
+ out, _ = capsys.readouterr()
+ assert 'check-useless-excludes does not apply to this repository' in out
+
+
+def test_hook_types_excludes_everything(capsys, in_git_dir, mock_store_dir):
+ config = {
+ 'repos': [
+ {
+ 'repo': 'meta',
+ 'hooks': [
+ {
+ 'id': 'check-useless-excludes',
+ 'exclude_types': ['yaml'],
+ },
+ ],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_hooks_apply.main(()) == 1
+
+ out, _ = capsys.readouterr()
+ assert 'check-useless-excludes does not apply to this repository' in out
+
+
+def test_valid_exceptions(capsys, in_git_dir, mock_store_dir):
+ config = {
+ 'repos': [
+ {
+ 'repo': 'local',
+ 'hooks': [
+ # applies to a file
+ {
+ 'id': 'check-yaml',
+ 'name': 'check yaml',
+ 'entry': './check-yaml',
+ 'language': 'script',
+ 'files': r'\.yaml$',
+ },
+ # Should not be reported as an error due to language: fail
+ {
+ 'id': 'changelogs-rst',
+ 'name': 'changelogs must be rst',
+ 'entry': 'changelog filenames must end in .rst',
+ 'language': 'fail',
+ 'files': r'changelog/.*(?<!\.rst)$',
+ },
+ # Should not be reported as an error due to always_run
+ {
+ 'id': 'i-always-run',
+ 'name': 'make check',
+ 'entry': 'make check',
+ 'language': 'system',
+ 'files': '^$',
+ 'always_run': True,
+ },
+ ],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_hooks_apply.main(()) == 0
+
+ out, _ = capsys.readouterr()
+ assert out == ''
diff --git a/tests/meta_hooks/check_useless_excludes_test.py b/tests/meta_hooks/check_useless_excludes_test.py
new file mode 100644
index 0000000..15b68b4
--- /dev/null
+++ b/tests/meta_hooks/check_useless_excludes_test.py
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+from pre_commit import git
+from pre_commit.meta_hooks import check_useless_excludes
+from pre_commit.util import cmd_output
+from testing.fixtures import add_config_to_repo
+from testing.fixtures import make_config_from_repo
+from testing.fixtures import make_repo
+from testing.util import xfailif_windows
+
+
+def test_useless_exclude_global(capsys, in_git_dir):
+ config = {
+ 'exclude': 'foo',
+ 'repos': [
+ {
+ 'repo': 'meta',
+ 'hooks': [{'id': 'check-useless-excludes'}],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_useless_excludes.main(()) == 1
+
+ out, _ = capsys.readouterr()
+ out = out.strip()
+ assert "The global exclude pattern 'foo' does not match any files" == out
+
+
+def test_useless_exclude_for_hook(capsys, in_git_dir):
+ config = {
+ 'repos': [
+ {
+ 'repo': 'meta',
+ 'hooks': [{'id': 'check-useless-excludes', 'exclude': 'foo'}],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_useless_excludes.main(()) == 1
+
+ out, _ = capsys.readouterr()
+ out = out.strip()
+ expected = (
+ "The exclude pattern 'foo' for check-useless-excludes "
+ 'does not match any files'
+ )
+ assert expected == out
+
+
+def test_useless_exclude_with_types_filter(capsys, in_git_dir):
+ config = {
+ 'repos': [
+ {
+ 'repo': 'meta',
+ 'hooks': [
+ {
+ 'id': 'check-useless-excludes',
+ 'exclude': '.pre-commit-config.yaml',
+ 'types': ['python'],
+ },
+ ],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_useless_excludes.main(()) == 1
+
+ out, _ = capsys.readouterr()
+ out = out.strip()
+ expected = (
+ "The exclude pattern '.pre-commit-config.yaml' for "
+ 'check-useless-excludes does not match any files'
+ )
+ assert expected == out
+
+
+def test_no_excludes(capsys, in_git_dir):
+ config = {
+ 'repos': [
+ {
+ 'repo': 'meta',
+ 'hooks': [{'id': 'check-useless-excludes'}],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_useless_excludes.main(()) == 0
+
+ out, _ = capsys.readouterr()
+ assert out == ''
+
+
+def test_valid_exclude(capsys, in_git_dir):
+ config = {
+ 'repos': [
+ {
+ 'repo': 'meta',
+ 'hooks': [
+ {
+ 'id': 'check-useless-excludes',
+ 'exclude': '.pre-commit-config.yaml',
+ },
+ ],
+ },
+ ],
+ }
+
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ assert check_useless_excludes.main(()) == 0
+
+ out, _ = capsys.readouterr()
+ assert out == ''
+
+
+@xfailif_windows # pragma: win32 no cover
+def test_useless_excludes_broken_symlink(capsys, in_git_dir, tempdir_factory):
+ path = make_repo(tempdir_factory, 'script_hooks_repo')
+ config = make_config_from_repo(path)
+ config['hooks'][0]['exclude'] = 'broken-symlink'
+ add_config_to_repo(in_git_dir.strpath, config)
+
+ in_git_dir.join('broken-symlink').mksymlinkto('DNE')
+ cmd_output('git', 'add', 'broken-symlink')
+ git.commit()
+
+ assert check_useless_excludes.main(('.pre-commit-config.yaml',)) == 0
+
+ out, _ = capsys.readouterr()
+ assert out == ''
diff --git a/tests/meta_hooks/identity_test.py b/tests/meta_hooks/identity_test.py
new file mode 100644
index 0000000..97c20ea
--- /dev/null
+++ b/tests/meta_hooks/identity_test.py
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from pre_commit.meta_hooks import identity
+
+
+def test_identity(cap_out):
+ assert not identity.main(('a', 'b', 'c'))
+ assert cap_out.get() == 'a\nb\nc\n'
diff --git a/tests/output_test.py b/tests/output_test.py
new file mode 100644
index 0000000..c806829
--- /dev/null
+++ b/tests/output_test.py
@@ -0,0 +1,11 @@
+from __future__ import annotations
+
+import io
+
+from pre_commit import output
+
+
+def test_output_write_writes():
+ stream = io.BytesIO()
+ output.write('hello world', stream)
+ assert stream.getvalue() == b'hello world'
diff --git a/tests/parse_shebang_test.py b/tests/parse_shebang_test.py
new file mode 100644
index 0000000..bd4384d
--- /dev/null
+++ b/tests/parse_shebang_test.py
@@ -0,0 +1,154 @@
+from __future__ import annotations
+
+import contextlib
+import os.path
+import shutil
+import sys
+
+import pytest
+
+from pre_commit import parse_shebang
+from pre_commit.envcontext import envcontext
+from pre_commit.envcontext import Var
+from pre_commit.util import make_executable
+
+
+def _echo_exe() -> str:
+ exe = shutil.which('echo')
+ assert exe is not None
+ return exe
+
+
+def test_file_doesnt_exist():
+ assert parse_shebang.parse_filename('herp derp derp') == ()
+
+
+def test_simple_case(tmpdir):
+ x = tmpdir.join('f')
+ x.write('#!/usr/bin/env echo')
+ make_executable(x.strpath)
+ assert parse_shebang.parse_filename(x.strpath) == ('echo',)
+
+
+def test_find_executable_full_path():
+ assert parse_shebang.find_executable(sys.executable) == sys.executable
+
+
+def test_find_executable_on_path():
+ assert parse_shebang.find_executable('echo') == _echo_exe()
+
+
+def test_find_executable_not_found_none():
+ assert parse_shebang.find_executable('not-a-real-executable') is None
+
+
+def write_executable(shebang, filename='run'):
+ os.mkdir('bin')
+ path = os.path.join('bin', filename)
+ with open(path, 'w') as f:
+ f.write(f'#!{shebang}')
+ make_executable(path)
+ return path
+
+
+@contextlib.contextmanager
+def bin_on_path():
+ bindir = os.path.join(os.getcwd(), 'bin')
+ with envcontext((('PATH', (bindir, os.pathsep, Var('PATH'))),)):
+ yield
+
+
+def test_find_executable_path_added(in_tmpdir):
+ path = os.path.abspath(write_executable('/usr/bin/env sh'))
+ assert parse_shebang.find_executable('run') is None
+ with bin_on_path():
+ assert parse_shebang.find_executable('run') == path
+
+
+def test_find_executable_path_ext(in_tmpdir):
+ """Windows exports PATHEXT as a list of extensions to automatically add
+ to executables when doing PATH searching.
+ """
+ exe_path = os.path.abspath(
+ write_executable('/usr/bin/env sh', filename='run.myext'),
+ )
+ env_path = {'PATH': os.path.dirname(exe_path)}
+ env_path_ext = dict(env_path, PATHEXT=os.pathsep.join(('.exe', '.myext')))
+ assert parse_shebang.find_executable('run') is None
+ assert parse_shebang.find_executable('run', env=env_path) is None
+ ret = parse_shebang.find_executable('run.myext', env=env_path)
+ assert ret == exe_path
+ ret = parse_shebang.find_executable('run', env=env_path_ext)
+ assert ret == exe_path
+
+
+def test_normexe_does_not_exist():
+ with pytest.raises(OSError) as excinfo:
+ parse_shebang.normexe('i-dont-exist-lol')
+ assert excinfo.value.args == ('Executable `i-dont-exist-lol` not found',)
+
+
+def test_normexe_does_not_exist_sep():
+ with pytest.raises(OSError) as excinfo:
+ parse_shebang.normexe('./i-dont-exist-lol')
+ assert excinfo.value.args == ('Executable `./i-dont-exist-lol` not found',)
+
+
+@pytest.mark.xfail(sys.platform == 'win32', reason='posix only')
+def test_normexe_not_executable(tmpdir): # pragma: win32 no cover
+ tmpdir.join('exe').ensure()
+ with tmpdir.as_cwd(), pytest.raises(OSError) as excinfo:
+ parse_shebang.normexe('./exe')
+ assert excinfo.value.args == ('Executable `./exe` is not executable',)
+
+
+def test_normexe_is_a_directory(tmpdir):
+ with tmpdir.as_cwd():
+ tmpdir.join('exe').ensure_dir()
+ exe = os.path.join('.', 'exe')
+ with pytest.raises(OSError) as excinfo:
+ parse_shebang.normexe(exe)
+ msg, = excinfo.value.args
+ assert msg == f'Executable `{exe}` is a directory'
+
+
+def test_normexe_already_full_path():
+ assert parse_shebang.normexe(sys.executable) == sys.executable
+
+
+def test_normexe_gives_full_path():
+ assert parse_shebang.normexe('echo') == _echo_exe()
+ assert os.sep in _echo_exe()
+
+
+def test_normalize_cmd_trivial():
+ cmd = (_echo_exe(), 'hi')
+ assert parse_shebang.normalize_cmd(cmd) == cmd
+
+
+def test_normalize_cmd_PATH():
+ cmd = ('echo', '--version')
+ expected = (_echo_exe(), '--version')
+ assert parse_shebang.normalize_cmd(cmd) == expected
+
+
+def test_normalize_cmd_shebang(in_tmpdir):
+ us = sys.executable.replace(os.sep, '/')
+ path = write_executable(us)
+ assert parse_shebang.normalize_cmd((path,)) == (us, path)
+
+
+def test_normalize_cmd_PATH_shebang_full_path(in_tmpdir):
+ us = sys.executable.replace(os.sep, '/')
+ path = write_executable(us)
+ with bin_on_path():
+ ret = parse_shebang.normalize_cmd(('run',))
+ assert ret == (us, os.path.abspath(path))
+
+
+def test_normalize_cmd_PATH_shebang_PATH(in_tmpdir):
+ echo = _echo_exe()
+ path = write_executable('/usr/bin/env echo')
+ with bin_on_path():
+ ret = parse_shebang.normalize_cmd(('run',))
+ assert ret == (echo, os.path.abspath(path))
diff --git a/tests/prefix_test.py b/tests/prefix_test.py
new file mode 100644
index 0000000..1eac087
--- /dev/null
+++ b/tests/prefix_test.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+import os.path
+
+import pytest
+
+from pre_commit.prefix import Prefix
+
+
+def norm_slash(*args):
+ return tuple(x.replace('/', os.sep) for x in args)
+
+
+@pytest.mark.parametrize(
+ ('prefix', 'path_end', 'expected_output'),
+ (
+ norm_slash('foo', '', 'foo'),
+ norm_slash('foo', 'bar', 'foo/bar'),
+ norm_slash('foo/bar', '../baz', 'foo/baz'),
+ norm_slash('./', 'bar', 'bar'),
+ norm_slash('./', '', '.'),
+ norm_slash('/tmp/foo', '/tmp/bar', '/tmp/bar'),
+ ),
+)
+def test_path(prefix, path_end, expected_output):
+ instance = Prefix(prefix)
+ ret = instance.path(path_end)
+ assert ret == expected_output
+
+
+def test_path_multiple_args():
+ instance = Prefix('foo')
+ ret = instance.path('bar', 'baz')
+ assert ret == os.path.join('foo', 'bar', 'baz')
+
+
+def test_exists(tmpdir):
+ assert not Prefix(str(tmpdir)).exists('foo')
+ tmpdir.ensure('foo')
+ assert Prefix(str(tmpdir)).exists('foo')
+
+
+def test_star(tmpdir):
+ for f in ('a.txt', 'b.txt', 'c.py'):
+ tmpdir.join(f).ensure()
+ assert set(Prefix(str(tmpdir)).star('.txt')) == {'a.txt', 'b.txt'}
diff --git a/tests/repository_test.py b/tests/repository_test.py
new file mode 100644
index 0000000..ac065ec
--- /dev/null
+++ b/tests/repository_test.py
@@ -0,0 +1,533 @@
+from __future__ import annotations
+
+import os.path
+import shlex
+import shutil
+import sys
+from typing import Any
+from unittest import mock
+
+import cfgv
+import pytest
+
+import pre_commit.constants as C
+from pre_commit import lang_base
+from pre_commit.all_languages import languages
+from pre_commit.clientlib import CONFIG_SCHEMA
+from pre_commit.clientlib import load_manifest
+from pre_commit.hook import Hook
+from pre_commit.languages import python
+from pre_commit.languages import system
+from pre_commit.prefix import Prefix
+from pre_commit.repository import _hook_installed
+from pre_commit.repository import all_hooks
+from pre_commit.repository import install_hook_envs
+from pre_commit.util import cmd_output
+from pre_commit.util import cmd_output_b
+from testing.fixtures import make_config_from_repo
+from testing.fixtures import make_repo
+from testing.language_helpers import run_language
+from testing.util import cwd
+from testing.util import get_resource_path
+
+
+def _hook_run(hook, filenames, color):
+ return run_language(
+ path=hook.prefix.prefix_dir,
+ language=languages[hook.language],
+ exe=hook.entry,
+ args=hook.args,
+ file_args=filenames,
+ version=hook.language_version,
+ deps=hook.additional_dependencies,
+ is_local=hook.src == 'local',
+ require_serial=hook.require_serial,
+ color=color,
+ )
+
+
+def _get_hook_no_install(repo_config, store, hook_id):
+ config = {'repos': [repo_config]}
+ config = cfgv.validate(config, CONFIG_SCHEMA)
+ config = cfgv.apply_defaults(config, CONFIG_SCHEMA)
+ hooks = all_hooks(config, store)
+ hook, = (hook for hook in hooks if hook.id == hook_id)
+ return hook
+
+
+def _get_hook(repo_config, store, hook_id):
+ hook = _get_hook_no_install(repo_config, store, hook_id)
+ install_hook_envs([hook], store)
+ return hook
+
+
+def _test_hook_repo(
+ tempdir_factory,
+ store,
+ repo_path,
+ hook_id,
+ args,
+ expected,
+ expected_return_code=0,
+ config_kwargs=None,
+ color=False,
+):
+ path = make_repo(tempdir_factory, repo_path)
+ config = make_config_from_repo(path, **(config_kwargs or {}))
+ hook = _get_hook(config, store, hook_id)
+ ret, out = _hook_run(hook, args, color=color)
+ assert ret == expected_return_code
+ assert out == expected
+
+
+def test_python_venv_deprecation(store, caplog):
+ config = {
+ 'repo': 'local',
+ 'hooks': [{
+ 'id': 'example',
+ 'name': 'example',
+ 'language': 'python_venv',
+ 'entry': 'echo hi',
+ }],
+ }
+ _get_hook(config, store, 'example')
+ assert caplog.messages[-1] == (
+ '`repo: local` uses deprecated `language: python_venv`. '
+ 'This is an alias for `language: python`. '
+ 'Often `pre-commit autoupdate --repo local` will fix this.'
+ )
+
+
+def test_system_hook_with_spaces(tempdir_factory, store):
+ _test_hook_repo(
+ tempdir_factory, store, 'system_hook_with_spaces_repo',
+ 'system-hook-with-spaces', [os.devnull], b'Hello World\n',
+ )
+
+
+def test_missing_executable(tempdir_factory, store):
+ _test_hook_repo(
+ tempdir_factory, store, 'not_found_exe',
+ 'not-found-exe', [os.devnull],
+ b'Executable `i-dont-exist-lol` not found',
+ expected_return_code=1,
+ )
+
+
+def test_run_a_script_hook(tempdir_factory, store):
+ _test_hook_repo(
+ tempdir_factory, store, 'script_hooks_repo',
+ 'bash_hook', ['bar'], b'bar\nHello World\n',
+ )
+
+
+def test_run_hook_with_spaced_args(tempdir_factory, store):
+ _test_hook_repo(
+ tempdir_factory, store, 'arg_per_line_hooks_repo',
+ 'arg-per-line',
+ ['foo bar', 'baz'],
+ b'arg: hello\narg: world\narg: foo bar\narg: baz\n',
+ )
+
+
+def test_run_hook_with_curly_braced_arguments(tempdir_factory, store):
+ _test_hook_repo(
+ tempdir_factory, store, 'arg_per_line_hooks_repo',
+ 'arg-per-line',
+ [],
+ b"arg: hi {1}\narg: I'm {a} problem\n",
+ config_kwargs={
+ 'hooks': [{
+ 'id': 'arg-per-line',
+ 'args': ['hi {1}', "I'm {a} problem"],
+ }],
+ },
+ )
+
+
+def test_intermixed_stdout_stderr(tempdir_factory, store):
+ _test_hook_repo(
+ tempdir_factory, store, 'stdout_stderr_repo',
+ 'stdout-stderr',
+ [],
+ b'0\n1\n2\n3\n4\n5\n',
+ )
+
+
+@pytest.mark.xfail(sys.platform == 'win32', reason='ptys are posix-only')
+def test_output_isatty(tempdir_factory, store):
+ _test_hook_repo(
+ tempdir_factory, store, 'stdout_stderr_repo',
+ 'tty-check',
+ [],
+ b'stdin: False\nstdout: True\nstderr: True\n',
+ color=True,
+ )
+
+
+def _norm_pwd(path):
+ # Under windows bash's temp and windows temp is different.
+ # This normalizes to the bash /tmp
+ return cmd_output_b(
+ 'bash', '-c', f"cd '{path}' && pwd",
+ )[1].strip()
+
+
+def test_cwd_of_hook(in_git_dir, tempdir_factory, store):
+ # Note: this doubles as a test for `system` hooks
+ _test_hook_repo(
+ tempdir_factory, store, 'prints_cwd_repo',
+ 'prints_cwd', ['-L'], _norm_pwd(in_git_dir.strpath) + b'\n',
+ )
+
+
+def test_lots_of_files(tempdir_factory, store):
+ _test_hook_repo(
+ tempdir_factory, store, 'script_hooks_repo',
+ 'bash_hook', [os.devnull] * 15000, mock.ANY,
+ )
+
+
+def test_additional_dependencies_roll_forward(tempdir_factory, store):
+ path = make_repo(tempdir_factory, 'python_hooks_repo')
+
+ config1 = make_config_from_repo(path)
+ hook1 = _get_hook(config1, store, 'foo')
+ with python.in_env(hook1.prefix, hook1.language_version):
+ assert 'mccabe' not in cmd_output('pip', 'freeze', '-l')[1]
+
+ # Make another repo with additional dependencies
+ config2 = make_config_from_repo(path)
+ config2['hooks'][0]['additional_dependencies'] = ['mccabe']
+ hook2 = _get_hook(config2, store, 'foo')
+ with python.in_env(hook2.prefix, hook2.language_version):
+ assert 'mccabe' in cmd_output('pip', 'freeze', '-l')[1]
+
+ # should not have affected original
+ with python.in_env(hook1.prefix, hook1.language_version):
+ assert 'mccabe' not in cmd_output('pip', 'freeze', '-l')[1]
+
+
+@pytest.mark.parametrize('v', ('v1', 'v2'))
+def test_repository_state_compatibility(tempdir_factory, store, v):
+ path = make_repo(tempdir_factory, 'python_hooks_repo')
+
+ config = make_config_from_repo(path)
+ hook = _get_hook(config, store, 'foo')
+ envdir = lang_base.environment_dir(
+ hook.prefix,
+ python.ENVIRONMENT_DIR,
+ hook.language_version,
+ )
+ os.remove(os.path.join(envdir, f'.install_state_{v}'))
+ assert _hook_installed(hook) is True
+
+
+def test_unknown_keys(store, caplog):
+ config = {
+ 'repo': 'local',
+ 'hooks': [{
+ 'id': 'too-much',
+ 'name': 'too much',
+ 'hello': 'world',
+ 'foo': 'bar',
+ 'language': 'system',
+ 'entry': 'true',
+ }],
+ }
+ _get_hook(config, store, 'too-much')
+ msg, = caplog.messages
+ assert msg == 'Unexpected key(s) present on local => too-much: foo, hello'
+
+
+def test_reinstall(tempdir_factory, store, log_info_mock):
+ path = make_repo(tempdir_factory, 'python_hooks_repo')
+ config = make_config_from_repo(path)
+ _get_hook(config, store, 'foo')
+ # We print some logging during clone (1) + install (3)
+ assert log_info_mock.call_count == 4
+ log_info_mock.reset_mock()
+ # Reinstall on another run should not trigger another install
+ _get_hook(config, store, 'foo')
+ assert log_info_mock.call_count == 0
+
+
+def test_control_c_control_c_on_install(tempdir_factory, store):
+ """Regression test for #186."""
+ path = make_repo(tempdir_factory, 'python_hooks_repo')
+ config = make_config_from_repo(path)
+ hooks = [_get_hook_no_install(config, store, 'foo')]
+
+ class MyKeyboardInterrupt(KeyboardInterrupt):
+ pass
+
+ # To simulate a killed install, we'll make PythonEnv.run raise ^C
+ # and then to simulate a second ^C during cleanup, we'll make shutil.rmtree
+ # raise as well.
+ with pytest.raises(MyKeyboardInterrupt):
+ with mock.patch.object(
+ lang_base, 'setup_cmd', side_effect=MyKeyboardInterrupt,
+ ):
+ with mock.patch.object(
+ shutil, 'rmtree', side_effect=MyKeyboardInterrupt,
+ ):
+ install_hook_envs(hooks, store)
+
+ # Should have made an environment, however this environment is broken!
+ hook, = hooks
+ envdir = lang_base.environment_dir(
+ hook.prefix,
+ python.ENVIRONMENT_DIR,
+ hook.language_version,
+ )
+
+ assert os.path.exists(envdir)
+
+ # However, it should be perfectly runnable (reinstall after botched
+ # install)
+ install_hook_envs(hooks, store)
+ ret, out = _hook_run(hook, (), color=False)
+ assert ret == 0
+
+
def test_invalidated_virtualenv(tempdir_factory, store):
    # A cached virtualenv may become invalidated if the system python upgrades
    # This should not cause every hook in that virtualenv to fail.
    path = make_repo(tempdir_factory, 'python_hooks_repo')
    config = make_config_from_repo(path)
    hook = _get_hook(config, store, 'foo')

    # Simulate breaking of the virtualenv by deleting its `site` module
    envdir = lang_base.environment_dir(
        hook.prefix,
        python.ENVIRONMENT_DIR,
        hook.language_version,
    )
    libdir = os.path.join(envdir, 'lib', hook.language_version)
    paths = [
        os.path.join(libdir, p) for p in ('site.py', 'site.pyc', '__pycache__')
    ]
    cmd_output_b('rm', '-rf', *paths)

    # pre-commit should rebuild the virtualenv and it should be runnable
    hook = _get_hook(config, store, 'foo')
    ret, out = _hook_run(hook, (), color=False)
    assert ret == 0


def test_really_long_file_paths(tempdir_factory, store):
    # regression check: installing hooks must work from a deeply nested cwd
    base_path = tempdir_factory.get()
    really_long_path = os.path.join(base_path, 'really_long' * 10)
    cmd_output_b('git', 'init', really_long_path)

    path = make_repo(tempdir_factory, 'python_hooks_repo')
    config = make_config_from_repo(path)

    with cwd(really_long_path):
        _get_hook(config, store, 'foo')
+
+
def test_config_overrides_repo_specifics(tempdir_factory, store):
    # values in .pre-commit-config.yaml take precedence over the manifest
    path = make_repo(tempdir_factory, 'script_hooks_repo')
    config = make_config_from_repo(path)

    hook = _get_hook(config, store, 'bash_hook')
    assert hook.files == ''
    # Set the file regex to something else
    config['hooks'][0]['files'] = '\\.sh$'
    hook = _get_hook(config, store, 'bash_hook')
    assert hook.files == '\\.sh$'


def _create_repo_with_tags(tempdir_factory, src, tag):
    # copy the named resource repo and tag its HEAD
    path = make_repo(tempdir_factory, src)
    cmd_output_b('git', 'tag', tag, cwd=path)
    return path
+
+
def test_tags_on_repositories(in_tmpdir, tempdir_factory, store):
    # a tag (rather than a sha) must be usable as `rev`
    tag = 'v1.1'
    git1 = _create_repo_with_tags(tempdir_factory, 'prints_cwd_repo', tag)
    git2 = _create_repo_with_tags(tempdir_factory, 'script_hooks_repo', tag)

    config1 = make_config_from_repo(git1, rev=tag)
    hook1 = _get_hook(config1, store, 'prints_cwd')
    ret1, out1 = _hook_run(hook1, ('-L',), color=False)
    assert ret1 == 0
    assert out1.strip() == _norm_pwd(in_tmpdir)

    config2 = make_config_from_repo(git2, rev=tag)
    hook2 = _get_hook(config2, store, 'bash_hook')
    ret2, out2 = _hook_run(hook2, ('bar',), color=False)
    assert ret2 == 0
    assert out2 == b'bar\nHello World\n'


@pytest.fixture
def local_python_config():
    # Make a "local" hooks repo that just installs our other hooks repo
    repo_path = get_resource_path('python_hooks_repo')
    manifest = load_manifest(os.path.join(repo_path, C.MANIFEST_FILE))
    hooks = [
        dict(hook, additional_dependencies=[repo_path]) for hook in manifest
    ]
    return {'repo': 'local', 'hooks': hooks}
+
+
def test_local_python_repo(store, local_python_config):
    hook = _get_hook(local_python_config, store, 'foo')
    # language_version should have been adjusted to the interpreter version
    assert hook.language_version != C.DEFAULT
    ret, out = _hook_run(hook, ('filename',), color=False)
    assert ret == 0
    assert out == b"['filename']\nHello World\n"


def test_default_language_version(store, local_python_config):
    config: dict[str, Any] = {
        'default_language_version': {'python': 'fake'},
        'default_stages': ['pre-commit'],
        'repos': [local_python_config],
    }

    # `language_version` was not set, should default
    hook, = all_hooks(config, store)
    assert hook.language_version == 'fake'

    # `language_version` is set, should not default
    config['repos'][0]['hooks'][0]['language_version'] = 'fake2'
    hook, = all_hooks(config, store)
    assert hook.language_version == 'fake2'


def test_default_stages(store, local_python_config):
    config: dict[str, Any] = {
        'default_language_version': {'python': C.DEFAULT},
        'default_stages': ['pre-commit'],
        'repos': [local_python_config],
    }

    # `stages` was not set, should default
    hook, = all_hooks(config, store)
    assert hook.stages == ['pre-commit']

    # `stages` is set, should not default
    config['repos'][0]['hooks'][0]['stages'] = ['pre-push']
    hook, = all_hooks(config, store)
    assert hook.stages == ['pre-push']
+
+
def test_hook_id_not_present(tempdir_factory, store, caplog):
    # referencing a hook id the repo does not provide is a fatal error with a
    # helpful message
    path = make_repo(tempdir_factory, 'script_hooks_repo')
    config = make_config_from_repo(path)
    config['hooks'][0]['id'] = 'i-dont-exist'
    with pytest.raises(SystemExit):
        _get_hook(config, store, 'i-dont-exist')
    _, msg = caplog.messages
    assert msg == (
        f'`i-dont-exist` is not present in repository file://{path}.  '
        f'Typo? Perhaps it is introduced in a newer version?  '
        f'Often `pre-commit autoupdate` fixes this.'
    )


def test_manifest_hooks(tempdir_factory, store):
    # every manifest default should be fully materialized on the Hook
    path = make_repo(tempdir_factory, 'script_hooks_repo')
    config = make_config_from_repo(path)
    hook = _get_hook(config, store, 'bash_hook')

    assert hook == Hook(
        src=f'file://{path}',
        prefix=Prefix(mock.ANY),
        additional_dependencies=[],
        alias='',
        always_run=False,
        args=[],
        description='',
        entry='bin/hook.sh',
        exclude='^$',
        exclude_types=[],
        files='',
        id='bash_hook',
        language='script',
        language_version='default',
        log_file='',
        minimum_pre_commit_version='0',
        name='Bash hook',
        pass_filenames=True,
        require_serial=False,
        stages=[
            'commit-msg',
            'post-checkout',
            'post-commit',
            'post-merge',
            'post-rewrite',
            'pre-commit',
            'pre-merge-commit',
            'pre-push',
            'pre-rebase',
            'prepare-commit-msg',
            'manual',
        ],
        types=['file'],
        types_or=[],
        verbose=False,
        fail_fast=False,
    )
+
+
def test_non_installable_hook_error_for_language_version(store, caplog):
    # `language: system` installs nothing, so `language_version` is an error
    config = {
        'repo': 'local',
        'hooks': [{
            'id': 'system-hook',
            'name': 'system-hook',
            'language': 'system',
            'entry': 'python3 -c "import sys; print(sys.version)"',
            'language_version': 'python3.10',
        }],
    }
    with pytest.raises(SystemExit) as excinfo:
        _get_hook(config, store, 'system-hook')
    assert excinfo.value.code == 1

    msg, = caplog.messages
    assert msg == (
        'The hook `system-hook` specifies `language_version` but is using '
        'language `system` which does not install an environment.  '
        'Perhaps you meant to use a specific language?'
    )


def test_non_installable_hook_error_for_additional_dependencies(store, caplog):
    # same restriction applies to `additional_dependencies`
    config = {
        'repo': 'local',
        'hooks': [{
            'id': 'system-hook',
            'name': 'system-hook',
            'language': 'system',
            'entry': 'python3 -c "import sys; print(sys.version)"',
            'additional_dependencies': ['astpretty'],
        }],
    }
    with pytest.raises(SystemExit) as excinfo:
        _get_hook(config, store, 'system-hook')
    assert excinfo.value.code == 1

    msg, = caplog.messages
    assert msg == (
        'The hook `system-hook` specifies `additional_dependencies` but is '
        'using language `system` which does not install an environment.  '
        'Perhaps you meant to use a specific language?'
    )
+
+
def test_args_with_spaces_and_quotes(tmp_path):
    """Arguments with spaces, quotes, and shell metacharacters must reach
    the hook unchanged.
    """
    tricky_args = ('i have spaces', 'and"\'quotes', '$and !this')
    script = (
        f'{shlex.quote(sys.executable)}'
        " -c 'import sys; print(sys.argv[1:])'"
    )
    result = run_language(tmp_path, system, script, tricky_args)

    assert result == (0, b"['i have spaces', 'and\"\\'quotes', '$and !this']\n")
diff --git a/tests/staged_files_only_test.py b/tests/staged_files_only_test.py
new file mode 100644
index 0000000..cd2f638
--- /dev/null
+++ b/tests/staged_files_only_test.py
@@ -0,0 +1,449 @@
+from __future__ import annotations
+
+import contextlib
+import itertools
+import os.path
+import shutil
+
+import pytest
+import re_assert
+
+from pre_commit import git
+from pre_commit.errors import FatalError
+from pre_commit.staged_files_only import staged_files_only
+from pre_commit.util import cmd_output
+from testing.auto_namedtuple import auto_namedtuple
+from testing.fixtures import git_dir
+from testing.util import cwd
+from testing.util import get_resource_path
+from testing.util import git_commit
+from testing.util import xfailif_windows
+
+
# staged contents of `foo` used throughout these tests (trailing newline)
FOO_CONTENTS = '\n'.join(('1', '2', '3', '4', '5', '6', '7', '8', ''))


@pytest.fixture
def patch_dir(tempdir_factory):
    # directory where staged_files_only stores its patch files
    return tempdir_factory.get()


def get_short_git_status():
    """Return a mapping of filename -> short status code (e.g. 'A', 'AM')."""
    git_status = cmd_output('git', 'status', '-s')[1]
    line_parts = [line.split() for line in git_status.splitlines()]
    return {v: k for k, v in line_parts}


@pytest.fixture
def foo_staged(in_git_dir):
    # a git dir with `foo` written and staged (but not committed)
    foo = in_git_dir.join('foo')
    foo.write(FOO_CONTENTS)
    cmd_output('git', 'add', 'foo')
    yield auto_namedtuple(path=in_git_dir.strpath, foo_filename=foo.strpath)


def _test_foo_state(
        path,
        foo_contents=FOO_CONTENTS,
        status='A',
        encoding='UTF-8',
):
    """Assert `foo` exists with the given contents and short git status."""
    assert os.path.exists(path.foo_filename)
    with open(path.foo_filename, encoding=encoding) as f:
        assert f.read() == foo_contents
    actual_status = get_short_git_status()['foo']
    assert status == actual_status
+
+
def test_foo_staged(foo_staged):
    _test_foo_state(foo_staged)


def test_foo_nothing_unstaged(foo_staged, patch_dir):
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
    _test_foo_state(foo_staged)


def test_foo_something_unstaged(foo_staged, patch_dir):
    with open(foo_staged.foo_filename, 'w') as foo_file:
        foo_file.write('herp\nderp\n')

    _test_foo_state(foo_staged, 'herp\nderp\n', 'AM')

    # inside the context only the staged contents remain; the unstaged edit
    # is restored on exit
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)

    _test_foo_state(foo_staged, 'herp\nderp\n', 'AM')


def test_does_not_crash_patch_dir_does_not_exist(foo_staged, patch_dir):
    with open(foo_staged.foo_filename, 'w') as foo_file:
        foo_file.write('hello\nworld\n')

    shutil.rmtree(patch_dir)
    with staged_files_only(patch_dir):
        pass


def test_something_unstaged_ext_diff_tool(foo_staged, patch_dir, tmpdir):
    # a configured external diff tool must not break patch save / restore
    diff_tool = tmpdir.join('diff-tool.sh')
    diff_tool.write('#!/usr/bin/env bash\necho "$@"\n')
    cmd_output('git', 'config', 'diff.external', diff_tool.strpath)
    test_foo_something_unstaged(foo_staged, patch_dir)


def test_foo_something_unstaged_diff_color_always(foo_staged, patch_dir):
    # colorized diff output must not corrupt the saved patch
    cmd_output('git', 'config', '--local', 'color.diff', 'always')
    test_foo_something_unstaged(foo_staged, patch_dir)
+
+
def test_foo_both_modify_non_conflicting(foo_staged, patch_dir):
    with open(foo_staged.foo_filename, 'w') as foo_file:
        foo_file.write(f'{FOO_CONTENTS}9\n')

    _test_foo_state(foo_staged, f'{FOO_CONTENTS}9\n', 'AM')

    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)

        # Modify the file as part of the "pre-commit"
        with open(foo_staged.foo_filename, 'w') as foo_file:
            foo_file.write(FOO_CONTENTS.replace('1', 'a'))

        _test_foo_state(foo_staged, FOO_CONTENTS.replace('1', 'a'), 'AM')

    # the hook's edit and the restored unstaged edit merge cleanly
    _test_foo_state(foo_staged, f'{FOO_CONTENTS.replace("1", "a")}9\n', 'AM')


def test_foo_both_modify_conflicting(foo_staged, patch_dir):
    with open(foo_staged.foo_filename, 'w') as foo_file:
        foo_file.write(FOO_CONTENTS.replace('1', 'a'))

    _test_foo_state(foo_staged, FOO_CONTENTS.replace('1', 'a'), 'AM')

    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)

        # Modify in the same place as the stashed diff
        with open(foo_staged.foo_filename, 'w') as foo_file:
            foo_file.write(FOO_CONTENTS.replace('1', 'b'))

        _test_foo_state(foo_staged, FOO_CONTENTS.replace('1', 'b'), 'AM')

    # on conflict the hook's edit loses to the restored stash
    _test_foo_state(foo_staged, FOO_CONTENTS.replace('1', 'a'), 'AM')
+
+
@pytest.fixture
def img_staged(in_git_dir):
    # a git dir with the binary file `img.jpg` staged
    img = in_git_dir.join('img.jpg')
    shutil.copy(get_resource_path('img1.jpg'), img.strpath)
    cmd_output('git', 'add', 'img.jpg')
    yield auto_namedtuple(path=in_git_dir.strpath, img_filename=img.strpath)


def _test_img_state(path, expected_file='img1.jpg', status='A'):
    """Assert `img.jpg` matches the named resource and git status."""
    assert os.path.exists(path.img_filename)
    with open(path.img_filename, 'rb') as f1:
        with open(get_resource_path(expected_file), 'rb') as f2:
            assert f1.read() == f2.read()
    actual_status = get_short_git_status()['img.jpg']
    assert status == actual_status


def test_img_staged(img_staged):
    _test_img_state(img_staged)


def test_img_nothing_unstaged(img_staged, patch_dir):
    with staged_files_only(patch_dir):
        _test_img_state(img_staged)
    _test_img_state(img_staged)


def test_img_something_unstaged(img_staged, patch_dir):
    # binary (non-diffable) unstaged changes must also round-trip
    shutil.copy(get_resource_path('img2.jpg'), img_staged.img_filename)

    _test_img_state(img_staged, 'img2.jpg', 'AM')

    with staged_files_only(patch_dir):
        _test_img_state(img_staged)

    _test_img_state(img_staged, 'img2.jpg', 'AM')


def test_img_conflict(img_staged, patch_dir):
    """Admittedly, this shouldn't happen, but just in case."""
    shutil.copy(get_resource_path('img2.jpg'), img_staged.img_filename)

    _test_img_state(img_staged, 'img2.jpg', 'AM')

    with staged_files_only(patch_dir):
        _test_img_state(img_staged)
        shutil.copy(get_resource_path('img3.jpg'), img_staged.img_filename)
        _test_img_state(img_staged, 'img3.jpg', 'AM')

    _test_img_state(img_staged, 'img2.jpg', 'AM')
+
+
@pytest.fixture
def repo_with_commits(tempdir_factory):
    # a repo with two commits; exposes both revisions
    path = git_dir(tempdir_factory)
    with cwd(path):
        open('foo', 'a+').close()
        cmd_output('git', 'add', 'foo')
        git_commit()
        rev1 = cmd_output('git', 'rev-parse', 'HEAD')[1].strip()
        git_commit()
        rev2 = cmd_output('git', 'rev-parse', 'HEAD')[1].strip()

    yield auto_namedtuple(path=path, rev1=rev1, rev2=rev2)


def checkout_submodule(rev):
    # check the `sub` submodule (relative to cwd) out at `rev`
    cmd_output('git', 'checkout', rev, cwd='sub')


@pytest.fixture
def sub_staged(repo_with_commits, tempdir_factory):
    # a repo with `repo_with_commits` added as submodule `sub` at rev1,
    # staged but not committed
    path = git_dir(tempdir_factory)
    with cwd(path):
        open('bar', 'a+').close()
        cmd_output('git', 'add', 'bar')
        git_commit()
        cmd_output(
            'git', 'submodule', 'add', repo_with_commits.path, 'sub',
        )
        checkout_submodule(repo_with_commits.rev1)
        cmd_output('git', 'add', 'sub')

    yield auto_namedtuple(
        path=path,
        sub_path=os.path.join(path, 'sub'),
        submodule=repo_with_commits,
    )


def _test_sub_state(path, rev='rev1', status='A'):
    """Assert the submodule is checked out at `rev` with the given status."""
    assert os.path.exists(path.sub_path)
    with cwd(path.sub_path):
        actual_rev = cmd_output('git', 'rev-parse', 'HEAD')[1].strip()
    assert actual_rev == getattr(path.submodule, rev)
    actual_status = get_short_git_status()['sub']
    assert actual_status == status


def test_sub_staged(sub_staged):
    _test_sub_state(sub_staged)


def test_sub_nothing_unstaged(sub_staged, patch_dir):
    with staged_files_only(patch_dir):
        _test_sub_state(sub_staged)
    _test_sub_state(sub_staged)


def test_sub_something_unstaged(sub_staged, patch_dir):
    checkout_submodule(sub_staged.submodule.rev2)

    _test_sub_state(sub_staged, 'rev2', 'AM')

    with staged_files_only(patch_dir):
        # This is different from others, we don't want to touch subs
        _test_sub_state(sub_staged, 'rev2', 'AM')

    _test_sub_state(sub_staged, 'rev2', 'AM')
+
+
def test_submodule_does_not_discard_changes(sub_staged, patch_dir):
    # unstaged edit in the parent repo...
    with open('bar', 'w') as f:
        f.write('unstaged changes')

    # ...and an edit inside the submodule working tree
    foo_path = os.path.join(sub_staged.sub_path, 'foo')
    with open(foo_path, 'w') as f:
        f.write('foo contents')

    with staged_files_only(patch_dir):
        # the parent's unstaged edit is stashed away...
        with open('bar') as f:
            assert f.read() == ''

        # ...but the submodule's working tree is left untouched
        with open(foo_path) as f:
            assert f.read() == 'foo contents'

    with open('bar') as f:
        assert f.read() == 'unstaged changes'

    with open(foo_path) as f:
        assert f.read() == 'foo contents'


def test_submodule_does_not_discard_changes_recurse(sub_staged, patch_dir):
    # same as above but with `submodule.recurse` enabled
    cmd_output('git', 'config', 'submodule.recurse', '1', cwd=sub_staged.path)

    test_submodule_does_not_discard_changes(sub_staged, patch_dir)


def test_stage_utf8_changes(foo_staged, patch_dir):
    contents = '\u2603'
    with open('foo', 'w', encoding='UTF-8') as foo_file:
        foo_file.write(contents)

    _test_foo_state(foo_staged, contents, 'AM')
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
    _test_foo_state(foo_staged, contents, 'AM')


def test_stage_non_utf8_changes(foo_staged, patch_dir):
    contents = 'ú'
    # Produce a latin-1 diff
    with open('foo', 'w', encoding='latin-1') as foo_file:
        foo_file.write(contents)

    _test_foo_state(foo_staged, contents, 'AM', encoding='latin-1')
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
    _test_foo_state(foo_staged, contents, 'AM', encoding='latin-1')


def test_non_utf8_conflicting_diff(foo_staged, patch_dir):
    """Regression test for #397"""
    # The trailing whitespace is important here, this triggers git to produce
    # an error message which looks like:
    #
    # ...patch1471530032:14: trailing whitespace.
    # [[unprintable character]][[space character]]
    # error: patch failed: foo:1
    # error: foo: patch does not apply
    #
    # Previously, the error message (though discarded immediately) was being
    # decoded with the UTF-8 codec (causing a crash)
    contents = 'ú \n'
    with open('foo', 'w', encoding='latin-1') as foo_file:
        foo_file.write(contents)

    _test_foo_state(foo_staged, contents, 'AM', encoding='latin-1')
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
        # Create a conflicting diff that will need to be rolled back
        with open('foo', 'w') as foo_file:
            foo_file.write('')
    _test_foo_state(foo_staged, contents, 'AM', encoding='latin-1')
+
+
def _write(b):
    # overwrite `foo` in the cwd with raw bytes
    with open('foo', 'wb') as f:
        f.write(b)


def assert_no_diff():
    # index and working tree agree (`git diff-index` exits 0)
    tree = cmd_output('git', 'write-tree')[1].strip()
    cmd_output('git', 'diff-index', tree, '--exit-code')


# all four (crlf_before, crlf_after) combinations
bool_product = tuple(itertools.product((True, False), repeat=2))


@pytest.mark.parametrize(('crlf_before', 'crlf_after'), bool_product)
@pytest.mark.parametrize('autocrlf', ('true', 'false', 'input'))
def test_crlf(in_git_dir, patch_dir, crlf_before, crlf_after, autocrlf):
    # every combination of line endings / autocrlf must round-trip cleanly
    cmd_output('git', 'config', '--local', 'core.autocrlf', autocrlf)

    before, after = b'1\n2\n', b'3\n4\n\n'
    before = before.replace(b'\n', b'\r\n') if crlf_before else before
    after = after.replace(b'\n', b'\r\n') if crlf_after else after

    _write(before)
    cmd_output('git', 'add', 'foo')
    _write(after)
    with staged_files_only(patch_dir):
        assert_no_diff()


@pytest.mark.parametrize('autocrlf', ('true', 'input'))
def test_crlf_diff_only(in_git_dir, patch_dir, autocrlf):
    # due to a quirk (?) in git -- a diff only in crlf does not show but
    # still results in an exit code of `1`
    # we treat this as "no diff" -- though ideally it would discard the diff
    # while committing
    cmd_output('git', 'config', '--local', 'core.autocrlf', autocrlf)

    _write(b'1\r\n2\r\n3\r\n')
    cmd_output('git', 'add', 'foo')
    _write(b'1\n2\n3\n')
    with staged_files_only(patch_dir):
        pass


def test_whitespace_errors(in_git_dir, patch_dir):
    # apply.whitespace=error must not break restoring the patch
    cmd_output('git', 'config', '--local', 'apply.whitespace', 'error')
    test_crlf(in_git_dir, patch_dir, True, True, 'true')


def test_autocrlf_committed_crlf(in_git_dir, patch_dir):
    """Regression test for #570"""
    # commit CRLF content with autocrlf off, then edit with autocrlf on
    cmd_output('git', 'config', '--local', 'core.autocrlf', 'false')
    _write(b'1\r\n2\r\n')
    cmd_output('git', 'add', 'foo')
    git_commit()

    cmd_output('git', 'config', '--local', 'core.autocrlf', 'true')
    _write(b'1\r\n2\r\n\r\n\r\n\r\n')

    with staged_files_only(patch_dir):
        assert_no_diff()
+
+
def test_intent_to_add(in_git_dir, patch_dir):
    """Regression test for #881"""
    _write(b'hello\nworld\n')
    cmd_output('git', 'add', '--intent-to-add', 'foo')

    # intent-to-add state must survive the context manager
    assert git.intent_to_add_files() == ['foo']
    with staged_files_only(patch_dir):
        assert_no_diff()
    assert git.intent_to_add_files() == ['foo']


@contextlib.contextmanager
def _unreadable(f):
    """Temporarily remove all permission bits from `f`."""
    orig = os.stat(f).st_mode
    os.chmod(f, 0o000)
    try:
        yield
    finally:
        os.chmod(f, orig)


@xfailif_windows  # pragma: win32 no cover
def test_failed_diff_does_not_discard_changes(in_git_dir, patch_dir):
    # stage 3 files
    for i in range(3):
        with open(str(i), 'w') as f:
            f.write(str(i))
    cmd_output('git', 'add', '0', '1', '2')

    # modify all of their contents
    for i in range(3):
        with open(str(i), 'w') as f:
            f.write('new contents')

    with _unreadable('1'):
        with pytest.raises(FatalError) as excinfo:
            with staged_files_only(patch_dir):
                raise AssertionError('should have errored on enter')

    # the diff command failed to produce a diff of `1`
    msg, = excinfo.value.args
    re_assert.Matches(
        r'^pre-commit failed to diff -- perhaps due to permissions\?\n\n'
        r'command: .*\n'
        r'return code: 128\n'
        r'stdout: \(none\)\n'
        r'stderr:\n'
        r'    error: open\("1"\): Permission denied\n'
        r'    fatal: cannot hash 1$',
    ).assert_matches(msg)

    # even though it errored, the unstaged changes should still be present
    for i in range(3):
        with open(str(i)) as f:
            assert f.read() == 'new contents'
diff --git a/tests/store_test.py b/tests/store_test.py
new file mode 100644
index 0000000..45ec732
--- /dev/null
+++ b/tests/store_test.py
@@ -0,0 +1,272 @@
+from __future__ import annotations
+
+import os.path
+import sqlite3
+import stat
+from unittest import mock
+
+import pytest
+
+from pre_commit import git
+from pre_commit.store import _get_default_directory
+from pre_commit.store import _LOCAL_RESOURCES
+from pre_commit.store import Store
+from pre_commit.util import CalledProcessError
+from pre_commit.util import cmd_output
+from testing.fixtures import git_dir
+from testing.util import cwd
+from testing.util import git_commit
+from testing.util import xfailif_windows
+
+
def test_our_session_fixture_works():
    """There's a session fixture which makes `Store` invariantly raise to
    prevent writing to the home directory.
    """
    with pytest.raises(AssertionError):
        Store()


def test_get_default_directory_defaults_to_home():
    # Note: we use the module-level function here, which is not mocked
    ret = _get_default_directory()
    expected = os.path.realpath(os.path.expanduser('~/.cache/pre-commit'))
    assert ret == expected


def test_adheres_to_xdg_specification():
    # XDG_CACHE_HOME takes precedence over the ~/.cache default
    with mock.patch.dict(
            os.environ, {'XDG_CACHE_HOME': '/tmp/fakehome'},
    ):
        ret = _get_default_directory()
        expected = os.path.realpath('/tmp/fakehome/pre-commit')
        assert ret == expected


def test_uses_environment_variable_when_present():
    # PRE_COMMIT_HOME overrides both XDG and the ~/.cache default
    with mock.patch.dict(
            os.environ, {'PRE_COMMIT_HOME': '/tmp/pre_commit_home'},
    ):
        ret = _get_default_directory()
        expected = os.path.realpath('/tmp/pre_commit_home')
        assert ret == expected
+
+
def test_store_init(store):
    # Should create the store directory
    assert os.path.exists(store.directory)
    # Should create a README file indicating what the directory is about
    with open(os.path.join(store.directory, 'README')) as readme_file:
        readme_contents = readme_file.read()
        for text_line in (
            'This directory is maintained by the pre-commit project.',
            'Learn more: https://github.com/pre-commit/pre-commit',
        ):
            assert text_line in readme_contents


def test_clone(store, tempdir_factory, log_info_mock):
    # make a repo with two commits; clone at the older rev
    path = git_dir(tempdir_factory)
    with cwd(path):
        git_commit()
        rev = git.head_rev(path)
        git_commit()

    ret = store.clone(path, rev)
    # Should have printed some stuff
    assert log_info_mock.call_args_list[0][0][0].startswith(
        'Initializing environment for ',
    )

    # Should return a directory inside of the store
    assert os.path.exists(ret)
    assert ret.startswith(store.directory)
    # Directory should start with `repo`
    _, dirname = os.path.split(ret)
    assert dirname.startswith('repo')
    # Should be checked out to the rev we specified
    assert git.head_rev(ret) == rev

    # Assert there's an entry in the sqlite db for this
    assert store.select_all_repos() == [(path, rev, ret)]
+
+
def test_clone_cleans_up_on_checkout_failure(store):
    with pytest.raises(Exception) as excinfo:
        # This raises an exception because you can't clone something that
        # doesn't exist!
        store.clone('/i_dont_exist_lol', 'fake_rev')
    assert '/i_dont_exist_lol' in str(excinfo.value)

    # no partially-cloned `repo*` directory may be left behind
    repo_dirs = [
        d for d in os.listdir(store.directory) if d.startswith('repo')
    ]
    assert repo_dirs == []


def test_clone_when_repo_already_exists(store):
    # Create an entry in the sqlite db that makes it look like the repo has
    # been cloned.
    with sqlite3.connect(store.db_path) as db:
        db.execute(
            'INSERT INTO repos (repo, ref, path) '
            'VALUES ("fake_repo", "fake_ref", "fake_path")',
        )

    assert store.clone('fake_repo', 'fake_ref') == 'fake_path'
+
+
def test_clone_shallow_failure_fallback_to_complete(
        store, tempdir_factory,
        log_info_mock,
):
    path = git_dir(tempdir_factory)
    with cwd(path):
        git_commit()
        rev = git.head_rev(path)
        git_commit()

    # Force shallow clone failure
    def fake_shallow_clone(self, *args, **kwargs):
        raise CalledProcessError(1, (), b'', None)
    store._shallow_clone = fake_shallow_clone

    # the complete-clone fallback must still succeed
    ret = store.clone(path, rev)

    # Should have printed some stuff
    assert log_info_mock.call_args_list[0][0][0].startswith(
        'Initializing environment for ',
    )

    # Should return a directory inside of the store
    assert os.path.exists(ret)
    assert ret.startswith(store.directory)
    # Directory should start with `repo`
    _, dirname = os.path.split(ret)
    assert dirname.startswith('repo')
    # Should be checked out to the rev we specified
    assert git.head_rev(ret) == rev

    # Assert there's an entry in the sqlite db for this
    assert store.select_all_repos() == [(path, rev, ret)]


def test_clone_tag_not_on_mainline(store, tempdir_factory):
    path = git_dir(tempdir_factory)
    with cwd(path):
        git_commit()
        cmd_output('git', 'checkout', 'master', '-b', 'branch')
        git_commit()
        cmd_output('git', 'tag', 'v1')
        cmd_output('git', 'checkout', 'master')
        cmd_output('git', 'branch', '-D', 'branch')

    # previously crashed on unreachable refs
    store.clone(path, 'v1')
+
+
def test_create_when_directory_exists_but_not_db(store):
    # In versions <= 0.3.5, there was no sqlite db causing a need for
    # backward compatibility
    os.remove(store.db_path)
    store = Store(store.directory)
    assert os.path.exists(store.db_path)


def test_create_when_store_already_exists(store):
    # an assertion that this is idempotent and does not crash
    Store(store.directory)


def test_db_repo_name(store):
    # additional dependencies are appended to the key, comma separated
    assert store.db_repo_name('repo', ()) == 'repo'
    assert store.db_repo_name('repo', ('b', 'a', 'c')) == 'repo:b,a,c'


def test_local_resources_reflects_reality():
    # every empty_template_* resource on disk must appear in _LOCAL_RESOURCES
    # (and vice versa)
    on_disk = {
        res.removeprefix('empty_template_')
        for res in os.listdir('pre_commit/resources')
        if res.startswith('empty_template_')
    }
    assert on_disk == {os.path.basename(x) for x in _LOCAL_RESOURCES}
+
+
def test_mark_config_as_used(store, tmpdir):
    with tmpdir.as_cwd():
        f = tmpdir.join('f').ensure()
        # a relative path is recorded resolved to its absolute path
        store.mark_config_used('f')
        assert store.select_all_configs() == [f.strpath]


def test_mark_config_as_used_idempotent(store, tmpdir):
    test_mark_config_as_used(store, tmpdir)
    test_mark_config_as_used(store, tmpdir)


def test_mark_config_as_used_does_not_exist(store):
    # nonexistent configs are ignored rather than recorded
    store.mark_config_used('f')
    assert store.select_all_configs() == []


def _simulate_pre_1_14_0(store):
    # the `configs` table did not exist before 1.14.0
    with store.connect() as db:
        db.executescript('DROP TABLE configs')


def test_select_all_configs_roll_forward(store):
    _simulate_pre_1_14_0(store)
    assert store.select_all_configs() == []


def test_mark_config_as_used_roll_forward(store, tmpdir):
    _simulate_pre_1_14_0(store)
    test_mark_config_as_used(store, tmpdir)
+
+
@xfailif_windows  # pragma: win32 no cover
def test_mark_config_as_used_readonly(tmpdir):
    cfg = tmpdir.join('f').ensure()
    store_dir = tmpdir.join('store')
    # make a store, then we'll convert its directory to be readonly
    assert not Store(str(store_dir)).readonly  # directory didn't exist
    assert not Store(str(store_dir)).readonly  # directory did exist

    def _chmod_minus_w(p):
        # clear all write bits on `p`
        st = os.stat(p)
        os.chmod(p, st.st_mode & ~(stat.S_IWUSR | stat.S_IWOTH | stat.S_IWGRP))

    _chmod_minus_w(store_dir)
    for fname in os.listdir(store_dir):
        assert not os.path.isdir(fname)
        _chmod_minus_w(os.path.join(store_dir, fname))

    store = Store(str(store_dir))
    assert store.readonly
    # should be skipped due to readonly
    store.mark_config_used(str(cfg))
    assert store.select_all_configs() == []
+
+
def test_clone_with_recursive_submodules(store, tmp_path):
    """Cloning a repo with submodules must also check the submodule's
    contents out inside the cloned copy.
    """
    # build a repo to use as a submodule
    sub = tmp_path.joinpath('sub')
    sub.mkdir()
    sub.joinpath('submodule').write_text('i am a submodule')
    cmd_output('git', '-C', str(sub), 'init', '.')
    cmd_output('git', '-C', str(sub), 'add', '.')
    git.commit(str(sub))

    # build the repo that embeds it as submodule `sub`
    repo = tmp_path.joinpath('repo')
    repo.mkdir()
    repo.joinpath('repository').write_text('i am a repo')
    cmd_output('git', '-C', str(repo), 'init', '.')
    cmd_output('git', '-C', str(repo), 'add', '.')
    cmd_output('git', '-C', str(repo), 'submodule', 'add', str(sub), 'sub')
    git.commit(str(repo))

    rev = git.head_rev(str(repo))
    ret = store.clone(str(repo), rev)

    assert os.path.exists(ret)
    # NOTE: join with paths *relative* to the clone.  The previous
    # `os.path.join(ret, str(repo), ...)` joined an absolute component,
    # which discards `ret` entirely and asserted against the original
    # repositories instead of the clone (a vacuous check).
    assert os.path.exists(os.path.join(ret, 'repository'))
    assert os.path.exists(os.path.join(ret, 'sub', 'submodule'))
diff --git a/tests/util_test.py b/tests/util_test.py
new file mode 100644
index 0000000..5b26211
--- /dev/null
+++ b/tests/util_test.py
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+import os.path
+import stat
+import subprocess
+
+import pytest
+
+from pre_commit.util import CalledProcessError
+from pre_commit.util import clean_path_on_failure
+from pre_commit.util import cmd_output
+from pre_commit.util import cmd_output_b
+from pre_commit.util import cmd_output_p
+from pre_commit.util import make_executable
+from pre_commit.util import rmtree
+
+
def test_CalledProcessError_str():
    # output sections are rendered indented with trailing newlines stripped
    error = CalledProcessError(1, ('exe',), b'output\n', b'errors\n')
    assert str(error) == (
        "command: ('exe',)\n"
        'return code: 1\n'
        'stdout:\n'
        '    output\n'
        'stderr:\n'
        '    errors'
    )


def test_CalledProcessError_str_nooutput():
    # empty stdout / stderr render as `(none)` on the same line
    error = CalledProcessError(1, ('exe',), b'', b'')
    assert str(error) == (
        "command: ('exe',)\n"
        'return code: 1\n'
        'stdout: (none)\n'
        'stderr: (none)'
    )
+
+
def test_clean_on_failure_noop(in_tmpdir):
    # nothing to clean when the path was never created
    with clean_path_on_failure('foo'):
        pass


def test_clean_path_on_failure_does_nothing_when_not_raising(in_tmpdir):
    with clean_path_on_failure('foo'):
        os.mkdir('foo')
    assert os.path.exists('foo')


def test_clean_path_on_failure_cleans_for_normal_exception(in_tmpdir):
    class MyException(Exception):
        pass

    with pytest.raises(MyException):
        with clean_path_on_failure('foo'):
            os.mkdir('foo')
            raise MyException

    assert not os.path.exists('foo')


def test_clean_path_on_failure_cleans_for_system_exit(in_tmpdir):
    # BaseException subclasses (e.g. SystemExit) must also trigger cleanup
    class MySystemExit(SystemExit):
        pass

    with pytest.raises(MySystemExit):
        with clean_path_on_failure('foo'):
            os.mkdir('foo')
            raise MySystemExit

    assert not os.path.exists('foo')
+
+
def test_cmd_output_exe_not_found():
    # a missing executable is reported as output, not raised as OSError
    ret, out, _ = cmd_output('dne', check=False)
    assert ret == 1
    assert out == 'Executable `dne` not found'


@pytest.mark.parametrize('fn', (cmd_output_b, cmd_output_p))
def test_cmd_output_exe_not_found_bytes(fn):
    # the bytes variants produce the same message as bytes
    ret, out, _ = fn('dne', check=False, stderr=subprocess.STDOUT)
    assert ret == 1
    assert out == b'Executable `dne` not found'


@pytest.mark.parametrize('fn', (cmd_output_b, cmd_output_p))
def test_cmd_output_no_shebang(tmpdir, fn):
    f = tmpdir.join('f').ensure()
    make_executable(f)

    # previously this raised `OSError` -- the output is platform specific
    ret, out, _ = fn(str(f), check=False, stderr=subprocess.STDOUT)
    assert ret == 1
    assert isinstance(out, bytes)
    assert out.endswith(b'\n')
+
+
def test_rmtree_read_only_directories(tmpdir):
    """Simulates the go module tree.  See #1042"""
    tmpdir.join('x/y/z').ensure_dir().join('a').ensure()
    mode = os.stat(str(tmpdir.join('x'))).st_mode
    mode_no_w = mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
    # strip the write bit from *every* directory on the path.  The previous
    # code chmod-ed `x/y/z` three times, leaving `x` and `x/y` writable and
    # the read-only handling of intermediate directories untested.
    tmpdir.join('x/y/z').chmod(mode_no_w)
    tmpdir.join('x/y').chmod(mode_no_w)
    tmpdir.join('x').chmod(mode_no_w)
    rmtree(str(tmpdir.join('x')))
diff --git a/tests/xargs_test.py b/tests/xargs_test.py
new file mode 100644
index 0000000..e8000b2
--- /dev/null
+++ b/tests/xargs_test.py
@@ -0,0 +1,251 @@
+from __future__ import annotations
+
+import concurrent.futures
+import multiprocessing
+import os
+import sys
+import time
+from unittest import mock
+
+import pytest
+
+from pre_commit import parse_shebang
+from pre_commit import xargs
+
+
def test_cpu_count_sched_getaffinity_exists():
    """cpu_count() prefers the size of the sched_getaffinity cpu set."""
    affinity = set(range(345))
    patched = mock.patch.object(
        os, 'sched_getaffinity', create=True, return_value=affinity,
    )
    with patched:
        assert xargs.cpu_count() == 345
+
+
@pytest.fixture
def no_sched_getaffinity():
    # Simulates an OS without os.sched_getaffinity available (mac/windows)
    # https://docs.python.org/3/library/os.html#interface-to-the-scheduler
    patched = mock.patch.object(
        os,
        'sched_getaffinity',
        create=True,
        side_effect=AttributeError,
    )
    with patched:
        yield
+
+
def test_cpu_count_multiprocessing_cpu_count_implemented(no_sched_getaffinity):
    """Falls back to multiprocessing.cpu_count() when affinity is absent."""
    patched = mock.patch.object(multiprocessing, 'cpu_count', return_value=123)
    with patched:
        assert xargs.cpu_count() == 123
+
+
def test_cpu_count_multiprocessing_cpu_count_not_implemented(
        no_sched_getaffinity,
):
    """Defaults to 1 when no cpu-count source is available at all."""
    patched = mock.patch.object(
        multiprocessing, 'cpu_count', side_effect=NotImplementedError,
    )
    with patched:
        assert xargs.cpu_count() == 1
+
+
@pytest.mark.parametrize(
    ('env', 'expected'),
    (
        # empty environment contributes nothing
        ({}, 0),
        # sizes grow with both the number of vars and their lengths;
        # the exact per-entry overhead is defined by xargs._environ_size
        ({b'x': b'1'}, 12),
        ({b'x': b'12'}, 13),
        ({b'x': b'1', b'y': b'2'}, 24),
    ),
)
def test_environ_size(env, expected):
    """_environ_size() is monotonic in environment size."""
    # normalize integer sizing
    assert xargs._environ_size(_env=env) == expected
+
+
@pytest.fixture
def win32_mock():
    # Pretend to be windows: utf-8 filesystem encoding + win32 platform.
    fse = mock.patch.object(sys, 'getfilesystemencoding', return_value='utf-8')
    platform = mock.patch.object(sys, 'platform', 'win32')
    with fse, platform:
        yield
+
+
@pytest.fixture
def linux_mock():
    # Pretend to be linux: utf-8 filesystem encoding + linux platform.
    fse = mock.patch.object(sys, 'getfilesystemencoding', return_value='utf-8')
    platform = mock.patch.object(sys, 'platform', 'linux')
    with fse, platform:
        yield
+
+
def test_partition_trivial():
    """With no varargs, the bare command is emitted exactly once."""
    assert xargs.partition(('cmd',), (), 1) == (('cmd',),)
+
+
def test_partition_simple():
    """A single small argument is appended to the command in one chunk."""
    assert xargs.partition(('cmd',), ('foo',), 1) == (('cmd', 'foo'),)
+
+
def test_partition_limits():
    """Arguments are packed greedily up to the byte budget."""
    varargs = (
        # Just match the end (with spaces)
        '.' * 5, '.' * 4,
        # Just match the end (single arg)
        '.' * 10,
        # Goes over the end
        '.' * 5,
        '.' * 6,
    )
    ret = xargs.partition(('ninechars',), varargs, 1, _max_length=21)
    expected = (
        ('ninechars', '.' * 5, '.' * 4),
        ('ninechars', '.' * 10),
        ('ninechars', '.' * 5),
        ('ninechars', '.' * 6),
    )
    assert ret == expected
+
+
def test_partition_limit_win32(win32_mock):
    """On win32 the emoji fits: its length is halved vs its utf-8 size."""
    cmd = ('ninechars',)
    # counted as half because of utf-16 encode
    varargs = ('πŸ˜‘' * 5,)
    assert xargs.partition(cmd, varargs, 1, _max_length=21) == (cmd + varargs,)
+
+
def test_partition_limit_linux(linux_mock):
    """On linux the same emoji argument needs the full utf-8 byte budget."""
    cmd = ('ninechars',)
    varargs = ('πŸ˜‘' * 5,)
    assert xargs.partition(cmd, varargs, 1, _max_length=31) == (cmd + varargs,)
+
+
def test_argument_too_long_with_large_unicode(linux_mock):
    """A single oversized multibyte argument cannot be partitioned."""
    cmd = ('ninechars',)
    varargs = ('πŸ˜‘' * 10,)  # 4 bytes * 10
    with pytest.raises(xargs.ArgumentTooLongError):
        xargs.partition(cmd, varargs, 1, _max_length=20)
+
+
def test_partition_target_concurrency():
    """Work is split into roughly equal chunks for the target concurrency."""
    ret = xargs.partition(('foo',), ('A',) * 22, 4, _max_length=50)
    expected = (
        ('foo',) + ('A',) * 6,
        ('foo',) + ('A',) * 6,
        ('foo',) + ('A',) * 6,
        ('foo',) + ('A',) * 4,
    )
    assert ret == expected
+
+
def test_partition_target_concurrency_wont_make_tiny_partitions():
    """Small workloads produce fewer, larger chunks than the concurrency."""
    ret = xargs.partition(('foo',), ('A',) * 10, 4, _max_length=50)
    expected = (
        ('foo',) + ('A',) * 4,
        ('foo',) + ('A',) * 4,
        ('foo',) + ('A',) * 2,
    )
    assert ret == expected
+
+
def test_argument_too_long():
    """Raises when command plus one argument already exceeds the limit."""
    with pytest.raises(xargs.ArgumentTooLongError):
        xargs.partition(('a' * 5,), ('a' * 5,), 1, _max_length=10)
+
+
def test_xargs_smoke():
    """echo round-trips its arguments through xargs."""
    retcode, stdout = xargs.xargs(('echo',), ('hello', 'world'))
    assert retcode == 0
    # normalize windows line endings before comparing
    assert stdout.replace(b'\r\n', b'\n') == b'hello world\n'
+
+
# `exit $1` makes the subprocess's exit status equal its first argument
exit_cmd = parse_shebang.normalize_cmd(('bash', '-c', 'exit $1', '--'))
# Abuse max_length to control the exit code
# NOTE(review): the +3 presumably leaves room for exactly one short argument
# per partition, so every invocation runs `exit <single arg>` -- confirm
# against xargs.partition's accounting.
max_length = len(' '.join(exit_cmd)) + 3
+
+
def test_xargs_retcode_normal():
    """The overall return code is the maximum across all partitions."""
    retcode, _ = xargs.xargs(exit_cmd, ('0',), _max_length=max_length)
    assert retcode == 0

    retcode, _ = xargs.xargs(exit_cmd, ('0', '1'), _max_length=max_length)
    assert retcode == 1

    # takes the maximum return code
    retcode, _ = xargs.xargs(exit_cmd, ('0', '5', '1'), _max_length=max_length)
    assert retcode == 5
+
+
@pytest.mark.xfail(sys.platform == 'win32', reason='posix only')
def test_xargs_retcode_killed_by_signal():
    """Death by signal is reported as the negated signal number."""
    kill_cmd = parse_shebang.normalize_cmd(('bash', '-c', 'kill -9 $$', '--'))
    retcode, _ = xargs.xargs(kill_cmd, ('foo', 'bar'))
    assert retcode == -9
+
+
def test_xargs_concurrency():
    """Five half-second sleeps must overlap when target_concurrency=5."""
    bash_cmd = parse_shebang.normalize_cmd(('bash', '-c'))
    print_pid = ('sleep 0.5 && echo $$',)

    before = time.time()
    retcode, stdout = xargs.xargs(
        bash_cmd, print_pid * 5,
        target_concurrency=5,
        # budget fits only one script per invocation, forcing 5 processes
        _max_length=len(' '.join(bash_cmd + print_pid)) + 1,
    )
    duration = time.time() - before
    assert retcode == 0
    assert len(stdout.splitlines()) == 5
    # It would take 0.5*5=2.5 seconds to run all of these in serial, so if it
    # takes less, they must have run concurrently.
    assert duration < 2.5
+
+
def test_thread_mapper_concurrency_uses_threadpoolexecutor_map():
    """Concurrency > 1 yields a ThreadPoolExecutor's bound `map`."""
    with xargs._thread_mapper(10) as thread_map:
        owner = thread_map.__self__  # type: ignore
        assert isinstance(owner, concurrent.futures.ThreadPoolExecutor)
+
+
def test_thread_mapper_concurrency_uses_regular_map():
    """Concurrency == 1 falls back to the builtin `map`."""
    with xargs._thread_mapper(1) as thread_map:
        assert thread_map is map
+
+
def test_xargs_propagate_kwargs_to_cmd():
    """Extra keyword arguments (here: env) reach the spawned process."""
    env = {'PRE_COMMIT_TEST_VAR': 'Pre commit is awesome'}
    raw_cmd: tuple[str, ...] = (
        'bash', '-c', 'echo $PRE_COMMIT_TEST_VAR', '--',
    )
    cmd = parse_shebang.normalize_cmd(raw_cmd)

    retcode, stdout = xargs.xargs(cmd, ('1',), env=env)
    assert retcode == 0
    assert b'Pre commit is awesome' in stdout
+
+
@pytest.mark.xfail(sys.platform == 'win32', reason='posix only')
def test_xargs_color_true_makes_tty():
    """With color=True the child sees its stdout as a tty."""
    is_tty_cmd = (
        sys.executable, '-c', 'import sys; print(sys.stdout.isatty())',
    )
    retcode, stdout = xargs.xargs(is_tty_cmd, ('1',), color=True)
    assert retcode == 0
    assert stdout == b'True\n'
+
+
@pytest.mark.xfail(os.name == 'posix', reason='nt only')
@pytest.mark.parametrize('filename', ('t.bat', 't.cmd', 'T.CMD'))
def test_xargs_with_batch_files(tmpdir, filename):
    """Batch files run despite a very long windows command line."""
    batch = tmpdir.join(filename)
    batch.write('echo it works\n')
    retcode, stdout = xargs.xargs((str(batch),), ('x',) * 8192)
    assert retcode == 0, (retcode, stdout)