Diffstat (limited to 'pre_commit_hooks')
 pre_commit_hooks/check_added_large_files.py |  2 +-
 pre_commit_hooks/check_yaml.py              |  2 +-
 pre_commit_hooks/debug_statement_hook.py    |  1 +
 pre_commit_hooks/destroyed_symlinks.py      |  6 +-----
 pre_commit_hooks/file_contents_sorter.py    |  5 ++++-
 pre_commit_hooks/string_fixer.py            | 15 ++++++++++++++-
 6 files changed, 22 insertions(+), 9 deletions(-)
diff --git a/pre_commit_hooks/check_added_large_files.py b/pre_commit_hooks/check_added_large_files.py
index 79c8d4e..9e0619b 100644
--- a/pre_commit_hooks/check_added_large_files.py
+++ b/pre_commit_hooks/check_added_large_files.py
@@ -46,7 +46,7 @@ def find_large_added_files(
         filenames_filtered &= added_files()
 
     for filename in filenames_filtered:
-        kb = int(math.ceil(os.stat(filename).st_size / 1024))
+        kb = math.ceil(os.stat(filename).st_size / 1024)
         if kb > maxkb:
             print(f'{filename} ({kb} KB) exceeds {maxkb} KB.')
             retv = 1
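
Note: this change is safe because math.ceil() already returns an int on Python 3 (it returned a float only on Python 2), so the int() wrapper was redundant:

    >>> import math
    >>> math.ceil(1536 / 1024)
    2
    >>> type(math.ceil(0.5))
    <class 'int'>
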
diff --git a/pre_commit_hooks/check_yaml.py b/pre_commit_hooks/check_yaml.py
index 250794e..9563347 100644
--- a/pre_commit_hooks/check_yaml.py
+++ b/pre_commit_hooks/check_yaml.py
@@ -46,7 +46,7 @@ def main(argv: Sequence[str] | None = None) -> int:
         '--unsafe', action='store_true',
         help=(
             'Instead of loading the files, simply parse them for syntax. '
-            'A syntax-only check enables extensions and unsafe contstructs '
+            'A syntax-only check enables extensions and unsafe constructs '
             'which would otherwise be forbidden. Using this option removes '
             'all guarantees of portability to other yaml implementations. '
             'Implies --allow-multiple-documents'
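
For context, a rough sketch of what "parse for syntax" means in PyYAML terms (the helper name syntax_ok is illustrative, not part of the hook): yaml.parse() drains the event stream without constructing Python objects, so extension tags that safe loading would reject still pass as long as the document is syntactically valid.

    import yaml

    def syntax_ok(src: str) -> bool:
        # Parsing walks events only; no objects are constructed.
        try:
            for _ in yaml.parse(src):
                pass
            return True
        except yaml.YAMLError:
            return False

    assert syntax_ok('!!python/object/apply:os.system ["true"]')  # unsafe tag, valid syntax
    assert not syntax_ok('{unclosed: [')                          # syntax error
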
diff --git a/pre_commit_hooks/debug_statement_hook.py b/pre_commit_hooks/debug_statement_hook.py
index 9ada657..cf544c7 100644
--- a/pre_commit_hooks/debug_statement_hook.py
+++ b/pre_commit_hooks/debug_statement_hook.py
@@ -8,6 +8,7 @@ from typing import Sequence
 
 
 DEBUG_STATEMENTS = {
+    'bpdb',
     'ipdb',
     'pdb',
     'pdbr',
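
bpdb is bpython's pdb wrapper; with this entry, a stray debugger import left in committed code is now reported just like the others:

    import bpdb  # now flagged by debug-statements
    import ipdb  # already flagged
    import pdb   # already flagged
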
diff --git a/pre_commit_hooks/destroyed_symlinks.py b/pre_commit_hooks/destroyed_symlinks.py
index 88253c0..f256908 100644
--- a/pre_commit_hooks/destroyed_symlinks.py
+++ b/pre_commit_hooks/destroyed_symlinks.py
@@ -76,11 +76,7 @@ def main(argv: Sequence[str] | None = None) -> int:
         for destroyed_link in destroyed_links:
             print(f'- {destroyed_link}')
         print('You should unstage affected files:')
-        print(
-            '\tgit reset HEAD -- {}'.format(
-                ' '.join(shlex.quote(link) for link in destroyed_links),
-            ),
-        )
+        print(f'\tgit reset HEAD -- {shlex.join(destroyed_links)}')
         print(
             'And retry commit. As a long term solution '
             'you may try to explicitly tell git that your '
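
shlex.join() (available since Python 3.8) is documented as equivalent to quoting each element and joining with spaces, which is exactly what the replaced code spelled out by hand:

    >>> import shlex
    >>> links = ['docs/a link', 'b']
    >>> shlex.join(links)
    "'docs/a link' b"
    >>> shlex.join(links) == ' '.join(shlex.quote(link) for link in links)
    True
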
diff --git a/pre_commit_hooks/file_contents_sorter.py b/pre_commit_hooks/file_contents_sorter.py
index c5691f0..02bdbcc 100644
--- a/pre_commit_hooks/file_contents_sorter.py
+++ b/pre_commit_hooks/file_contents_sorter.py
@@ -37,7 +37,10 @@ def sort_file_contents(
     after = sorted(lines, key=key)
 
     before_string = b''.join(before)
-    after_string = b'\n'.join(after) + b'\n'
+    after_string = b'\n'.join(after)
+
+    if after_string:
+        after_string += b'\n'
 
     if before_string == after_string:
         return PASS
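
The guard exists for the empty-file edge case: b'\n'.join([]) is b'', and unconditionally appending b'\n' meant an empty file was never considered sorted, so the hook would rewrite it to a lone newline on every run:

    >>> b'\n'.join([]) + b'\n'   # old behaviour: empty file gains a newline
    b'\n'
    >>> b'\n'.join([])           # new behaviour: empty stays empty
    b''
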
diff --git a/pre_commit_hooks/string_fixer.py b/pre_commit_hooks/string_fixer.py
index 0ef9bc7..d1b1c4a 100644
--- a/pre_commit_hooks/string_fixer.py
+++ b/pre_commit_hooks/string_fixer.py
@@ -3,9 +3,16 @@ from __future__ import annotations
 import argparse
 import io
 import re
+import sys
 import tokenize
 from typing import Sequence
 
+if sys.version_info >= (3, 12):  # pragma: >=3.12 cover
+    FSTRING_START = tokenize.FSTRING_START
+    FSTRING_END = tokenize.FSTRING_END
+else:  # pragma: <3.12 cover
+    FSTRING_START = FSTRING_END = -1
+
 START_QUOTE_RE = re.compile('^[a-zA-Z]*"')
@@ -40,11 +47,17 @@ def fix_strings(filename: str) -> int:
     # Basically a mutable string
     splitcontents = list(contents)
 
+    fstring_depth = 0
+
     # Iterate in reverse so the offsets are always correct
     tokens_l = list(tokenize.generate_tokens(io.StringIO(contents).readline))
     tokens = reversed(tokens_l)
     for token_type, token_text, (srow, scol), (erow, ecol), _ in tokens:
-        if token_type == tokenize.STRING:
+        if token_type == FSTRING_START:  # pragma: >=3.12 cover
+            fstring_depth += 1
+        elif token_type == FSTRING_END:  # pragma: >=3.12 cover
+            fstring_depth -= 1
+        elif fstring_depth == 0 and token_type == tokenize.STRING:
             new_text = handle_match(token_text)
             splitcontents[
                 line_offsets[srow] + scol:
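
Background: PEP 701 (Python 3.12) changed how f-strings tokenize. An f-string no longer arrives as a single STRING token but as FSTRING_START / FSTRING_MIDDLE / FSTRING_END with ordinary tokens for the replacement fields in between, so any STRING token seen while fstring_depth > 0 is nested inside an f-string and must not be rewritten. The difference is easy to inspect:

    import io
    import tokenize

    src = 'f"{x} ok"\n'
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))
    # Python <  3.12: one STRING token for the whole f-string
    # Python >= 3.12: FSTRING_START 'f"', OP '{', NAME 'x', OP '}',
    #                 FSTRING_MIDDLE ' ok', FSTRING_END '"', ...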