diff options
Diffstat (limited to 'src/debputy/lsp/lsp_generic_deb822.py')
-rw-r--r-- | src/debputy/lsp/lsp_generic_deb822.py | 161 |
1 file changed, 161 insertions, 0 deletions
diff --git a/src/debputy/lsp/lsp_generic_deb822.py b/src/debputy/lsp/lsp_generic_deb822.py index 245f3de..7a1f96f 100644 --- a/src/debputy/lsp/lsp_generic_deb822.py +++ b/src/debputy/lsp/lsp_generic_deb822.py @@ -8,6 +8,8 @@ from typing import ( Any, Container, List, + Iterable, + Iterator, ) from lsprotocol.types import ( @@ -20,14 +22,27 @@ from lsprotocol.types import ( Hover, MarkupKind, HoverParams, + FoldingRangeParams, + FoldingRange, + FoldingRangeKind, + SemanticTokensParams, + SemanticTokens, ) from debputy.lsp.lsp_debian_control_reference_data import ( Deb822FileMetadata, Deb822KnownField, StanzaMetadata, + FieldValueClass, ) +from debputy.lsp.lsp_features import SEMANTIC_TOKEN_TYPES_IDS from debputy.lsp.text_util import normalize_dctrl_field_name +from debputy.lsp.vendoring._deb822_repro import parse_deb822_file +from debputy.lsp.vendoring._deb822_repro.parsing import ( + Deb822KeyValuePairElement, + LIST_SPACE_SEPARATED_INTERPRETATION, +) +from debputy.lsp.vendoring._deb822_repro.tokens import tokenize_deb822_file, Deb822Token from debputy.util import _info try: @@ -175,6 +190,152 @@ def deb822_hover( ) +def _deb822_token_iter( + tokens: Iterable[Deb822Token], +) -> Iterator[Tuple[Deb822Token, int, int, int, int, int]]: + line_no = 0 + line_offset = 0 + + for token in tokens: + start_line = line_no + start_line_offset = line_offset + + newlines = token.text.count("\n") + line_no += newlines + text_len = len(token.text) + if newlines: + if token.text.endswith("\n"): + line_offset = 0 + else: + # -2, one to remove the "\n" and one to get 0-offset + line_offset = text_len - token.text.rindex("\n") - 2 + else: + line_offset += text_len + + yield token, start_line, start_line_offset, line_no, line_offset + + +def deb822_folding_ranges( + ls: "LanguageServer", + params: FoldingRangeParams, + # Unused for now: might be relevant for supporting folding for some fields + _file_metadata: Deb822FileMetadata[Any], +) -> Optional[Sequence[FoldingRange]]: + doc = 
def deb822_folding_ranges(
    ls: "LanguageServer",
    params: FoldingRangeParams,
    # Unused for now: might be relevant for supporting folding for some fields
    _file_metadata: Deb822FileMetadata[Any],
) -> Optional[Sequence[FoldingRange]]:
    """Provide folding ranges for runs of consecutive comment lines.

    Scans the document token-by-token and emits one ``FoldingRange`` (of kind
    ``Comment``) per contiguous run of comment tokens.

    :param ls: The language server instance (used to fetch the document text).
    :param params: The ``textDocument/foldingRange`` request parameters.
    :param _file_metadata: Unused; reserved for field-level folding support.
    :return: The folding ranges found (possibly empty).
    """
    doc = ls.workspace.get_text_document(params.text_document.uri)
    comment_start = -1
    folding_ranges: List[FoldingRange] = []
    for (
        token,
        start_line,
        start_offset,
        end_line,
        end_offset,
    ) in _deb822_token_iter(tokenize_deb822_file(doc.lines)):
        if token.is_comment:
            if comment_start < 0:
                comment_start = start_line
        elif comment_start > -1:
            # Emit the range *before* resetting the marker.  The original
            # reset `comment_start` to -1 first and then used it as the
            # range's start line, so every range started at line -1.
            folding_range = FoldingRange(
                comment_start,
                end_line,
                kind=FoldingRangeKind.Comment,
            )

            folding_ranges.append(folding_range)
            comment_start = -1
    # NOTE(review): a comment run at end-of-file never emits a range, and the
    # range end uses the *following* token's end line — confirm both are the
    # intended fold extents.

    return folding_ranges
def deb822_semantic_tokens_full(
    ls: "LanguageServer",
    request: SemanticTokensParams,
    file_metadata: Deb822FileMetadata[Any],
) -> Optional[SemanticTokens]:
    """Provide semantic tokens for a deb822 file.

    Emits a "keyword" token for every field name and an "enumMember" token
    for every recognized value of single-value / space-separated-list fields
    with a closed set of known values.  Tokens are encoded in the LSP
    relative format (line delta, start-column delta, length, type, modifiers).

    :param ls: The language server instance (used to fetch the document text).
    :param request: The ``textDocument/semanticTokens/full`` request.
    :param file_metadata: Metadata used to classify stanzas and fields.
    :return: The encoded tokens, or ``None`` if there were none.
    """
    doc = ls.workspace.get_text_document(request.text_document.uri)
    lines = doc.lines
    deb822_file = parse_deb822_file(
        lines,
        accept_files_with_duplicated_fields=True,
        accept_files_with_error_tokens=True,
    )
    tokens: List[int] = []
    previous_line = 0
    keyword_token_code = SEMANTIC_TOKEN_TYPES_IDS["keyword"]
    known_value_token_code = SEMANTIC_TOKEN_TYPES_IDS["enumMember"]
    no_modifiers = 0

    # TODO: Add comment support; slightly complicated by how we parse the file.

    for stanza_idx, stanza in enumerate(deb822_file):
        stanza_position = stanza.position_in_file()
        stanza_metadata = file_metadata.classify_stanza(stanza, stanza_idx=stanza_idx)
        for kvpair in stanza.iter_parts_of_type(Deb822KeyValuePairElement):
            kvpair_pos = kvpair.position_in_parent().relative_to(stanza_position)
            # These two happen to be the same; the indirection is to make it explicit
            # that the two positions for different tokens are the same.
            field_position_without_comments = kvpair_pos
            field_size = doc.position_codec.client_num_units(kvpair.field_name)
            current_line = field_position_without_comments.line_position
            line_delta = current_line - previous_line
            previous_line = current_line
            tokens.append(line_delta)  # Line delta
            tokens.append(0)  # Token column delta
            tokens.append(field_size)  # Token length
            tokens.append(keyword_token_code)
            tokens.append(no_modifiers)

            known_field: Optional[Deb822KnownField] = stanza_metadata.get(
                kvpair.field_name
            )
            if (
                known_field is None
                or not known_field.known_values
                or known_field.spellcheck_value
            ):
                continue

            if known_field.field_value_class not in (
                FieldValueClass.SINGLE_VALUE,
                FieldValueClass.SPACE_SEPARATED_LIST,
            ):
                continue
            value_element_pos = kvpair.value_element.position_in_parent().relative_to(
                kvpair_pos
            )

            last_token_start_column = 0

            for value_ref in kvpair.interpret_as(
                LIST_SPACE_SEPARATED_INTERPRETATION
            ).iter_value_references():
                if value_ref.value not in known_field.known_values:
                    continue
                value_loc = value_ref.locatable
                value_range_te = value_loc.range_in_parent().relative_to(
                    value_element_pos
                )
                start_line = value_range_te.start_pos.line_position
                line_delta = start_line - current_line
                current_line = start_line
                # Keep the previous-token line in sync so the *next* field's
                # line delta is relative to the last token actually emitted
                # (values may sit on continuation lines below the field name).
                previous_line = current_line
                if line_delta:
                    last_token_start_column = 0

                value_start_column = value_range_te.start_pos.cursor_position
                column_delta = value_start_column - last_token_start_column
                last_token_start_column = value_start_column

                tokens.append(line_delta)  # Line delta
                tokens.append(column_delta)  # Token column delta
                # Token length: must be the length of the *value*; the
                # original reused `field_size` (the field name's length) here.
                tokens.append(doc.position_codec.client_num_units(value_ref.value))
                tokens.append(known_value_token_code)
                tokens.append(no_modifiers)

    if not tokens:
        return None
    return SemanticTokens(tokens)