forked from repo-mirrors/sqlfluff
Prework for introducing mypyc (#6433)

.github/workflows/ci-tests.yml (vendored, 11 changes)

```diff
@@ -32,7 +32,16 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        job: [ 'linting', 'doclinting', 'docbuild', 'yamllint', 'mypy', 'doctests' ]
+        job:
+          [
+            "linting",
+            "doclinting",
+            "docbuild",
+            "yamllint",
+            "mypy",
+            "mypyc",
+            "doctests",
+          ]
       include:
         # Default to most recent python version
         - python-version: "3.13"
```

.gitignore (vendored, 4 changes)

```diff
@@ -58,3 +58,7 @@ plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml
 
 # Emacs
 *~
+
+# Mypyc outputs
+*.pyd
+*.so
```

```diff
@@ -36,7 +36,7 @@ repos:
     hooks:
       - id: black
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.11.2
+    rev: v1.13.0
     hooks:
       - id: mypy
         additional_dependencies:
@@ -57,7 +57,7 @@ repos:
             # properly.
            jinja2,
            pathspec,
-            pytest, # and by extension... pluggy
+            pytest, # and by extension... pluggy
            click,
          ]
        files: ^src/sqlfluff/.*
```

```diff
@@ -28,7 +28,7 @@ pytest-xdist
 # ----
 # `types-*` dependencies here should be the same as in `.pre-commit-config.yaml`.
 # If you update these dependencies, make sure to update those too.
-mypy
+mypy[mypyc]
 types-toml
 types-chardet
 types-appdirs
```

```diff
@@ -52,10 +52,10 @@ class APIParsingError(ValueError):
 
     def __init__(self, violations: List[SQLBaseError], *args: Any):
         self.violations = violations
-        self.msg = f"Found {len(violations)} issues while parsing string."
+        msg = f"Found {len(violations)} issues while parsing string."
         for viol in violations:
-            self.msg += f"\n{viol!s}"
-        super().__init__(self.msg, *args)
+            msg += f"\n{viol!s}"
+        super().__init__(msg, *args)
 
 
 def lint(
```
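
The `APIParsingError` change is more than style: the message is now accumulated in a local and the undeclared `self.msg` attribute disappears entirely. That matters for mypyc, whose compiled "native" classes want their attributes declared up front rather than invented inside `__init__`. A minimal sketch of the pattern, using a hypothetical `ParsingError` class rather than sqlfluff's actual code:

```python
from typing import Any, List


class ParsingError(ValueError):
    violations: List[str]  # declared, so a compiled class can slot it

    def __init__(self, violations: List[str], *args: Any) -> None:
        self.violations = violations
        # Accumulate in a local: the old `self.msg += ...` pattern was a
        # read-modify-write of an undeclared attribute on every iteration.
        msg = f"Found {len(violations)} issues while parsing string."
        for viol in violations:
            msg += f"\n{viol!s}"
        super().__init__(msg, *args)
```

Since the message is passed to `super().__init__`, it remains recoverable via `str(exc)`, so nothing is lost by dropping the attribute.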

```diff
@@ -49,6 +49,7 @@ from sqlfluff.core.linter.linting_result import LintingResult
 from sqlfluff.core.parser import Lexer, Parser
 from sqlfluff.core.parser.segments.base import BaseSegment, SourceFix
 from sqlfluff.core.rules import BaseRule, RulePack, get_ruleset
+from sqlfluff.core.rules.fix import LintFix
 from sqlfluff.core.rules.noqa import IgnoreMask
 
 if TYPE_CHECKING:  # pragma: no cover
@@ -384,7 +385,7 @@ class Linter:
         # the fixes themselves.
         initial_linting_errors = []
         # A placeholder for the fixes we had on the previous loop
-        last_fixes = None
+        last_fixes: Optional[List[LintFix]] = None
         # Keep a set of previous versions to catch infinite loops.
         previous_versions: Set[Tuple[str, Tuple["SourceFix", ...]]] = {(tree.raw, ())}
         # Keep a buffer for recording rule timings.
```
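
This is the commit's recurring pattern: a variable initialised to `None` gains an explicit `Optional[...]` annotation. mypyc type-checks with stricter inference than a default mypy run (notably it enables `local_partial_types`), so pinning the type at initialisation, rather than leaving mypy to infer it from a later assignment, keeps both checkers happy. A hedged sketch with simplified types, not the real `Linter` code:

```python
from typing import List, Optional


def last_nonempty(batches: List[List[str]]) -> Optional[List[str]]:
    # Annotate at initialisation: the checker no longer has to carry a
    # "partial None" type around until it sees the first real assignment.
    last_fixes: Optional[List[str]] = None
    for fixes in batches:
        if fixes:
            last_fixes = fixes
    return last_fixes
```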

```diff
@@ -5,12 +5,14 @@ from dataclasses import dataclass
 from typing import (
     Iterator,
     List,
+    Optional,
     Tuple,
 )
 
 from sqlfluff.core.parser import (
     BaseSegment,
 )
+from sqlfluff.core.parser.markers import PositionMarker
 from sqlfluff.core.templaters import TemplatedFile
 
 linter_logger = logging.getLogger("sqlfluff.linter")
@@ -121,7 +123,7 @@ def _iter_templated_patches(
     source_idx = segment.pos_marker.source_slice.start
     templated_idx = segment.pos_marker.templated_slice.start
     insert_buff = ""
-    first_segment_pos = None
+    first_segment_pos: Optional[PositionMarker] = None
     for seg in segments:
         # First check for insertions.
         # At this stage, everything should have a position.
```

```diff
@@ -1,6 +1,6 @@
 """Definitions for Grammar."""
 
-from typing import Optional, Sequence, Union
+from typing import Optional, Sequence, Tuple, Union
 
 from sqlfluff.core.parser.context import ParseContext
 from sqlfluff.core.parser.grammar import Ref
@@ -22,7 +22,7 @@ class Delimited(OneOf):
     as different options of what can be delimited, rather than a sequence.
     """
 
-    equality_kwargs = (
+    equality_kwargs: Tuple[str, ...] = (
         "_elements",
         "optional",
         "allow_gaps",
```

```diff
@@ -25,6 +25,7 @@ from dataclasses import dataclass
 from typing import (
     TYPE_CHECKING,
     Any,
+    ClassVar,
     DefaultDict,
     Dict,
     Iterator,
@@ -166,13 +167,15 @@ class RuleMetaclass(type):
     """
 
     # Precompile the regular expressions
-    _doc_search_regex = re.compile(
+    _doc_search_regex: ClassVar = re.compile(
         "(\\s{4}\\*\\*Anti-pattern\\*\\*|\\s{4}\\.\\. note::|"
         "\\s\\s{4}\\*\\*Configuration\\*\\*)",
         flags=re.MULTILINE,
     )
-    _valid_classname_regex = regex.compile(r"Rule_?([A-Z]{1}[a-zA-Z]+)?_([A-Z0-9]{4})")
-    _valid_rule_name_regex = regex.compile(r"[a-z][a-z\.\_]+")
+    _valid_classname_regex: ClassVar = regex.compile(
+        r"Rule_?([A-Z]{1}[a-zA-Z]+)?_([A-Z0-9]{4})"
+    )
+    _valid_rule_name_regex: ClassVar = regex.compile(r"[a-z][a-z\.\_]+")
 
     @staticmethod
     def _populate_code_and_description(
```
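
The `ClassVar` annotations do double duty: they tell mypy these regexes are class-level constants that instances should not shadow, and they tell mypyc not to reserve a per-instance attribute slot for them. A minimal sketch with a hypothetical `RuleChecker` class:

```python
import re
from typing import ClassVar


class RuleChecker:
    # Compiled once and shared by every instance. Without ClassVar, a
    # compiled class would treat this as the default value of a
    # per-instance attribute.
    _classname_regex: ClassVar["re.Pattern[str]"] = re.compile(
        r"Rule_?([A-Z][a-zA-Z]+)?_([A-Z0-9]{4})"
    )

    def is_valid_classname(self, name: str) -> bool:
        return self._classname_regex.match(name) is not None
```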

```diff
@@ -242,7 +242,7 @@ class IgnoreMask:
         - Sorted in ascending order by line number
         """
         ignore = False
-        last_ignore = None
+        last_ignore: Optional[NoQaDirective] = None
         for idx, ignore_rule in enumerate(ignore_rules):
             if ignore_rule.line_no > line_no:
                 # Peak at the next rule to see if it's a matching disable
```

```diff
@@ -211,7 +211,7 @@ class TemplatedFile:
         )
 
         # Consistency check templated string and slices.
-        previous_slice = None
+        previous_slice: Optional[TemplatedFileSlice] = None
         tfs: Optional[TemplatedFileSlice] = None
         for tfs in self.sliced_file:
             if previous_slice:
@@ -291,7 +291,7 @@ class TemplatedFile:
         NB: the last_idx is exclusive, as the intent is to use this as a slice.
         """
         start_idx = start_idx or 0
-        first_idx = None
+        first_idx: Optional[int] = None
         last_idx = start_idx
         # Work through the sliced file, starting at the start_idx if given
         # as an optimisation hint. The sliced_file is a list of TemplatedFileSlice
```

```diff
@@ -171,7 +171,7 @@ class PythonTemplater(RawTemplater):
     """
 
     name = "python"
-    config_subsection = ("context",)
+    config_subsection: Tuple[str, ...] = ("context",)
 
     def __init__(self, override_context: Optional[Dict[str, Any]] = None) -> None:
         self.default_context = dict(test_value="__test__")
```

```diff
@@ -8,7 +8,17 @@ from __future__ import annotations
 
 import logging
 from dataclasses import dataclass, field
-from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union, cast
+from typing import (
+    Callable,
+    ClassVar,
+    Dict,
+    List,
+    NamedTuple,
+    Optional,
+    Tuple,
+    Union,
+    cast,
+)
 
 import regex
 from jinja2 import Environment
@@ -295,7 +305,7 @@ class JinjaAnalyzer:
         self.stack: List[int] = []
         self.idx_raw: int = 0
 
-    __known_tag_configurations = {
+    __known_tag_configurations: ClassVar[dict[str, JinjaTagConfiguration]] = {
         # Conditional blocks: "if/elif/else/endif" blocks
         "if": JinjaTagConfiguration(
             block_type="block_start",
@@ -385,9 +395,8 @@ class JinjaAnalyzer:
         # Ideally, we should have a known configuration for this Jinja tag. Derived
         # classes can override this method to provide additional information about the
         # tags they know about.
-        known_cfg = cls.__known_tag_configurations.get(tag, None)
-        if known_cfg:
-            return known_cfg
+        if tag in cls.__known_tag_configurations:
+            return cls.__known_tag_configurations[tag]
 
         # If we don't have a firm configuration for this tag that is most likely
         # provided by a Jinja extension, we'll try to make some guesses about it based
```
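
The lookup rewrite in `JinjaAnalyzer` is subtle but type-driven: `dict.get(tag, None)` produces an `Optional` value, and the old truthiness test would also have skipped any stored configuration that happened to be falsy. An explicit membership check keeps the looked-up value non-Optional for mypy and mypyc alike. A sketch with simplified types (the real mapping holds `JinjaTagConfiguration` objects):

```python
from typing import Dict

# Simplified stand-in for the analyzer's tag-configuration mapping.
KNOWN_TAGS: Dict[str, str] = {"if": "block_start", "endif": "block_end"}


def block_type_for(tag: str) -> str:
    if tag in KNOWN_TAGS:
        # Type checkers see a plain `str` here, not `Optional[str]`, and a
        # falsy-but-present entry can no longer fall through to the guess.
        return KNOWN_TAGS[tag]
    # Guess for unknown tags, e.g. ones provided by Jinja extensions.
    return "block_end" if tag.startswith("end") else "block_start"
```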

```diff
@@ -299,7 +299,7 @@ class ReflowPoint(ReflowElement):
         NOTE: This only returns _untemplated_ indents. If templated
         newline or whitespace segments are found they are skipped.
         """
-        indent = None
+        indent: Optional[RawSegment] = None
         for seg in reversed(self.segments):
             if seg.pos_marker and not seg.pos_marker.is_literal():
                 # Skip any templated elements.
```

```diff
@@ -729,9 +729,8 @@ def _revise_comment_lines(
                 " Comment Only Line: %s. Anchoring to %s", comment_line_idx, idx
             )
             # Mutate reference lines to match this one.
-            lines[comment_line_idx].initial_indent_balance = (
-                line.initial_indent_balance
-            )
+            comment_line = lines[comment_line_idx]
+            comment_line.initial_indent_balance = line.initial_indent_balance
         # Reset the buffer
         comment_line_buffer = []
 
@@ -830,7 +829,7 @@ def _crawl_indent_points(
     TODO: Once this function *works*, there's definitely headroom
     for simplification and optimisation. We should do that.
     """
-    last_line_break_idx = None
+    last_line_break_idx: int | None = None
     indent_balance = 0
     untaken_indents: Tuple[int, ...] = ()
     cached_indent_stats: Optional[IndentStats] = None
@@ -2192,7 +2191,7 @@ def lint_line_length(
     line_buffer: ReflowSequenceType = []
     results: List[LintResult] = []
 
-    last_indent_idx = None
+    last_indent_idx: int | None = None
     for i, elem in enumerate(elem_buffer):
         # Are there newlines in the element?
         # If not, add it to the buffer and wait to evaluate the line.
```
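
Two of these hunks use the PEP 604 spelling `int | None` where the rest of the commit uses `Optional[...]`. For local variables that is safe on every Python sqlfluff supports: annotations on locals are never evaluated at runtime (PEP 526), so the `|` syntax needs neither Python 3.10 nor a `__future__` import, and mypy treats the two spellings identically. A quick sketch:

```python
from typing import List, Optional


def last_break(lines: List[str]) -> Optional[int]:
    # A local annotation is never evaluated, so `int | None` is fine
    # even on pre-3.10 interpreters.
    last_line_break_idx: int | None = None
    for idx, line in enumerate(lines):
        if line.endswith("\n"):
            last_line_break_idx = idx
    return last_line_break_idx
```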

```diff
@@ -283,7 +283,7 @@ def _determine_aligned_inline_spacing(
         return desired_space
 
     # Work out the current spacing before each.
-    last_code = None
+    last_code: Optional[RawSegment] = None
     max_desired_line_pos = 0
     for seg in parent_segment.raw_segments:
         for sibling in siblings:
```

tox.ini (16 changes)

```diff
@@ -1,5 +1,5 @@
 [tox]
-envlist = generate-fixture-yml, linting, doclinting, ruleslinting, docbuild, cov-init, doctests, py{38,39,310,311,312,313}, dbt{140,150,160,170,180,190}, cov-report, mypy, winpy, dbt{150,180,190}-winpy, yamllint
+envlist = generate-fixture-yml, linting, doclinting, ruleslinting, docbuild, cov-init, doctests, py{38,39,310,311,312,313}, dbt{140,150,160,170,180,190}, cov-report, mypy, mypyc, winpy, dbt{150,180,190}-winpy, yamllint
 min_version = 4.0 # Require 4.0+ for proper pyproject.toml support
 
 [testenv]
@@ -120,6 +120,20 @@ commands =
     # Strict MyPy on the core package
     mypy -p sqlfluff.core --strict
 
+[testenv:mypyc]
+skip_install = true
+changedir = src
+commands =
+    mypyc --config-file ../pyproject.toml -p sqlfluff.api
+    mypyc --config-file ../pyproject.toml -p sqlfluff.cli
+    mypyc --config-file ../pyproject.toml -p sqlfluff.core.config
+    mypyc --config-file ../pyproject.toml -p sqlfluff.core.dialects
+    mypyc --config-file ../pyproject.toml -p sqlfluff.core.helpers
+    mypyc --config-file ../pyproject.toml -p sqlfluff.core.linter
+    mypyc --config-file ../pyproject.toml -p sqlfluff.core.parser.grammar
+    mypyc --config-file ../pyproject.toml -p sqlfluff.core.plugin
+    mypyc --config-file ../pyproject.toml -p sqlfluff.utils.reflow
+
 [testenv:build-dist]
 skip_install = true
 deps =
```
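
The new `mypyc` tox environment compiles a subset of packages in place, which is also why `.gitignore` now ignores `*.so` and `*.pyd`: mypyc emits platform-specific C extensions next to the source. A hedged sketch of how you might confirm the result after `tox -e mypyc`; the module path here is just an illustrative pick from one of the packages listed above:

```python
import importlib

# Any module from a package passed to mypyc would do here.
mod = importlib.import_module("sqlfluff.core.helpers.slice")

if mod.__file__ and mod.__file__.endswith((".so", ".pyd")):
    print(f"{mod.__name__}: running as a mypyc-compiled C extension")
else:
    print(f"{mod.__name__}: running as plain Python")
```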