init
This commit is contained in:
commit
38355d2442
9083 changed files with 1225834 additions and 0 deletions
38
.venv/lib/python3.8/site-packages/isort/__init__.py
Normal file
38
.venv/lib/python3.8/site-packages/isort/__init__.py
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
"""Defines the public isort interface"""
|
||||
__all__ = (
|
||||
"Config",
|
||||
"ImportKey",
|
||||
"__version__",
|
||||
"check_code",
|
||||
"check_file",
|
||||
"check_stream",
|
||||
"code",
|
||||
"file",
|
||||
"find_imports_in_code",
|
||||
"find_imports_in_file",
|
||||
"find_imports_in_paths",
|
||||
"find_imports_in_stream",
|
||||
"place_module",
|
||||
"place_module_with_reason",
|
||||
"settings",
|
||||
"stream",
|
||||
)
|
||||
|
||||
from . import settings
|
||||
from ._version import __version__
|
||||
from .api import ImportKey
|
||||
from .api import check_code_string as check_code
|
||||
from .api import (
|
||||
check_file,
|
||||
check_stream,
|
||||
find_imports_in_code,
|
||||
find_imports_in_file,
|
||||
find_imports_in_paths,
|
||||
find_imports_in_stream,
|
||||
place_module,
|
||||
place_module_with_reason,
|
||||
)
|
||||
from .api import sort_code_string as code
|
||||
from .api import sort_file as file
|
||||
from .api import sort_stream as stream
|
||||
from .settings import Config
|
||||
3
.venv/lib/python3.8/site-packages/isort/__main__.py
Normal file
3
.venv/lib/python3.8/site-packages/isort/__main__.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
"""Entry point for ``python -m isort``."""
from isort.main import main

# Guard the call so merely importing this module (e.g. by tooling that walks
# packages) does not trigger a full isort CLI run as a side effect.
if __name__ == "__main__":
    main()
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
12
.venv/lib/python3.8/site-packages/isort/_future/__init__.py
Normal file
12
.venv/lib/python3.8/site-packages/isort/_future/__init__.py
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
"""Provides ``dataclasses`` on every supported interpreter.

Python < 3.7 lacks the stdlib ``dataclasses`` module, so a vendored
backport is substituted there; newer interpreters use the real module.
"""
import sys

# Tuple comparison is the idiomatic, non-fragile way to gate on a version
# (the previous ``major <= 3 and minor <= 6`` form compares components
# independently, which is easy to get wrong when edited).
if sys.version_info < (3, 7):
    from . import _dataclasses as dataclasses

else:
    import dataclasses  # type: ignore

# Re-export the two names the rest of isort actually uses.
dataclass = dataclasses.dataclass  # type: ignore
field = dataclasses.field  # type: ignore

__all__ = ["dataclasses", "dataclass", "field"]
|
||||
Binary file not shown.
Binary file not shown.
1209
.venv/lib/python3.8/site-packages/isort/_future/_dataclasses.py
Normal file
1209
.venv/lib/python3.8/site-packages/isort/_future/_dataclasses.py
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2021 Taneli Hukkinen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
"""A lil' TOML parser."""
|
||||
|
||||
__all__ = ("loads", "load", "TOMLDecodeError")
|
||||
__version__ = "1.2.0" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
|
||||
|
||||
from ._parser import TOMLDecodeError, load, loads
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
|
@ -0,0 +1,650 @@
|
|||
import string
|
||||
import warnings
|
||||
from types import MappingProxyType
|
||||
from typing import IO, Any, Callable, Dict, FrozenSet, Iterable, NamedTuple, Optional, Tuple
|
||||
|
||||
from ._re import (
|
||||
RE_DATETIME,
|
||||
RE_LOCALTIME,
|
||||
RE_NUMBER,
|
||||
match_to_datetime,
|
||||
match_to_localtime,
|
||||
match_to_number,
|
||||
)
|
||||
|
||||
# ASCII control characters (0x00-0x1F) plus DEL (0x7F).
ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))

# Neither of these sets include quotation mark or backslash. They are
# currently handled as separate cases in the parser functions.
ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n\r")

ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ASCII_CTRL - frozenset("\t\n")

ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS

# TOML whitespace is only space and tab; newlines are statement separators.
TOML_WS = frozenset(" \t")
TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
HEXDIGIT_CHARS = frozenset(string.hexdigits)

# Read-only map from two-character escape sequence to its replacement.
BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
    {
        "\\b": "\u0008",  # backspace
        "\\t": "\u0009",  # tab
        "\\n": "\u000A",  # linefeed
        "\\f": "\u000C",  # form feed
        "\\r": "\u000D",  # carriage return
        '\\"': "\u0022",  # quote
        "\\\\": "\u005C",  # backslash
    }
)

# Type annotations
ParseFloat = Callable[[str], Any]
Key = Tuple[str, ...]
Pos = int
|
||||
|
||||
|
||||
# Subclasses ValueError so generic ``except ValueError`` callers also catch it.
class TOMLDecodeError(ValueError):
    """An error raised if a document is not valid TOML."""
|
||||
|
||||
|
||||
def load(fp: IO, *, parse_float: ParseFloat = float) -> Dict[str, Any]:
    """Parse TOML from a file object."""
    raw = fp.read()
    if not isinstance(raw, bytes):
        # Text-mode file objects still work, but only binary mode is supported
        # going forward — tell the caller.
        warnings.warn(
            "Text file object support is deprecated in favor of binary file objects."
            ' Use `open("foo.toml", "rb")` to open the file in binary mode.',
            DeprecationWarning,
        )
        text = raw
    else:
        text = raw.decode()
    return loads(text, parse_float=parse_float)
|
||||
|
||||
|
||||
def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]:  # noqa: C901
    """Parse TOML from a string.

    Raises TOMLDecodeError (via suffixed_err) on any invalid statement.
    """

    # The spec allows converting "\r\n" to "\n", even in string
    # literals. Let's do so to simplify parsing.
    src = s.replace("\r\n", "\n")
    pos = 0
    out = Output(NestedDict(), Flags())
    header: Key = ()  # namespace the current [table] / [[array]] opened

    # Parse one statement at a time
    # (typically means one line in TOML source)
    while True:
        # 1. Skip line leading whitespace
        pos = skip_chars(src, pos, TOML_WS)

        # 2. Parse rules. Expect one of the following:
        #    - end of file
        #    - end of line
        #    - comment
        #    - key/value pair
        #    - append dict to list (and move to its namespace)
        #    - create dict (and move to its namespace)
        #    Skip trailing whitespace when applicable.
        try:
            char = src[pos]
        except IndexError:
            break
        if char == "\n":
            pos += 1
            continue
        if char in KEY_INITIAL_CHARS:
            pos = key_value_rule(src, pos, out, header, parse_float)
            pos = skip_chars(src, pos, TOML_WS)
        elif char == "[":
            # "[[" opens an array-of-tables item, "[" a plain table.
            try:
                second_char: Optional[str] = src[pos + 1]
            except IndexError:
                second_char = None
            if second_char == "[":
                pos, header = create_list_rule(src, pos, out)
            else:
                pos, header = create_dict_rule(src, pos, out)
            pos = skip_chars(src, pos, TOML_WS)
        elif char != "#":
            raise suffixed_err(src, pos, "Invalid statement")

        # 3. Skip comment
        pos = skip_comment(src, pos)

        # 4. Expect end of line or end of file
        try:
            char = src[pos]
        except IndexError:
            break
        if char != "\n":
            raise suffixed_err(src, pos, "Expected newline or end of document after a statement")
        pos += 1

    # The plain dict built up inside the NestedDict wrapper is the result.
    return out.data.dict
|
||||
|
||||
|
||||
class Flags:
    """Flags that map to parsed keys/namespaces."""

    # Marks an immutable namespace (inline array or inline table).
    FROZEN = 0
    # Marks a nest that has been explicitly created and can no longer
    # be opened using the "[table]" syntax.
    EXPLICIT_NEST = 1

    def __init__(self) -> None:
        # A trie keyed by key parts. Each node holds its own flags, flags that
        # apply recursively to its whole subtree, and its child nodes.
        self._flags: Dict[str, dict] = {}

    @staticmethod
    def _blank_node() -> dict:
        # A fresh trie node: no flags set, no children yet.
        return {"flags": set(), "recursive_flags": set(), "nested": {}}

    def unset_all(self, key: Key) -> None:
        """Drop all flags at and below *key* (no-op if the path is absent)."""
        node = self._flags
        for part in key[:-1]:
            if part not in node:
                return
            node = node[part]["nested"]
        node.pop(key[-1], None)

    def set_for_relative_key(self, head_key: Key, rel_key: Key, flag: int) -> None:
        """Set *flag* on every node along *rel_key*, anchored under *head_key*."""
        node = self._flags
        for part in head_key:
            if part not in node:
                node[part] = self._blank_node()
            node = node[part]["nested"]
        for part in rel_key:
            if part in node:
                node[part]["flags"].add(flag)
            else:
                entry = self._blank_node()
                entry["flags"].add(flag)
                node[part] = entry
            node = node[part]["nested"]

    def set(self, key: Key, flag: int, *, recursive: bool) -> None:  # noqa: A003
        """Set *flag* on *key*; ``recursive=True`` marks its whole subtree."""
        node = self._flags
        for part in key[:-1]:
            if part not in node:
                node[part] = self._blank_node()
            node = node[part]["nested"]
        stem = key[-1]
        if stem not in node:
            node[stem] = self._blank_node()
        node[stem]["recursive_flags" if recursive else "flags"].add(flag)

    def is_(self, key: Key, flag: int) -> bool:
        """Return True if *flag* applies to *key*, directly or via an ancestor's
        recursive flag."""
        if not key:
            return False  # document root has no flags
        node = self._flags
        for part in key[:-1]:
            if part not in node:
                return False
            entry = node[part]
            # A recursive flag anywhere on the path covers this key too.
            if flag in entry["recursive_flags"]:
                return True
            node = entry["nested"]
        stem = key[-1]
        if stem not in node:
            return False
        entry = node[stem]
        return flag in entry["flags"] or flag in entry["recursive_flags"]
|
||||
|
||||
|
||||
class NestedDict:
    """Builds the output document as a plain nested dict."""

    def __init__(self) -> None:
        # The parsed content of the TOML document
        self.dict: Dict[str, Any] = {}

    def get_or_create_nest(
        self,
        key: Key,
        *,
        access_lists: bool = True,
    ) -> dict:
        """Walk *key*, creating empty dicts as needed, and return the final nest.

        Raises KeyError if a non-dict value already sits on the path.
        """
        node: Any = self.dict
        for part in key:
            node = node.setdefault(part, {})
            if access_lists and isinstance(node, list):
                # An array of tables: descend into its most recent element.
                node = node[-1]
            if not isinstance(node, dict):
                raise KeyError("There is no nest behind this key")
        return node

    def append_nest_to_list(self, key: Key) -> None:
        """Append a new empty table to the list at *key*, creating the list
        if absent; raises KeyError if something other than a list is there."""
        parent = self.get_or_create_nest(key[:-1])
        target = parent.setdefault(key[-1], [])
        if not isinstance(target, list):
            raise KeyError("An object other than list found behind this key")
        target.append({})
|
||||
|
||||
|
||||
class Output(NamedTuple):
    """The parser's working state: document being built plus namespace flags."""

    data: NestedDict
    flags: Flags
|
||||
|
||||
|
||||
def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
    """Advance *pos* past any run of characters from *chars*; return the
    first index (or end of *src*) whose character is not in the set."""
    length = len(src)
    while pos < length and src[pos] in chars:
        pos += 1
    return pos
|
||||
|
||||
|
||||
def skip_until(
    src: str,
    pos: Pos,
    expect: str,
    *,
    error_on: FrozenSet[str],
    error_on_eof: bool,
) -> Pos:
    """Return the index where *expect* next occurs at or after *pos*.

    If *expect* is absent: return ``len(src)``, or raise if *error_on_eof*.
    Raises if any character from *error_on* appears before the target.
    """
    found = src.find(expect, pos)
    if found == -1:
        found = len(src)
        if error_on_eof:
            raise suffixed_err(src, found, f'Expected "{expect!r}"')

    if not error_on.isdisjoint(src[pos:found]):
        # Locate the first offending character so the error points at it.
        while src[pos] not in error_on:
            pos += 1
        raise suffixed_err(src, pos, f'Found invalid character "{src[pos]!r}"')
    return found
|
||||
|
||||
|
||||
def skip_comment(src: str, pos: Pos) -> Pos:
    """If a "#" comment starts at *pos*, skip to the next newline (or end of
    document); otherwise return *pos* unchanged."""
    if pos < len(src) and src[pos] == "#":
        return skip_until(src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False)
    return pos
|
||||
|
||||
|
||||
def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
    """Skip any mix of whitespace, newlines and comments (as allowed inside
    arrays), repeating until nothing more is consumed."""
    while True:
        start = pos
        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
        pos = skip_comment(src, pos)
        if pos == start:
            # A full pass consumed nothing: done.
            return pos
|
||||
|
||||
|
||||
def create_dict_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]:
    """Parse a `[table]` header; return position past "]" and the new namespace."""
    pos += 1  # Skip "["
    pos = skip_chars(src, pos, TOML_WS)
    pos, key = parse_key(src, pos)

    # A table may be declared only once, and never inside a frozen namespace.
    if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
        raise suffixed_err(src, pos, f"Can not declare {key} twice")
    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
    try:
        out.data.get_or_create_nest(key)
    except KeyError:
        raise suffixed_err(src, pos, "Can not overwrite a value")

    if not src.startswith("]", pos):
        raise suffixed_err(src, pos, 'Expected "]" at the end of a table declaration')
    return pos + 1, key
|
||||
|
||||
|
||||
def create_list_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]:
    """Parse a `[[array-of-tables]]` header; return position past "]]" and the
    namespace of the freshly appended table."""
    pos += 2  # Skip "[["
    pos = skip_chars(src, pos, TOML_WS)
    pos, key = parse_key(src, pos)

    if out.flags.is_(key, Flags.FROZEN):
        raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}")
    # Free the namespace now that it points to another empty list item...
    out.flags.unset_all(key)
    # ...but this key precisely is still prohibited from table declaration
    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
    try:
        out.data.append_nest_to_list(key)
    except KeyError:
        raise suffixed_err(src, pos, "Can not overwrite a value")

    if not src.startswith("]]", pos):
        raise suffixed_err(src, pos, 'Expected "]]" at the end of an array declaration')
    return pos + 2, key
|
||||
|
||||
|
||||
def key_value_rule(src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat) -> Pos:
    """Parse one `key = value` statement inside namespace *header* and store
    the value into the output document; return position after the value."""
    pos, key, value = parse_key_value_pair(src, pos, parse_float)
    key_parent, key_stem = key[:-1], key[-1]
    abs_key_parent = header + key_parent

    if out.flags.is_(abs_key_parent, Flags.FROZEN):
        raise suffixed_err(src, pos, f"Can not mutate immutable namespace {abs_key_parent}")
    # Containers in the relative path can't be opened with the table syntax after this
    out.flags.set_for_relative_key(header, key, Flags.EXPLICIT_NEST)
    try:
        nest = out.data.get_or_create_nest(abs_key_parent)
    except KeyError:
        raise suffixed_err(src, pos, "Can not overwrite a value")
    if key_stem in nest:
        raise suffixed_err(src, pos, "Can not overwrite a value")
    # Mark inline table and array namespaces recursively immutable
    if isinstance(value, (dict, list)):
        out.flags.set(header + key, Flags.FROZEN, recursive=True)
    nest[key_stem] = value
    return pos
|
||||
|
||||
|
||||
def parse_key_value_pair(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, Key, Any]:
    """Parse `key = value`; return position after the value, the key tuple,
    and the parsed value."""
    pos, key = parse_key(src, pos)
    try:
        char: Optional[str] = src[pos]
    except IndexError:
        char = None
    if char != "=":
        raise suffixed_err(src, pos, 'Expected "=" after a key in a key/value pair')
    pos += 1
    pos = skip_chars(src, pos, TOML_WS)
    pos, value = parse_value(src, pos, parse_float)
    return pos, key, value
|
||||
|
||||
|
||||
def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]:
    """Parse a (possibly dotted) key; return position after it and the tuple
    of its parts. Whitespace around dots is skipped."""
    pos, key_part = parse_key_part(src, pos)
    key: Key = (key_part,)
    pos = skip_chars(src, pos, TOML_WS)
    while True:
        try:
            char: Optional[str] = src[pos]
        except IndexError:
            char = None
        if char != ".":
            # No further dotted component: the key is complete.
            return pos, key
        pos += 1
        pos = skip_chars(src, pos, TOML_WS)
        pos, key_part = parse_key_part(src, pos)
        key += (key_part,)
        pos = skip_chars(src, pos, TOML_WS)
|
||||
|
||||
|
||||
def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]:
    """Parse a single key component: bare, literal ('...'), or basic ("...")."""
    try:
        char: Optional[str] = src[pos]
    except IndexError:
        char = None
    if char in BARE_KEY_CHARS:
        start_pos = pos
        pos = skip_chars(src, pos, BARE_KEY_CHARS)
        return pos, src[start_pos:pos]
    if char == "'":
        return parse_literal_str(src, pos)
    if char == '"':
        return parse_one_line_basic_str(src, pos)
    raise suffixed_err(src, pos, "Invalid initial character for a key part")
|
||||
|
||||
|
||||
def parse_one_line_basic_str(src: str, pos: Pos) -> Tuple[Pos, str]:
    """Parse a one-line basic ("...") string; *pos* is at the opening quote."""
    pos += 1
    return parse_basic_str(src, pos, multiline=False)
|
||||
|
||||
|
||||
def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]:
    """Parse an array `[ v, v, ... ]`; *pos* is at the "[". Trailing commas
    are allowed; comments and newlines may appear between items."""
    pos += 1
    array: list = []

    pos = skip_comments_and_array_ws(src, pos)
    if src.startswith("]", pos):
        return pos + 1, array
    while True:
        pos, val = parse_value(src, pos, parse_float)
        array.append(val)
        pos = skip_comments_and_array_ws(src, pos)

        # Slicing (not indexing) so end-of-document yields "" rather than raising.
        c = src[pos : pos + 1]
        if c == "]":
            return pos + 1, array
        if c != ",":
            raise suffixed_err(src, pos, "Unclosed array")
        pos += 1

        pos = skip_comments_and_array_ws(src, pos)
        # A "]" right after a comma means the array had a trailing comma.
        if src.startswith("]", pos):
            return pos + 1, array
|
||||
|
||||
|
||||
def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, dict]:
    """Parse an inline table `{ k = v, ... }`; *pos* is at the "{".

    Uses a private NestedDict/Flags pair so dotted keys and immutability
    inside the inline table are tracked independently of the document.
    """
    pos += 1
    nested_dict = NestedDict()
    flags = Flags()

    pos = skip_chars(src, pos, TOML_WS)
    if src.startswith("}", pos):
        return pos + 1, nested_dict.dict
    while True:
        pos, key, value = parse_key_value_pair(src, pos, parse_float)
        key_parent, key_stem = key[:-1], key[-1]
        if flags.is_(key, Flags.FROZEN):
            raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}")
        try:
            nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
        except KeyError:
            raise suffixed_err(src, pos, "Can not overwrite a value")
        if key_stem in nest:
            raise suffixed_err(src, pos, f'Duplicate inline table key "{key_stem}"')
        nest[key_stem] = value
        pos = skip_chars(src, pos, TOML_WS)
        # Slicing so end-of-document yields "" rather than raising IndexError.
        c = src[pos : pos + 1]
        if c == "}":
            return pos + 1, nested_dict.dict
        if c != ",":
            raise suffixed_err(src, pos, "Unclosed inline table")
        # Container values become recursively immutable namespaces.
        if isinstance(value, (dict, list)):
            flags.set(key, Flags.FROZEN, recursive=True)
        pos += 1
        pos = skip_chars(src, pos, TOML_WS)
|
||||
|
||||
|
||||
def parse_basic_str_escape(  # noqa: C901
    src: str, pos: Pos, *, multiline: bool = False
) -> Tuple[Pos, str]:
    """Parse one escape sequence starting at the backslash at *pos*; return
    position after the escape and its replacement text."""
    escape_id = src[pos : pos + 2]
    pos += 2
    # Line-ending backslash: only legal in multiline strings.
    if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
        # Skip whitespace until next non-whitespace character or end of
        # the doc. Error if non-whitespace is found before newline.
        if escape_id != "\\\n":
            pos = skip_chars(src, pos, TOML_WS)
            try:
                char = src[pos]
            except IndexError:
                return pos, ""
            if char != "\n":
                raise suffixed_err(src, pos, 'Unescaped "\\" in a string')
            pos += 1
        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
        return pos, ""
    if escape_id == "\\u":
        return parse_hex_char(src, pos, 4)
    if escape_id == "\\U":
        return parse_hex_char(src, pos, 8)
    try:
        return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
    except KeyError:
        # A short escape_id means the document ended right after the backslash.
        if len(escape_id) != 2:
            raise suffixed_err(src, pos, "Unterminated string")
        raise suffixed_err(src, pos, 'Unescaped "\\" in a string')
|
||||
|
||||
|
||||
def parse_basic_str_escape_multiline(src: str, pos: Pos) -> Tuple[Pos, str]:
    """Like `parse_basic_str_escape`, with multiline-only escapes enabled."""
    return parse_basic_str_escape(src, pos, multiline=True)
|
||||
|
||||
|
||||
def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]:
    """Decode exactly *hex_len* hex digits at *pos* into one character."""
    digits = src[pos : pos + hex_len]
    # Short slice means the document ended early; issuperset-style check via any().
    if len(digits) != hex_len or any(c not in HEXDIGIT_CHARS for c in digits):
        raise suffixed_err(src, pos, "Invalid hex value")
    pos += hex_len
    codepoint = int(digits, 16)
    if not is_unicode_scalar_value(codepoint):
        raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
    return pos, chr(codepoint)
|
||||
|
||||
|
||||
def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]:
    """Parse a one-line literal ('...') string; *pos* is at the opening quote."""
    start = pos + 1  # Skip starting apostrophe
    end = skip_until(src, start, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True)
    return end + 1, src[start:end]  # Skip ending apostrophe
|
||||
|
||||
|
||||
def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]:
    """Parse a multiline string; *pos* is at the opening triple delimiter.

    ``literal=True`` parses '''...''' (no escapes), else \"\"\"...\"\"\".
    """
    pos += 3
    # A newline immediately after the opening delimiter is trimmed per spec.
    if src.startswith("\n", pos):
        pos += 1

    if literal:
        delim = "'"
        end_pos = skip_until(
            src,
            pos,
            "'''",
            error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
            error_on_eof=True,
        )
        result = src[pos:end_pos]
        pos = end_pos + 3
    else:
        delim = '"'
        pos, result = parse_basic_str(src, pos, multiline=True)

    # Add at maximum two extra apostrophes/quotes if the end sequence
    # is 4 or 5 chars long instead of just 3.
    if not src.startswith(delim, pos):
        return pos, result
    pos += 1
    if not src.startswith(delim, pos):
        return pos, result + delim
    pos += 1
    return pos, result + (delim * 2)
|
||||
|
||||
|
||||
def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]:
    """Parse the body of a basic string; *pos* is just past the opening
    quote(s). Returns position after the closing quote(s) and the text."""
    if multiline:
        error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
        parse_escapes = parse_basic_str_escape_multiline
    else:
        error_on = ILLEGAL_BASIC_STR_CHARS
        parse_escapes = parse_basic_str_escape
    # Accumulate literal runs lazily: `result` holds decoded text so far,
    # src[start_pos:pos] is the pending run with no escapes in it.
    result = ""
    start_pos = pos
    while True:
        try:
            char = src[pos]
        except IndexError:
            raise suffixed_err(src, pos, "Unterminated string")
        if char == '"':
            if not multiline:
                return pos + 1, result + src[start_pos:pos]
            if src.startswith('"""', pos):
                return pos + 3, result + src[start_pos:pos]
            # A lone quote inside a multiline string is ordinary content.
            pos += 1
            continue
        if char == "\\":
            result += src[start_pos:pos]
            pos, parsed_escape = parse_escapes(src, pos)
            result += parsed_escape
            start_pos = pos
            continue
        if char in error_on:
            raise suffixed_err(src, pos, f'Illegal character "{char!r}"')
        pos += 1
||||
|
||||
|
||||
def parse_value(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, Any]:  # noqa: C901
    """Dispatch on the first character(s) at *pos* and parse a single TOML
    value; return position after it and the parsed Python object."""
    try:
        char: Optional[str] = src[pos]
    except IndexError:
        char = None

    # Basic strings
    if char == '"':
        if src.startswith('"""', pos):
            return parse_multiline_str(src, pos, literal=False)
        return parse_one_line_basic_str(src, pos)

    # Literal strings
    if char == "'":
        if src.startswith("'''", pos):
            return parse_multiline_str(src, pos, literal=True)
        return parse_literal_str(src, pos)

    # Booleans
    if char == "t":
        if src.startswith("true", pos):
            return pos + 4, True
    if char == "f":
        if src.startswith("false", pos):
            return pos + 5, False

    # Dates and times
    datetime_match = RE_DATETIME.match(src, pos)
    if datetime_match:
        try:
            datetime_obj = match_to_datetime(datetime_match)
        except ValueError:
            raise suffixed_err(src, pos, "Invalid date or datetime")
        return datetime_match.end(), datetime_obj
    localtime_match = RE_LOCALTIME.match(src, pos)
    if localtime_match:
        return localtime_match.end(), match_to_localtime(localtime_match)

    # Integers and "normal" floats.
    # The regex will greedily match any type starting with a decimal
    # char, so needs to be located after handling of dates and times.
    number_match = RE_NUMBER.match(src, pos)
    if number_match:
        return number_match.end(), match_to_number(number_match, parse_float)

    # Arrays
    if char == "[":
        return parse_array(src, pos, parse_float)

    # Inline tables
    if char == "{":
        return parse_inline_table(src, pos, parse_float)

    # Special floats
    first_three = src[pos : pos + 3]
    if first_three in {"inf", "nan"}:
        return pos + 3, parse_float(first_three)
    first_four = src[pos : pos + 4]
    if first_four in {"-inf", "+inf", "-nan", "+nan"}:
        return pos + 4, parse_float(first_four)

    raise suffixed_err(src, pos, "Invalid value")
|
||||
|
||||
|
||||
def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
    """Return a `TOMLDecodeError` where error message is suffixed with
    coordinates in source."""
    if pos >= len(src):
        location = "end of document"
    else:
        # Lines and columns are 1-based; on line 1 there is no preceding
        # newline, so the column is just the offset plus one.
        line = src.count("\n", 0, pos) + 1
        if line == 1:
            column = pos + 1
        else:
            column = pos - src.rindex("\n", 0, pos)
        location = f"line {line}, column {column}"

    return TOMLDecodeError(f"{msg} (at {location})")
|
||||
|
||||
|
||||
def is_unicode_scalar_value(codepoint: int) -> bool:
    """True for Unicode scalar values: all code points up to U+10FFFF except
    the UTF-16 surrogate range U+D800..U+DFFF."""
    return 0 <= codepoint < 0xD800 or 0xDFFF < codepoint <= 0x10FFFF
|
||||
100
.venv/lib/python3.8/site-packages/isort/_vendored/tomli/_re.py
Normal file
100
.venv/lib/python3.8/site-packages/isort/_vendored/tomli/_re.py
Normal file
|
|
@ -0,0 +1,100 @@
|
|||
import re
|
||||
from datetime import date, datetime, time, timedelta, timezone, tzinfo
|
||||
from functools import lru_cache
|
||||
from typing import TYPE_CHECKING, Any, Optional, Union
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from tomli._parser import ParseFloat
|
||||
|
||||
# E.g.
|
||||
# - 00:32:00.999999
|
||||
# - 00:32:00
|
||||
_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
|
||||
|
||||
RE_NUMBER = re.compile(
|
||||
r"""
|
||||
0
|
||||
(?:
|
||||
x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
|
||||
|
|
||||
b[01](?:_?[01])* # bin
|
||||
|
|
||||
o[0-7](?:_?[0-7])* # oct
|
||||
)
|
||||
|
|
||||
[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
|
||||
(?P<floatpart>
|
||||
(?:\.[0-9](?:_?[0-9])*)? # optional fractional part
|
||||
(?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
|
||||
)
|
||||
""",
|
||||
flags=re.VERBOSE,
|
||||
)
|
||||
RE_LOCALTIME = re.compile(_TIME_RE_STR)
|
||||
RE_DATETIME = re.compile(
|
||||
fr"""
|
||||
([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
|
||||
(?:
|
||||
[T ]
|
||||
{_TIME_RE_STR}
|
||||
(?:(Z)|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
|
||||
)?
|
||||
""",
|
||||
flags=re.VERBOSE,
|
||||
)
|
||||
|
||||
|
||||
def match_to_datetime(match: "re.Match") -> Union[datetime, date]:
    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.

    Raises ValueError if the match does not correspond to a valid date
    or datetime.
    """
    (
        year_str,
        month_str,
        day_str,
        hour_str,
        minute_str,
        sec_str,
        micros_str,
        zulu_time,
        offset_sign_str,
        offset_hour_str,
        offset_minute_str,
    ) = match.groups()
    year, month, day = int(year_str), int(month_str), int(day_str)
    # No time component at all means the value is a plain date.
    if hour_str is None:
        return date(year, month, day)
    hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
    # The fractional-seconds group is right-padded to six digits (microseconds).
    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
    if offset_sign_str:
        tz: Optional[tzinfo] = cached_tz(offset_hour_str, offset_minute_str, offset_sign_str)
    elif zulu_time:
        tz = timezone.utc
    else:  # local date-time
        tz = None
    return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
|
||||
|
||||
|
||||
@lru_cache(maxsize=None)
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
    """Build (and memoize) a fixed-offset timezone from matched offset strings."""
    sign = 1 if sign_str == "+" else -1
    offset = timedelta(hours=sign * int(hour_str), minutes=sign * int(minute_str))
    return timezone(offset)
|
||||
|
||||
|
||||
def match_to_localtime(match: "re.Match") -> time:
    """Convert a `RE_LOCALTIME` match into a `datetime.time`."""
    hour_str, minute_str, sec_str, micros_str = match.groups()
    if micros_str:
        # Fractional seconds: right-pad to six digits to get microseconds.
        micros = int(micros_str.ljust(6, "0"))
    else:
        micros = 0
    return time(int(hour_str), int(minute_str), int(sec_str), micros)
|
||||
|
||||
|
||||
def match_to_number(match: "re.Match", parse_float: "ParseFloat") -> Any:
    """Convert a `RE_NUMBER` match to an int, or to *parse_float*'s result
    when the match included a fractional/exponent part."""
    text = match.group()
    if match.group("floatpart"):
        return parse_float(text)
    # Base 0 lets int() honour the 0x / 0o / 0b prefixes.
    return int(text, 0)
|
||||
|
|
@ -0,0 +1 @@
|
|||
# Marker file for PEP 561
|
||||
1
.venv/lib/python3.8/site-packages/isort/_version.py
Normal file
1
.venv/lib/python3.8/site-packages/isort/_version.py
Normal file
|
|
@ -0,0 +1 @@
|
|||
__version__ = "5.10.1"
|
||||
651
.venv/lib/python3.8/site-packages/isort/api.py
Normal file
651
.venv/lib/python3.8/site-packages/isort/api.py
Normal file
|
|
@ -0,0 +1,651 @@
|
|||
__all__ = (
|
||||
"ImportKey",
|
||||
"check_code_string",
|
||||
"check_file",
|
||||
"check_stream",
|
||||
"find_imports_in_code",
|
||||
"find_imports_in_file",
|
||||
"find_imports_in_paths",
|
||||
"find_imports_in_stream",
|
||||
"place_module",
|
||||
"place_module_with_reason",
|
||||
"sort_code_string",
|
||||
"sort_file",
|
||||
"sort_stream",
|
||||
)
|
||||
|
||||
import contextlib
|
||||
import shutil
|
||||
import sys
|
||||
from enum import Enum
|
||||
from io import StringIO
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
from typing import Any, Iterator, Optional, Set, TextIO, Union, cast
|
||||
from warnings import warn
|
||||
|
||||
from isort import core
|
||||
|
||||
from . import files, identify, io
|
||||
from .exceptions import (
|
||||
ExistingSyntaxErrors,
|
||||
FileSkipComment,
|
||||
FileSkipSetting,
|
||||
IntroducedSyntaxErrors,
|
||||
)
|
||||
from .format import ask_whether_to_apply_changes_to_file, create_terminal_printer, show_unified_diff
|
||||
from .io import Empty, File
|
||||
from .place import module as place_module # noqa: F401
|
||||
from .place import module_with_reason as place_module_with_reason # noqa: F401
|
||||
from .settings import CYTHON_EXTENSIONS, DEFAULT_CONFIG, Config
|
||||
|
||||
|
||||
class ImportKey(Enum):
    """Defines how to key an individual import, generally for deduping.

    Import keys are defined from less to more specific:

    from x.y import z as a
    ______|        |    |
       |           |    |
    PACKAGE        |    |
    ________       |    |
        |          |    |
     MODULE        |    |
    _______________|    |
            |           |
        ATTRIBUTE       |
    ____________________|
               |
             ALIAS
    """

    PACKAGE = 1
    MODULE = 2
    ATTRIBUTE = 3
    ALIAS = 4
|
||||
|
||||
|
||||
def sort_code_string(
    code: str,
    extension: Optional[str] = None,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    disregard_skip: bool = False,
    show_diff: Union[bool, TextIO] = False,
    **config_kwargs: Any,
) -> str:
    """Sort the imports within ``code`` and return the resulting source string.

    - **code**: The string of code with imports that need to be sorted.
    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
    - **config**: The config object to use when sorting imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
    - ****config_kwargs**: Any config modifications.
    """
    resolved_config = _config(path=file_path, config=config, **config_kwargs)
    sorted_output = StringIO()
    sort_stream(
        StringIO(code),
        sorted_output,
        extension=extension,
        config=resolved_config,
        file_path=file_path,
        disregard_skip=disregard_skip,
        show_diff=show_diff,
    )
    # getvalue() returns the full buffer contents regardless of stream position.
    return sorted_output.getvalue()
|
||||
|
||||
|
||||
def check_code_string(
    code: str,
    show_diff: Union[bool, TextIO] = False,
    extension: Optional[str] = None,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    disregard_skip: bool = False,
    **config_kwargs: Any,
) -> bool:
    """Report whether the imports within ``code`` are correctly ordered,
    formatted, and categorized. Returns `True` when everything is correct,
    otherwise `False`.

    - **code**: The string of code with imports that need to be checked.
    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
    - **config**: The config object to use when checking imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
    - ****config_kwargs**: Any config modifications.
    """
    resolved_config = _config(path=file_path, config=config, **config_kwargs)
    code_stream = StringIO(code)
    return check_stream(
        code_stream,
        show_diff=show_diff,
        extension=extension,
        config=resolved_config,
        file_path=file_path,
        disregard_skip=disregard_skip,
    )
|
||||
|
||||
|
||||
def sort_stream(
    input_stream: TextIO,
    output_stream: TextIO,
    extension: Optional[str] = None,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    disregard_skip: bool = False,
    show_diff: Union[bool, TextIO] = False,
    raise_on_skip: bool = True,
    **config_kwargs: Any,
) -> bool:
    """Sorts any imports within the provided code stream, outputs to the provided output stream.
    Returns `True` if anything is modified from the original input stream, otherwise `False`.

    - **input_stream**: The stream of code with imports that need to be sorted.
    - **output_stream**: The stream where sorted imports should be written to.
    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
    - **config**: The config object to use when sorting imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
    - ****config_kwargs**: Any config modifications.
    """
    extension = extension or (file_path and file_path.suffix.lstrip(".")) or "py"
    if show_diff:
        # Diff mode: recurse with in-memory buffers so both the original and
        # the sorted text can be re-read to render a unified diff.
        _output_stream = StringIO()
        _input_stream = StringIO(input_stream.read())
        changed = sort_stream(
            input_stream=_input_stream,
            output_stream=_output_stream,
            extension=extension,
            config=config,
            file_path=file_path,
            disregard_skip=disregard_skip,
            raise_on_skip=raise_on_skip,
            **config_kwargs,
        )
        _output_stream.seek(0)
        _input_stream.seek(0)
        show_unified_diff(
            file_input=_input_stream.read(),
            file_output=_output_stream.read(),
            file_path=file_path,
            # show_diff is True -> write the diff to the caller's output stream;
            # otherwise show_diff itself is the TextIO destination.
            output=output_stream if show_diff is True else cast(TextIO, show_diff),
            color_output=config.color_output,
        )
        return changed

    config = _config(path=file_path, config=config, **config_kwargs)
    content_source = str(file_path or "Passed in content")
    if not disregard_skip and file_path and config.is_skipped(file_path):
        raise FileSkipSetting(content_source)

    _internal_output = output_stream

    if config.atomic:
        # Atomic mode refuses to touch code that does not already compile
        # (except for Cython extensions, which the Python compiler cannot parse).
        try:
            file_content = input_stream.read()
            compile(file_content, content_source, "exec", 0, 1)
        except SyntaxError:
            if extension not in CYTHON_EXTENSIONS:
                raise ExistingSyntaxErrors(content_source)
            if config.verbose:
                warn(
                    f"{content_source} Python AST errors found but ignored due to Cython extension"
                )
        # The compile check consumed the stream; replay it from memory.
        input_stream = StringIO(file_content)

    if not output_stream.readable():
        # Buffer into memory when the caller's stream is write-only, so the
        # atomic post-check below can re-read what was produced.
        _internal_output = StringIO()

    try:
        changed = core.process(
            input_stream,
            _internal_output,
            extension=extension,
            config=config,
            raise_on_skip=raise_on_skip,
        )
    except FileSkipComment:
        # Re-raise with the resolved content source for a clearer message.
        raise FileSkipComment(content_source)

    if config.atomic:
        _internal_output.seek(0)
        try:
            compile(_internal_output.read(), content_source, "exec", 0, 1)
            _internal_output.seek(0)
        except SyntaxError:  # pragma: no cover
            if extension not in CYTHON_EXTENSIONS:
                raise IntroducedSyntaxErrors(content_source)
            if config.verbose:
                warn(
                    f"{content_source} Python AST errors found but ignored due to Cython extension"
                )
        if _internal_output != output_stream:
            # Flush the in-memory buffer back to the caller's real stream.
            output_stream.write(_internal_output.read())

    return changed
|
||||
|
||||
|
||||
def check_stream(
    input_stream: TextIO,
    show_diff: Union[bool, TextIO] = False,
    extension: Optional[str] = None,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    disregard_skip: bool = False,
    **config_kwargs: Any,
) -> bool:
    """Checks any imports within the provided code stream, returning `False` if any unsorted or
    incorrectly formatted imports are found or `True` if no problems are identified.

    - **input_stream**: The stream of code with imports that need to be checked.
    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
    - **config**: The config object to use when sorting imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
    - ****config_kwargs**: Any config modifications.
    """
    config = _config(path=file_path, config=config, **config_kwargs)

    if show_diff:
        # Buffer the input so it can be re-read below to produce the diff.
        input_stream = StringIO(input_stream.read())

    # Dry-run the sort into the Empty stream; only the changed flag matters here.
    changed: bool = sort_stream(
        input_stream=input_stream,
        output_stream=Empty,
        extension=extension,
        config=config,
        file_path=file_path,
        disregard_skip=disregard_skip,
    )
    printer = create_terminal_printer(
        color=config.color_output, error=config.format_error, success=config.format_success
    )
    if not changed:
        if config.verbose and not config.only_modified:
            printer.success(f"{file_path or ''} Everything Looks Good!")
        return True

    printer.error(f"{file_path or ''} Imports are incorrectly sorted and/or formatted.")
    if show_diff:
        # Sort a second time, now capturing the output, to render the diff.
        output_stream = StringIO()
        input_stream.seek(0)
        file_contents = input_stream.read()
        sort_stream(
            input_stream=StringIO(file_contents),
            output_stream=output_stream,
            extension=extension,
            config=config,
            file_path=file_path,
            disregard_skip=disregard_skip,
        )
        output_stream.seek(0)

        show_unified_diff(
            file_input=file_contents,
            file_output=output_stream.read(),
            file_path=file_path,
            # None -> stdout; otherwise show_diff itself is the TextIO destination.
            output=None if show_diff is True else cast(TextIO, show_diff),
            color_output=config.color_output,
        )
    return False
|
||||
|
||||
|
||||
def check_file(
    filename: Union[str, Path],
    show_diff: Union[bool, TextIO] = False,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    disregard_skip: bool = True,
    extension: Optional[str] = None,
    **config_kwargs: Any,
) -> bool:
    """Checks any imports within the provided file, returning `False` if any unsorted or
    incorrectly formatted imports are found or `True` if no problems are identified.

    - **filename**: The name or Path of the file to check.
    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
    - **config**: The config object to use when sorting imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
    - ****config_kwargs**: Any config modifications.
    """
    file_config: Config = config

    # "config_trie" is an internal kwarg — presumably populated by the CLI's
    # per-directory config resolution; TODO confirm against isort.main.
    if "config_trie" in config_kwargs:
        config_trie = config_kwargs.pop("config_trie", None)
        if config_trie:
            config_info = config_trie.search(filename)
            if config.verbose:
                # Fixed: the message previously did not say which file the
                # resolved configuration applied to.
                print(f"{config_info[0]} used for file {filename}")

            file_config = Config(**config_info[1])

    with io.File.read(filename) as source_file:
        return check_stream(
            source_file.stream,
            show_diff=show_diff,
            extension=extension,
            config=file_config,
            file_path=file_path or source_file.path,
            disregard_skip=disregard_skip,
            **config_kwargs,
        )
|
||||
|
||||
|
||||
def _tmp_file(source_file: File) -> Path:
    """Return the staging path ("<name><suffix>.isorted") next to *source_file*."""
    original_path = source_file.path
    return original_path.with_suffix(original_path.suffix + ".isorted")
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _in_memory_output_stream_context() -> Iterator[TextIO]:
|
||||
yield StringIO(newline=None)
|
||||
|
||||
|
||||
@contextlib.contextmanager
def _file_output_stream_context(filename: Union[str, Path], source_file: File) -> Iterator[TextIO]:
    """Yield a writable ".isorted" staging file beside *source_file*.

    The staging file's permission bits are copied from *filename* so that a
    later rename over the original preserves the file mode.
    """
    tmp_file = _tmp_file(source_file)
    with tmp_file.open("w+", encoding=source_file.encoding, newline="") as output_stream:
        # Copy permissions before handing the stream out so the staged file can
        # replace the original without a mode change.
        shutil.copymode(filename, tmp_file)
        yield output_stream
|
||||
|
||||
|
||||
def sort_file(
    filename: Union[str, Path],
    extension: Optional[str] = None,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    disregard_skip: bool = True,
    ask_to_apply: bool = False,
    show_diff: Union[bool, TextIO] = False,
    write_to_stdout: bool = False,
    output: Optional[TextIO] = None,
    **config_kwargs: Any,
) -> bool:
    """Sorts and formats any groups of imports within the provided file or Path.
    Returns `True` if the file has been changed, otherwise `False`.

    - **filename**: The name or Path of the file to format.
    - **extension**: The file extension that contains imports. Defaults to filename extension or py.
    - **config**: The config object to use when sorting imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **disregard_skip**: set to `True` if you want to ignore a skip set in config for this file.
    - **ask_to_apply**: If `True`, prompt before applying any changes.
    - **show_diff**: If `True` the changes that need to be done will be printed to stdout, if a
      TextIO stream is provided results will be written to it, otherwise no diff will be computed.
    - **write_to_stdout**: If `True`, write to stdout instead of the input file.
    - **output**: If a TextIO is provided, results will be written there rather than replacing
      the original file content.
    - ****config_kwargs**: Any config modifications.
    """
    file_config: Config = config

    # "config_trie" is an internal kwarg — presumably populated by the CLI's
    # per-directory config resolution; TODO confirm against isort.main.
    if "config_trie" in config_kwargs:
        config_trie = config_kwargs.pop("config_trie", None)
        if config_trie:
            config_info = config_trie.search(filename)
            if config.verbose:
                # Fixed: the message previously did not say which file the
                # resolved configuration applied to.
                print(f"{config_info[0]} used for file {filename}")

            file_config = Config(**config_info[1])

    with io.File.read(filename) as source_file:
        actual_file_path = file_path or source_file.path
        config = _config(path=actual_file_path, config=file_config, **config_kwargs)
        changed: bool = False
        try:
            if write_to_stdout:
                changed = sort_stream(
                    input_stream=source_file.stream,
                    output_stream=sys.stdout,
                    config=config,
                    file_path=actual_file_path,
                    disregard_skip=disregard_skip,
                    extension=extension,
                )
            else:
                if output is None:
                    try:
                        # In-place overwrite sorts into memory and writes back;
                        # otherwise sort into a sibling ".isorted" staging file.
                        if config.overwrite_in_place:
                            output_stream_context = _in_memory_output_stream_context()
                        else:
                            output_stream_context = _file_output_stream_context(
                                filename, source_file
                            )
                        with output_stream_context as output_stream:
                            changed = sort_stream(
                                input_stream=source_file.stream,
                                output_stream=output_stream,
                                config=config,
                                file_path=actual_file_path,
                                disregard_skip=disregard_skip,
                                extension=extension,
                            )
                            output_stream.seek(0)
                            if changed:
                                if show_diff or ask_to_apply:
                                    source_file.stream.seek(0)
                                    show_unified_diff(
                                        file_input=source_file.stream.read(),
                                        file_output=output_stream.read(),
                                        file_path=actual_file_path,
                                        output=None
                                        if show_diff is True
                                        else cast(TextIO, show_diff),
                                        color_output=config.color_output,
                                    )
                                    # Diff mode never applies changes; prompting
                                    # mode applies only on user confirmation.
                                    if show_diff or (
                                        ask_to_apply
                                        and not ask_whether_to_apply_changes_to_file(
                                            str(source_file.path)
                                        )
                                    ):
                                        return False
                                source_file.stream.close()
                                if config.overwrite_in_place:
                                    output_stream.seek(0)
                                    # Fixed: reuse the encoding the file was read
                                    # with; the default locale encoding could
                                    # corrupt non-ASCII sources on write-back.
                                    with source_file.path.open(
                                        "w", encoding=source_file.encoding
                                    ) as fs:
                                        shutil.copyfileobj(output_stream, fs)
                        if changed:
                            if not config.overwrite_in_place:
                                tmp_file = _tmp_file(source_file)
                                tmp_file.replace(source_file.path)
                            if not config.quiet:
                                print(f"Fixing {source_file.path}")
                    finally:
                        try:  # Python 3.8+: use `missing_ok=True` instead of try except.
                            if not config.overwrite_in_place:  # pragma: no branch
                                tmp_file = _tmp_file(source_file)
                                tmp_file.unlink()
                        except FileNotFoundError:
                            pass  # pragma: no cover
                else:
                    changed = sort_stream(
                        input_stream=source_file.stream,
                        output_stream=output,
                        config=config,
                        file_path=actual_file_path,
                        disregard_skip=disregard_skip,
                        extension=extension,
                    )
                    if changed and show_diff:
                        source_file.stream.seek(0)
                        output.seek(0)
                        show_unified_diff(
                            file_input=source_file.stream.read(),
                            file_output=output.read(),
                            file_path=actual_file_path,
                            output=None if show_diff is True else cast(TextIO, show_diff),
                            color_output=config.color_output,
                        )
                    source_file.stream.close()

        except ExistingSyntaxErrors:
            warn(f"{actual_file_path} unable to sort due to existing syntax errors")
        except IntroducedSyntaxErrors:  # pragma: no cover
            warn(f"{actual_file_path} unable to sort as isort introduces new syntax errors")

        return changed
|
||||
|
||||
|
||||
def find_imports_in_code(
    code: str,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    unique: Union[bool, ImportKey] = False,
    top_only: bool = False,
    **config_kwargs: Any,
) -> Iterator[identify.Import]:
    """Find and yield every import within the provided code string.

    - **code**: The string of code to scan for imports.
    - **config**: The config object to use while identifying imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **unique**: If True, only the first instance of an import is returned.
    - **top_only**: If True, only return imports that occur before the first function or class.
    - ****config_kwargs**: Any config modifications.
    """
    code_stream = StringIO(code)
    yield from find_imports_in_stream(
        input_stream=code_stream,
        config=config,
        file_path=file_path,
        unique=unique,
        top_only=top_only,
        **config_kwargs,
    )
|
||||
|
||||
|
||||
def find_imports_in_stream(
    input_stream: TextIO,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    unique: Union[bool, ImportKey] = False,
    top_only: bool = False,
    _seen: Optional[Set[str]] = None,
    **config_kwargs: Any,
) -> Iterator[identify.Import]:
    """Finds and returns all imports within the provided code stream.

    - **input_stream**: The stream of code to scan for imports.
    - **config**: The config object to use while identifying imports.
    - **file_path**: The disk location where the code string was pulled from.
    - **unique**: If True, only the first instance of an import is returned.
    - **top_only**: If True, only return imports that occur before the first function or class.
    - **_seen**: An optional set of imports already seen. Generally meant only for internal use.
    - ****config_kwargs**: Any config modifications.
    """
    config = _config(config=config, **config_kwargs)
    identified_imports = identify.imports(
        input_stream, config=config, file_path=file_path, top_only=top_only
    )
    if not unique:
        yield from identified_imports
        # Fixed: return explicitly. The code previously fell through into the
        # dedupe loop below, which only did nothing because the generator had
        # already been exhausted by the yield from.
        return

    seen: Set[str] = set() if _seen is None else _seen
    for identified_import in identified_imports:
        if unique in (True, ImportKey.ALIAS):
            key = identified_import.statement()
        elif unique == ImportKey.ATTRIBUTE:
            key = f"{identified_import.module}.{identified_import.attribute}"
        elif unique == ImportKey.MODULE:
            key = identified_import.module
        elif unique == ImportKey.PACKAGE:  # pragma: no branch # type checking ensures this
            key = identified_import.module.split(".")[0]

        if key and key not in seen:
            seen.add(key)
            yield identified_import
|
||||
|
||||
|
||||
def find_imports_in_file(
    filename: Union[str, Path],
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    unique: Union[bool, ImportKey] = False,
    top_only: bool = False,
    **config_kwargs: Any,
) -> Iterator[identify.Import]:
    """Find and yield every import within the provided source file.

    - **filename**: The name or Path of the file to look for imports in.
    - **config**: The config object to use while identifying imports.
    - **file_path**: The on-disk location to associate with the stream; defaults to the
      file's own path.
    - **unique**: If True, only the first instance of an import is returned.
    - **top_only**: If True, only return imports that occur before the first function or class.
    - ****config_kwargs**: Any config modifications.
    """
    with io.File.read(filename) as source_file:
        effective_path = file_path or source_file.path
        yield from find_imports_in_stream(
            input_stream=source_file.stream,
            config=config,
            file_path=effective_path,
            unique=unique,
            top_only=top_only,
            **config_kwargs,
        )
|
||||
|
||||
|
||||
def find_imports_in_paths(
    paths: Iterator[Union[str, Path]],
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    unique: Union[bool, ImportKey] = False,
    top_only: bool = False,
    **config_kwargs: Any,
) -> Iterator[identify.Import]:
    """Finds and returns all imports within the provided source paths.

    - **paths**: A collection of paths to recursively look for imports within.
    - **config**: The config object to use while identifying imports.
    - **file_path**: The disk location where the code string was pulled from.
      NOTE(review): accepted but not forwarded anywhere below — appears unused; confirm.
    - **unique**: If True, only the first instance of an import is returned.
    - **top_only**: If True, only return imports that occur before the first function or class.
    - ****config_kwargs**: Any config modifications.
    """
    config = _config(config=config, **config_kwargs)
    # One shared `seen` set makes `unique` deduplicate across the whole path
    # collection rather than per file: `_seen` travels through
    # find_imports_in_file's **config_kwargs into find_imports_in_stream's
    # private `_seen` parameter.
    seen: Optional[Set[str]] = set() if unique else None
    yield from chain(
        *(
            find_imports_in_file(
                file_name, unique=unique, config=config, top_only=top_only, _seen=seen
            )
            for file_name in files.find(map(str, paths), config, [], [])
        )
    )
|
||||
|
||||
|
||||
def _config(
    path: Optional[Path] = None, config: Config = DEFAULT_CONFIG, **config_kwargs: Any
) -> Config:
    """Resolve the effective Config for an API call.

    A `path` becomes the `settings_path` only when the caller relies on the
    default config and has not already specified where settings come from.
    Passing both a custom Config object and kwargs is rejected.
    """
    using_default_config = config is DEFAULT_CONFIG
    settings_specified = (
        "settings_path" in config_kwargs or "settings_file" in config_kwargs
    )
    if path and using_default_config and not settings_specified:
        config_kwargs["settings_path"] = path

    if not config_kwargs:
        return config

    if not using_default_config:
        raise ValueError(
            "You can either specify custom configuration options using kwargs or "
            "passing in a Config object. Not Both!"
        )

    return Config(**config_kwargs)
|
||||
32
.venv/lib/python3.8/site-packages/isort/comments.py
Normal file
32
.venv/lib/python3.8/site-packages/isort/comments.py
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
from typing import List, Optional, Tuple
|
||||
|
||||
|
||||
def parse(line: str) -> Tuple[str, str]:
    """Split an import line at its first "#" and return
    (statement, stripped comment text); the comment is "" when absent.
    """
    statement, hash_sign, trailing = line.partition("#")
    if hash_sign:
        return (statement, trailing.strip())

    return (line, "")
|
||||
|
||||
|
||||
def add_to_line(
    comments: Optional[List[str]],
    original_string: str = "",
    removed: bool = False,
    comment_prefix: str = "",
) -> str:
    """Return *original_string* with *comments* attached (or stripped).

    When *removed* is true the statement is returned with any existing comment
    dropped. Otherwise duplicate comments are collapsed, preserving first-seen
    order, and appended after *comment_prefix*.
    """
    statement = parse(original_string)[0]
    if removed:
        return statement

    if not comments:
        return original_string

    # dict.fromkeys keeps insertion order, giving an order-preserving dedupe.
    unique_comments = list(dict.fromkeys(comments))
    return f"{statement}{comment_prefix} {'; '.join(unique_comments)}"
|
||||
476
.venv/lib/python3.8/site-packages/isort/core.py
Normal file
476
.venv/lib/python3.8/site-packages/isort/core.py
Normal file
|
|
@ -0,0 +1,476 @@
|
|||
import textwrap
|
||||
from io import StringIO
|
||||
from itertools import chain
|
||||
from typing import List, TextIO, Union
|
||||
|
||||
import isort.literal
|
||||
from isort.settings import DEFAULT_CONFIG, Config
|
||||
|
||||
from . import output, parse
|
||||
from .exceptions import FileSkipComment
|
||||
from .format import format_natural, remove_whitespace
|
||||
from .settings import FILE_SKIP_COMMENTS
|
||||
|
||||
# Tokens that introduce Cython-only import statements.
CIMPORT_IDENTIFIERS = ("cimport ", "cimport*", "from.cimport")
# Line prefixes that identify the start of any import statement (Python or Cython).
IMPORT_START_IDENTIFIERS = ("from ", "from.import", "import ", "import*") + CIMPORT_IDENTIFIERS
DOCSTRING_INDICATORS = ('"""', "'''")
# Prefixes meaning a line opens a string or comment rather than code.
COMMENT_INDICATORS = DOCSTRING_INDICATORS + ("'", '"', "#")
# Action comments that request sorting of non-import code constructs.
CODE_SORT_COMMENTS = (
    "# isort: list",
    "# isort: dict",
    "# isort: set",
    "# isort: unique-list",
    "# isort: tuple",
    "# isort: unique-tuple",
    "# isort: assignments",
)
|
||||
|
||||
|
||||
def process(
    input_stream: TextIO,
    output_stream: TextIO,
    extension: str = "py",
    raise_on_skip: bool = True,
    config: Config = DEFAULT_CONFIG,
) -> bool:
    """Parses stream identifying sections of contiguous imports and sorting them

    Code with unsorted imports is read from the provided `input_stream`, sorted and then
    outputted to the specified `output_stream`.

    - `input_stream`: Text stream with unsorted import sections.
    - `output_stream`: Text stream to output sorted inputs into.
    - `config`: Config settings to use when sorting imports. Defaults settings.
        - *Default*: `isort.settings.DEFAULT_CONFIG`.
    - `extension`: The file extension or file extension rules that should be used.
        - *Default*: `"py"`.
        - *Choices*: `["py", "pyi", "pyx"]`.

    Returns `True` if there were changes that needed to be made (errors present) from what
    was provided in the input_stream, otherwise `False`.
    """
    # --- scanner state -------------------------------------------------------
    line_separator: str = config.line_ending
    add_imports: List[str] = [format_natural(addition) for addition in config.add_imports]
    import_section: str = ""  # accumulated text of the current import section
    next_import_section: str = ""  # import that forces a new section (indent/cimport change)
    next_cimports: bool = False
    in_quote: str = ""  # currently-open quote delimiter, "" when outside strings
    was_in_quote: bool = False
    first_comment_index_start: int = -1
    first_comment_index_end: int = -1
    contains_imports: bool = False
    in_top_comment: bool = False  # inside the leading comment header of the file
    first_import_section: bool = True
    indent: str = ""
    isort_off: bool = False  # inside an "# isort: off" region
    skip_file: bool = False
    code_sorting: Union[bool, str] = False  # active CODE_SORT_COMMENTS mode, if any
    code_sorting_section: str = ""
    code_sorting_indent: str = ""
    cimports: bool = False  # current section is made of cimports
    made_changes: bool = False
    stripped_line: str = ""
    end_of_file: bool = False
    verbose_output: List[str] = []

    # --- optional pre-pass: float every import to the top of the file --------
    if config.float_to_top:
        new_input = ""
        current = ""
        isort_off = False
        for line in chain(input_stream, (None,)):
            if isort_off and line is not None:
                if line == "# isort: on\n":
                    isort_off = False
                new_input += line
            elif line in ("# isort: split\n", "# isort: off\n", None) or str(line).endswith(
                "# isort: split\n"
            ):
                # A split/off marker (or EOF) closes the current chunk: sort it
                # as a whole so its imports bubble to the chunk's top.
                if line == "# isort: off\n":
                    isort_off = True
                if current:
                    if add_imports:
                        add_line_separator = line_separator or "\n"
                        current += add_line_separator + add_line_separator.join(add_imports)
                        add_imports = []
                    parsed = parse.file_contents(current, config=config)
                    verbose_output += parsed.verbose_output
                    # Preserve trailing blank lines (minus one) around the sorted chunk.
                    extra_space = ""
                    while current and current[-1] == "\n":
                        extra_space += "\n"
                        current = current[:-1]
                    extra_space = extra_space.replace("\n", "", 1)
                    sorted_output = output.sorted_imports(
                        parsed, config, extension, import_type="import"
                    )
                    made_changes = made_changes or _has_changed(
                        before=current,
                        after=sorted_output,
                        line_separator=parsed.line_separator,
                        ignore_whitespace=config.ignore_whitespace,
                    )
                    new_input += sorted_output
                    new_input += extra_space
                    current = ""
                new_input += line or ""
            else:
                current += line or ""

        # Re-scan the floated result with the normal machinery below.
        input_stream = StringIO(new_input)

    # --- main scan: (None,) sentinel signals end-of-file ---------------------
    for index, line in enumerate(chain(input_stream, (None,))):
        if line is None:
            if index == 0 and not config.force_adds:
                return False

            not_imports = True
            end_of_file = True
            line = ""
            if not line_separator:
                line_separator = "\n"

            # Flush a literal code-sorting block left open at EOF.
            if code_sorting and code_sorting_section:
                sorted_code = textwrap.indent(
                    isort.literal.assignment(
                        code_sorting_section,
                        str(code_sorting),
                        extension,
                        config=_indented_config(config, indent),
                    ),
                    code_sorting_indent,
                )
                made_changes = made_changes or _has_changed(
                    before=code_sorting_section,
                    after=sorted_code,
                    line_separator=line_separator,
                    ignore_whitespace=config.ignore_whitespace,
                )
                output_stream.write(sorted_code)
        else:
            stripped_line = line.strip()
            # Detect the file's own line ending from the first non-empty line.
            if stripped_line and not line_separator:
                line_separator = line[len(line.rstrip()) :].replace(" ", "").replace("\t", "")

            for file_skip_comment in FILE_SKIP_COMMENTS:
                if file_skip_comment in line:
                    if raise_on_skip:
                        raise FileSkipComment("Passed in content")
                    isort_off = True
                    skip_file = True

            if not in_quote:
                if stripped_line == "# isort: off":
                    isort_off = True
                elif stripped_line.startswith("# isort: dont-add-imports"):
                    add_imports = []
                elif stripped_line.startswith("# isort: dont-add-import:"):
                    import_not_to_add = stripped_line.split("# isort: dont-add-import:", 1)[
                        1
                    ].strip()
                    add_imports = [
                        import_to_add
                        for import_to_add in add_imports
                        if not import_to_add == import_not_to_add
                    ]

            # Track the file's leading comment header (shebang, license, ...)
            # so imports are never inserted above it.
            if (
                (index == 0 or (index in (1, 2) and not contains_imports))
                and stripped_line.startswith("#")
                and stripped_line not in config.section_comments
                and stripped_line not in CODE_SORT_COMMENTS
            ):
                in_top_comment = True
            elif in_top_comment and (
                not line.startswith("#")
                or stripped_line in config.section_comments
                or stripped_line in CODE_SORT_COMMENTS
            ):
                in_top_comment = False
                first_comment_index_end = index - 1

            was_in_quote = bool(in_quote)
            # Character scan to keep in_quote accurate across string literals,
            # honoring backslash escapes and '#' comment starts.
            if (not stripped_line.startswith("#") or in_quote) and '"' in line or "'" in line:
                char_index = 0
                if first_comment_index_start == -1 and (
                    line.startswith('"') or line.startswith("'")
                ):
                    first_comment_index_start = index
                while char_index < len(line):
                    if line[char_index] == "\\":
                        char_index += 1
                    elif in_quote:
                        if line[char_index : char_index + len(in_quote)] == in_quote:
                            in_quote = ""
                            if first_comment_index_end < first_comment_index_start:
                                first_comment_index_end = index
                    elif line[char_index] in ("'", '"'):
                        long_quote = line[char_index : char_index + 3]
                        if long_quote in ('"""', "'''"):
                            in_quote = long_quote
                            char_index += 2
                        else:
                            in_quote = line[char_index]
                    elif line[char_index] == "#":
                        break
                    char_index += 1

            not_imports = bool(in_quote) or was_in_quote or in_top_comment or isort_off
            if not (in_quote or was_in_quote or in_top_comment):
                if isort_off:
                    if not skip_file and stripped_line == "# isort: on":
                        isort_off = False
                elif stripped_line.endswith("# isort: split"):
                    not_imports = True
                elif stripped_line in CODE_SORT_COMMENTS:
                    # Begin collecting a literal block ("# isort: list" etc.).
                    code_sorting = stripped_line.split("isort: ")[1].strip()
                    code_sorting_indent = line[: -len(line.lstrip())]
                    not_imports = True
                elif code_sorting:
                    if not stripped_line:
                        # Blank line ends the literal block: sort and emit it.
                        sorted_code = textwrap.indent(
                            isort.literal.assignment(
                                code_sorting_section,
                                str(code_sorting),
                                extension,
                                config=_indented_config(config, indent),
                            ),
                            code_sorting_indent,
                        )
                        made_changes = made_changes or _has_changed(
                            before=code_sorting_section,
                            after=sorted_code,
                            line_separator=line_separator,
                            ignore_whitespace=config.ignore_whitespace,
                        )
                        output_stream.write(sorted_code)
                        not_imports = True
                        code_sorting = False
                        code_sorting_section = ""
                        code_sorting_indent = ""
                    else:
                        code_sorting_section += line
                        line = ""
                elif (
                    stripped_line in config.section_comments
                    or stripped_line in config.section_comments_end
                ):
                    # A known section comment either starts a new import
                    # section or is folded into the current one.
                    if import_section and not contains_imports:
                        output_stream.write(import_section)
                        import_section = line
                        not_imports = False
                    else:
                        import_section += line
                    indent = line[: -len(line.lstrip())]
                elif not (stripped_line or contains_imports):
                    not_imports = True
                elif (
                    not stripped_line
                    or stripped_line.startswith("#")
                    and (not indent or indent + line.lstrip() == line)
                    and not config.treat_all_comments_as_code
                    and stripped_line not in config.treat_comments_as_code
                ):
                    # Blank lines / plain comments ride along with the section.
                    import_section += line
                elif stripped_line.startswith(IMPORT_START_IDENTIFIERS):
                    new_indent = line[: -len(line.lstrip())]
                    import_statement = line
                    stripped_line = line.strip().split("#")[0]
                    # Pull in continuation lines (backslash or open paren).
                    while stripped_line.endswith("\\") or (
                        "(" in stripped_line and ")" not in stripped_line
                    ):
                        if stripped_line.endswith("\\"):
                            while stripped_line and stripped_line.endswith("\\"):
                                line = input_stream.readline()
                                stripped_line = line.strip().split("#")[0]
                                import_statement += line
                        else:
                            while ")" not in stripped_line:
                                line = input_stream.readline()
                                stripped_line = line.strip().split("#")[0]
                                import_statement += line

                    if (
                        import_statement.lstrip().startswith("from")
                        and "import" not in import_statement
                    ):
                        # "from x" with no "import" is not actually an import.
                        line = import_statement
                        not_imports = True
                    else:
                        did_contain_imports = contains_imports
                        contains_imports = True

                        cimport_statement: bool = False
                        if (
                            import_statement.lstrip().startswith(CIMPORT_IDENTIFIERS)
                            or " cimport " in import_statement
                            or " cimport*" in import_statement
                            or " cimport(" in import_statement
                            or ".cimport" in import_statement
                        ):
                            cimport_statement = True

                        # A cimport/import switch or an indentation change can
                        # force this import into a brand-new section.
                        if cimport_statement != cimports or (
                            new_indent != indent
                            and import_section
                            and (not did_contain_imports or len(new_indent) < len(indent))
                        ):
                            indent = new_indent
                            if import_section:
                                next_cimports = cimport_statement
                                next_import_section = import_statement
                                import_statement = ""
                                not_imports = True
                                line = ""
                            else:
                                cimports = cimport_statement
                        else:
                            if new_indent != indent:
                                if import_section and did_contain_imports:
                                    import_statement = indent + import_statement.lstrip()
                                else:
                                    indent = new_indent

                        import_section += import_statement
                else:
                    not_imports = True

        # --- a non-import line (or EOF) closes the current import section ----
        if not_imports:
            raw_import_section: str = import_section
            # Insert configured add_imports into a file with no import section.
            if (
                add_imports
                and (stripped_line or end_of_file)
                and not config.append_only
                and not in_top_comment
                and not was_in_quote
                and not import_section
                and not line.lstrip().startswith(COMMENT_INDICATORS)
                and not (line.rstrip().endswith(DOCSTRING_INDICATORS) and "=" not in line)
            ):
                add_line_separator = line_separator or "\n"
                import_section = add_line_separator.join(add_imports) + add_line_separator
                if end_of_file and index != 0:
                    output_stream.write(add_line_separator)
                contains_imports = True
                add_imports = []

            if next_import_section and not import_section:  # pragma: no cover
                raw_import_section = import_section = next_import_section
                next_import_section = ""

            if import_section:
                if add_imports and (contains_imports or not config.append_only) and not indent:
                    import_section = (
                        line_separator.join(add_imports) + line_separator + import_section
                    )
                    contains_imports = True
                    add_imports = []

                if not indent:
                    import_section += line
                    raw_import_section += line
                if not contains_imports:
                    # Nothing to sort: pass the accumulated text through.
                    output_stream.write(import_section)

                else:
                    leading_whitespace = import_section[: -len(import_section.lstrip())]
                    trailing_whitespace = import_section[len(import_section.rstrip()) :]
                    if first_import_section and not import_section.lstrip(
                        line_separator
                    ).startswith(COMMENT_INDICATORS):
                        import_section = import_section.lstrip(line_separator)
                        raw_import_section = raw_import_section.lstrip(line_separator)
                        first_import_section = False

                    if indent:
                        # Sort dedented, re-indent afterwards.
                        import_section = "".join(
                            line[len(indent) :] for line in import_section.splitlines(keepends=True)
                        )

                    parsed_content = parse.file_contents(import_section, config=config)
                    verbose_output += parsed_content.verbose_output

                    sorted_import_section = output.sorted_imports(
                        parsed_content,
                        _indented_config(config, indent),
                        extension,
                        import_type="cimport" if cimports else "import",
                    )
                    if not (import_section.strip() and not sorted_import_section):
                        if indent:
                            sorted_import_section = (
                                leading_whitespace
                                + textwrap.indent(sorted_import_section, indent).strip()
                                + trailing_whitespace
                            )

                        made_changes = made_changes or _has_changed(
                            before=raw_import_section,
                            after=sorted_import_section,
                            line_separator=line_separator,
                            ignore_whitespace=config.ignore_whitespace,
                        )
                        output_stream.write(sorted_import_section)
                        if not line and not indent and next_import_section:
                            output_stream.write(line_separator)

                if indent:
                    output_stream.write(line)
                    if not next_import_section:
                        indent = ""

                if next_import_section:
                    cimports = next_cimports
                    contains_imports = True
                else:
                    contains_imports = False
                import_section = next_import_section
                next_import_section = ""
            else:
                output_stream.write(line)
                not_imports = False

            # Copy through multi-line yield/raise continuations verbatim so a
            # trailing backslash is never mistaken for an import continuation.
            if stripped_line and not in_quote and not import_section and not next_import_section:
                if stripped_line == "yield":
                    while not stripped_line or stripped_line == "yield":
                        new_line = input_stream.readline()
                        if not new_line:
                            break

                        output_stream.write(new_line)
                        stripped_line = new_line.strip().split("#")[0]

                if stripped_line.startswith("raise") or stripped_line.startswith("yield"):
                    while stripped_line.endswith("\\"):
                        new_line = input_stream.readline()
                        if not new_line:
                            break

                        output_stream.write(new_line)
                        stripped_line = new_line.strip().split("#")[0]

    if made_changes and config.only_modified:
        for output_str in verbose_output:
            print(output_str)

    return made_changes
|
||||
|
||||
|
||||
def _indented_config(config: Config, indent: str) -> Config:
    """Derive a config suited to sorting an indented import section.

    With no indent the original config is returned untouched. Otherwise a
    copy is built whose line/wrap lengths are reduced by the indent width
    (floored at zero) and whose import headings/footers are kept only when
    ``indented_import_headings`` is enabled.
    """
    if not indent:
        return config

    width = len(indent)
    keep_headings = config.indented_import_headings
    return Config(
        config=config,
        line_length=max(config.line_length - width, 0),
        wrap_length=max(config.wrap_length - width, 0),
        lines_after_imports=1,
        import_headings=config.import_headings if keep_headings else {},
        import_footers=config.import_footers if keep_headings else {},
    )
|
||||
|
||||
|
||||
def _has_changed(before: str, after: str, line_separator: str, ignore_whitespace: bool) -> bool:
|
||||
if ignore_whitespace:
|
||||
return (
|
||||
remove_whitespace(before, line_separator=line_separator).strip()
|
||||
!= remove_whitespace(after, line_separator=line_separator).strip()
|
||||
)
|
||||
return before.strip() != after.strip()
|
||||
Binary file not shown.
Binary file not shown.
415
.venv/lib/python3.8/site-packages/isort/deprecated/finders.py
Normal file
415
.venv/lib/python3.8/site-packages/isort/deprecated/finders.py
Normal file
|
|
@ -0,0 +1,415 @@
|
|||
"""Finders try to find right section for passed module name"""
|
||||
import importlib.machinery
|
||||
import inspect
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
import sysconfig
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from contextlib import contextmanager
|
||||
from fnmatch import fnmatch
|
||||
from functools import lru_cache
|
||||
from glob import glob
|
||||
from pathlib import Path
|
||||
from typing import Dict, Iterable, Iterator, List, Optional, Pattern, Sequence, Tuple, Type
|
||||
|
||||
from isort import sections
|
||||
from isort.settings import KNOWN_SECTION_MAPPING, Config
|
||||
from isort.utils import exists_case_sensitive
|
||||
|
||||
try:
|
||||
from pipreqs import pipreqs # type: ignore
|
||||
|
||||
except ImportError:
|
||||
pipreqs = None
|
||||
|
||||
try:
|
||||
from pip_api import parse_requirements # type: ignore
|
||||
|
||||
except ImportError:
|
||||
parse_requirements = None
|
||||
|
||||
try:
|
||||
from requirementslib import Pipfile # type: ignore
|
||||
|
||||
except ImportError:
|
||||
Pipfile = None
|
||||
|
||||
|
||||
@contextmanager
def chdir(path: str) -> Iterator[None]:
    """Temporarily make *path* the working directory.

    The previous working directory is restored on exit even when the managed
    block raises.
    """
    original = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(original)
|
||||
|
||||
|
||||
class BaseFinder(metaclass=ABCMeta):
    """Common interface for section finders.

    A finder maps a module name to the isort section it should be placed in,
    or ``None`` when it cannot decide.
    """

    def __init__(self, config: Config) -> None:
        # Settings consulted by concrete finder implementations.
        self.config = config

    @abstractmethod
    def find(self, module_name: str) -> Optional[str]:
        """Return the section for *module_name*, or ``None`` if unknown."""
        raise NotImplementedError
|
||||
|
||||
|
||||
class ForcedSeparateFinder(BaseFinder):
    """Places modules matching a configured ``forced_separate`` glob into a
    section named after that glob pattern."""

    def find(self, module_name: str) -> Optional[str]:
        """Return the first matching forced_separate pattern, or ``None``."""
        for forced_separate in self.config.forced_separate:
            # Ensure all forced_separate patterns will match to end of string
            path_glob = forced_separate
            if not forced_separate.endswith("*"):
                path_glob = f"{forced_separate}*"

            # Also match the relative ("."-prefixed) spelling of the module.
            if fnmatch(module_name, path_glob) or fnmatch(module_name, "." + path_glob):
                return forced_separate
        return None
|
||||
|
||||
|
||||
class LocalFinder(BaseFinder):
    """Identifies relative ("."-prefixed) imports as local-folder imports."""

    def find(self, module_name: str) -> Optional[str]:
        """Return ``"LOCALFOLDER"`` for relative module names, else ``None``."""
        return "LOCALFOLDER" if module_name.startswith(".") else None
|
||||
|
||||
|
||||
class KnownPatternFinder(BaseFinder):
    """Places modules according to user-configured ``known_*`` glob patterns.

    All patterns are compiled into anchored regular expressions once at
    construction time. ``config.sections`` is walked in reverse so that
    earlier sections end up later in the list and later ones take precedence
    on a tie.
    """

    def __init__(self, config: Config) -> None:
        super().__init__(config)

        # (compiled regex, section placement) pairs consulted in order by find().
        self.known_patterns: List[Tuple[Pattern[str], str]] = []
        for placement in reversed(config.sections):
            known_placement = KNOWN_SECTION_MAPPING.get(placement, placement).lower()
            config_key = f"known_{known_placement}"
            # Patterns come either from a dedicated known_<section> setting or
            # from the known_other mapping for custom sections.
            known_patterns = list(
                getattr(self.config, config_key, self.config.known_other.get(known_placement, []))
            )
            known_patterns = [
                pattern
                for known_pattern in known_patterns
                for pattern in self._parse_known_pattern(known_pattern)
            ]
            for known_pattern in known_patterns:
                # Translate glob-style wildcards into an anchored regex.
                regexp = "^" + known_pattern.replace("*", ".*").replace("?", ".?") + "$"
                self.known_patterns.append((re.compile(regexp), placement))

    def _parse_known_pattern(self, pattern: str) -> List[str]:
        """Expand pattern if identified as a directory and return found sub packages"""
        if pattern.endswith(os.path.sep):
            # A trailing separator means "every package directory under here".
            patterns = [
                filename
                for filename in os.listdir(os.path.join(self.config.directory, pattern))
                if os.path.isdir(os.path.join(self.config.directory, pattern, filename))
            ]
        else:
            patterns = [pattern]

        return patterns

    def find(self, module_name: str) -> Optional[str]:
        # Try to find most specific placement instruction match (if any):
        # check "a.b.c", then "a.b", then "a".
        parts = module_name.split(".")
        module_names_to_check = (".".join(parts[:first_k]) for first_k in range(len(parts), 0, -1))
        for module_name_to_check in module_names_to_check:
            for pattern, placement in self.known_patterns:
                if pattern.match(module_name_to_check):
                    return placement
        return None
|
||||
|
||||
|
||||
class PathFinder(BaseFinder):
    """Classifies a module by locating it on the filesystem.

    Builds an ordered list of candidate roots (project root, src dir,
    virtualenv/conda site-packages, stdlib dir, sys.path) and maps the first
    root that contains the module to a section.
    """

    def __init__(self, config: Config, path: str = ".") -> None:
        super().__init__(config)

        # restore the original import path (i.e. not the path to bin/isort)
        root_dir = os.path.abspath(path)
        src_dir = f"{root_dir}/src"
        self.paths = [root_dir, src_dir]

        # virtual env
        self.virtual_env = self.config.virtual_env or os.environ.get("VIRTUAL_ENV")
        if self.virtual_env:
            self.virtual_env = os.path.realpath(self.virtual_env)
        self.virtual_env_src = ""
        if self.virtual_env:
            self.virtual_env_src = f"{self.virtual_env}/src/"
            for venv_path in glob(f"{self.virtual_env}/lib/python*/site-packages"):
                if venv_path not in self.paths:
                    self.paths.append(venv_path)
            for nested_venv_path in glob(f"{self.virtual_env}/lib/python*/*/site-packages"):
                if nested_venv_path not in self.paths:
                    self.paths.append(nested_venv_path)
            for venv_src_path in glob(f"{self.virtual_env}/src/*"):
                if os.path.isdir(venv_src_path):
                    self.paths.append(venv_src_path)

        # conda
        self.conda_env = self.config.conda_env or os.environ.get("CONDA_PREFIX") or ""
        if self.conda_env:
            self.conda_env = os.path.realpath(self.conda_env)
            for conda_path in glob(f"{self.conda_env}/lib/python*/site-packages"):
                if conda_path not in self.paths:
                    self.paths.append(conda_path)
            for nested_conda_path in glob(f"{self.conda_env}/lib/python*/*/site-packages"):
                if nested_conda_path not in self.paths:
                    self.paths.append(nested_conda_path)

        # handle case-insensitive paths on windows
        self.stdlib_lib_prefix = os.path.normcase(sysconfig.get_paths()["stdlib"])
        if self.stdlib_lib_prefix not in self.paths:
            self.paths.append(self.stdlib_lib_prefix)

        # add system paths (sys.path[0] is skipped: it is the script dir)
        for system_path in sys.path[1:]:
            if system_path not in self.paths:
                self.paths.append(system_path)

    def find(self, module_name: str) -> Optional[str]:
        """Return the section for the first root containing *module_name*."""
        for prefix in self.paths:
            # Only the top-level package of a dotted name is looked up on disk.
            package_path = "/".join((prefix, module_name.split(".")[0]))
            path_obj = Path(package_path).resolve()
            is_module = (
                exists_case_sensitive(package_path + ".py")
                or any(
                    exists_case_sensitive(package_path + ext_suffix)
                    for ext_suffix in importlib.machinery.EXTENSION_SUFFIXES
                )
                or exists_case_sensitive(package_path + "/__init__.py")
            )
            is_package = exists_case_sensitive(package_path) and os.path.isdir(package_path)
            if is_module or is_package:
                if (
                    "site-packages" in prefix
                    or "dist-packages" in prefix
                    or (self.virtual_env and self.virtual_env_src in prefix)
                ):
                    return sections.THIRDPARTY
                if os.path.normcase(prefix) == self.stdlib_lib_prefix:
                    return sections.STDLIB
                if self.conda_env and self.conda_env in prefix:
                    return sections.THIRDPARTY
                for src_path in self.config.src_paths:
                    if src_path in path_obj.parents and not self.config.is_skipped(path_obj):
                        return sections.FIRSTPARTY

                if os.path.normcase(prefix).startswith(self.stdlib_lib_prefix):
                    return sections.STDLIB  # pragma: no cover - edge case for one OS. Hard to test.

                return self.config.default_section
        return None
|
||||
|
||||
|
||||
class ReqsBaseFinder(BaseFinder):
    """Shared machinery for finders that read requirement files.

    Subclasses supply ``_get_files_from_dir`` and ``_get_names``; this base
    walks parent directories for requirement files and marks the listed
    packages as third-party.
    """

    # Subclasses flip this on when their optional parsing dependency is available.
    enabled = False

    def __init__(self, config: Config, path: str = ".") -> None:
        super().__init__(config)
        self.path = path
        if self.enabled:
            self.mapping = self._load_mapping()
            self.names = self._load_names()

    @abstractmethod
    def _get_names(self, path: str) -> Iterator[str]:
        raise NotImplementedError

    @abstractmethod
    def _get_files_from_dir(self, path: str) -> Iterator[str]:
        raise NotImplementedError

    @staticmethod
    def _load_mapping() -> Optional[Dict[str, str]]:
        """Return list of mappings `package_name -> module_name`

        Example:
            django-haystack -> haystack
        """
        if not pipreqs:
            return None
        # pipreqs ships a plain-text "mapping" file of import_name:pypi_name pairs.
        path = os.path.dirname(inspect.getfile(pipreqs))
        path = os.path.join(path, "mapping")
        with open(path) as f:
            mappings: Dict[str, str] = {}  # pypi_name: import_name
            for line in f:
                import_name, _, pypi_name = line.strip().partition(":")
                mappings[pypi_name] = import_name
            return mappings
            # return dict(tuple(line.strip().split(":")[::-1]) for line in f)

    def _load_names(self) -> List[str]:
        """Return list of thirdparty modules from requirements"""
        names = []
        for path in self._get_files():
            for name in self._get_names(path):
                names.append(self._normalize_name(name))
        return names

    @staticmethod
    def _get_parents(path: str) -> Iterator[str]:
        # Yields path, then each ancestor up to (and including) the filesystem root.
        prev = ""
        while path != prev:
            prev = path
            yield path
            path = os.path.dirname(path)

    def _get_files(self) -> Iterator[str]:
        """Return paths to all requirements files"""
        path = os.path.abspath(self.path)
        if os.path.isfile(path):
            path = os.path.dirname(path)

        for path in self._get_parents(path):
            yield from self._get_files_from_dir(path)

    def _normalize_name(self, name: str) -> str:
        """Convert package name to module name

        Examples:
            Django -> django
            django-haystack -> django_haystack
            Flask-RESTFul -> flask_restful
        """
        if self.mapping:
            name = self.mapping.get(name.replace("-", "_"), name)
        return name.lower().replace("-", "_")

    def find(self, module_name: str) -> Optional[str]:
        # required lib not installed yet
        if not self.enabled:
            return None

        # Only the top-level package name is matched against requirements.
        module_name, _sep, _submodules = module_name.partition(".")
        module_name = module_name.lower()
        if not module_name:
            return None

        for name in self.names:
            if module_name == name:
                return sections.THIRDPARTY
        return None
|
||||
|
||||
|
||||
class RequirementsFinder(ReqsBaseFinder):
    """Marks packages listed in requirements(.txt/.in) files as third-party.

    Only enabled when the optional ``pip_api`` dependency is importable.
    """

    exts = (".txt", ".in")
    enabled = bool(parse_requirements)

    def _get_files_from_dir(self, path: str) -> Iterator[str]:
        """Return paths to requirements files from passed dir."""
        yield from self._get_files_from_dir_cached(path)

    # NOTE: lru_cache on a classmethod caches process-wide by (cls, path) —
    # directory scans are repeated per parent dir, so caching matters here.
    @classmethod
    @lru_cache(maxsize=16)
    def _get_files_from_dir_cached(cls, path: str) -> List[str]:
        results = []

        for fname in os.listdir(path):
            if "requirements" not in fname:
                continue
            full_path = os.path.join(path, fname)

            # *requirements*/*.{txt,in}
            if os.path.isdir(full_path):
                for subfile_name in os.listdir(full_path):
                    for ext in cls.exts:
                        if subfile_name.endswith(ext):
                            results.append(os.path.join(full_path, subfile_name))
                continue

            # *requirements*.{txt,in}
            if os.path.isfile(full_path):
                for ext in cls.exts:
                    if fname.endswith(ext):
                        results.append(full_path)
                        break

        return results

    def _get_names(self, path: str) -> Iterator[str]:
        """Load required packages from path to requirements file"""
        yield from self._get_names_cached(path)

    @classmethod
    @lru_cache(maxsize=16)
    def _get_names_cached(cls, path: str) -> List[str]:
        result = []

        # parse_requirements resolves relative include paths against the CWD,
        # so temporarily switch into the file's directory.
        with chdir(os.path.dirname(path)):
            requirements = parse_requirements(path)
            for req in requirements.values():
                if req.name:
                    result.append(req.name)

        return result
|
||||
|
||||
|
||||
class PipfileFinder(ReqsBaseFinder):
    """Marks packages declared in a project ``Pipfile`` as third-party."""

    # Only usable when the optional requirementslib dependency is installed.
    enabled = bool(Pipfile)

    def _get_names(self, path: str) -> Iterator[str]:
        """Yield the package names declared in the Pipfile at *path*."""
        with chdir(path):
            for requirement in Pipfile.load(path).packages:
                yield requirement.name

    def _get_files_from_dir(self, path: str) -> Iterator[str]:
        """Yield *path* itself when it contains a Pipfile."""
        if "Pipfile" in os.listdir(path):
            yield path
|
||||
|
||||
|
||||
class DefaultFinder(BaseFinder):
    """Fallback finder: always answers with the configured default section."""

    def find(self, module_name: str) -> Optional[str]:
        """Return the configured default section unconditionally."""
        return self.config.default_section
|
||||
|
||||
|
||||
class FindersManager:
    """Runs a chain of finders, returning the first section any of them yields.

    Finder order matters: more specific finders come first, DefaultFinder
    last. Failures during construction or lookup of any single finder are
    swallowed (optionally logged) so the rest of the chain keeps working.
    """

    _default_finders_classes: Sequence[Type[BaseFinder]] = (
        ForcedSeparateFinder,
        LocalFinder,
        KnownPatternFinder,
        PathFinder,
        PipfileFinder,
        RequirementsFinder,
        DefaultFinder,
    )

    def __init__(
        self, config: Config, finder_classes: Optional[Iterable[Type[BaseFinder]]] = None
    ) -> None:
        self.verbose: bool = config.verbose

        if finder_classes is None:
            finder_classes = self._default_finders_classes
        finders: List[BaseFinder] = []
        for finder_cls in finder_classes:
            try:
                finders.append(finder_cls(config))
            except Exception as exception:
                # if one finder fails to instantiate isort can continue using the rest
                if self.verbose:
                    print(
                        (
                            f"{finder_cls.__name__} encountered an error ({exception}) during "
                            "instantiation and cannot be used"
                        )
                    )
        self.finders: Tuple[BaseFinder, ...] = tuple(finders)

    def find(self, module_name: str) -> Optional[str]:
        """Return the section the first successful finder reports, or None."""
        for finder in self.finders:
            try:
                section = finder.find(module_name)
                if section is not None:
                    return section
            except Exception as exception:
                # isort has to be able to keep trying to identify the correct
                # import section even if one approach fails
                if self.verbose:
                    print(
                        f"{finder.__class__.__name__} encountered an error ({exception}) while "
                        f"trying to identify the {module_name} module"
                    )
        return None
|
||||
197
.venv/lib/python3.8/site-packages/isort/exceptions.py
Normal file
197
.venv/lib/python3.8/site-packages/isort/exceptions.py
Normal file
|
|
@ -0,0 +1,197 @@
|
|||
"""All isort specific exception classes should be defined here"""
|
||||
from functools import partial
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Type, Union
|
||||
|
||||
from .profiles import profiles
|
||||
|
||||
|
||||
class ISortError(Exception):
    """Base isort exception object from which all isort sourced exceptions should inherit"""

    def __reduce__(self):  # type: ignore
        # Rebuild via keyword arguments captured in __dict__ so subclass
        # instances survive pickling despite non-standard __init__ signatures.
        rebuild = partial(type(self), **self.__dict__)
        return (rebuild, ())
|
||||
|
||||
|
||||
class InvalidSettingsPath(ISortError):
    """Raised when a settings path is provided that is neither a valid file or directory"""

    def __init__(self, settings_path: str):
        # Store the attribute first so pickling via ISortError.__reduce__ works.
        self.settings_path = settings_path
        super().__init__(
            f"isort was told to use the settings_path: {settings_path} as the base directory or "
            "file that represents the starting point of config file discovery, but it does not "
            "exist."
        )
|
||||
|
||||
|
||||
class ExistingSyntaxErrors(ISortError):
    """Raised when isort is told to sort imports within code that has existing syntax errors"""

    def __init__(self, file_path: str):
        # Store the attribute first so pickling via ISortError.__reduce__ works.
        self.file_path = file_path
        super().__init__(
            f"isort was told to sort imports within code that contains syntax errors: "
            f"{file_path}."
        )
|
||||
|
||||
|
||||
class IntroducedSyntaxErrors(ISortError):
    """Raised when isort has introduced a syntax error in the process of sorting imports"""

    def __init__(self, file_path: str):
        # Store the attribute first so pickling via ISortError.__reduce__ works.
        self.file_path = file_path
        super().__init__(
            f"isort introduced syntax errors when attempting to sort the imports contained within "
            f"{file_path}."
        )
|
||||
|
||||
|
||||
class FileSkipped(ISortError):
    """Should be raised when a file is skipped for any reason"""

    def __init__(self, message: str, file_path: str):
        # Keep the raw message and path on the instance so callers (and
        # __reduce__ on the base class) can reconstruct the exception.
        self.message = message
        self.file_path = file_path
        super().__init__(message)
|
||||
|
||||
|
||||
class FileSkipComment(FileSkipped):
    """Raised when an entire file is skipped due to a isort skip file comment"""

    def __init__(self, file_path: str, **kwargs: str):
        message = f"{file_path} contains a file skip comment and was skipped."
        super().__init__(message, file_path=file_path)
|
||||
|
||||
|
||||
class FileSkipSetting(FileSkipped):
    """Raised when an entire file is skipped due to provided isort settings"""

    def __init__(self, file_path: str, **kwargs: str):
        message = (
            f"{file_path} was skipped as it's listed in 'skip' setting"
            " or matches a glob in 'skip_glob' setting"
        )
        super().__init__(message, file_path=file_path)
|
||||
|
||||
|
||||
class ProfileDoesNotExist(ISortError):
    """Raised when a profile is set by the user that doesn't exist"""

    def __init__(self, profile: str):
        # `profiles` is the registry of built-in profiles from .profiles.
        message = (
            f"Specified profile of {profile} does not exist. "
            f"Available profiles: {','.join(profiles)}."
        )
        super().__init__(message)
        self.profile = profile
|
||||
|
||||
|
||||
class SortingFunctionDoesNotExist(ISortError):
    """Raised when the specified sorting function isn't available"""

    def __init__(self, sort_order: str, available_sort_orders: List[str]):
        message = (
            f"Specified sort_order of {sort_order} does not exist. "
            f"Available sort_orders: {','.join(available_sort_orders)}."
        )
        super().__init__(message)
        self.sort_order = sort_order
        self.available_sort_orders = available_sort_orders
|
||||
|
||||
|
||||
class FormattingPluginDoesNotExist(ISortError):
    """Raised when a formatting plugin is set by the user that doesn't exist"""

    def __init__(self, formatter: str):
        message = f"Specified formatting plugin of {formatter} does not exist. "
        super().__init__(message)
        self.formatter = formatter
|
||||
|
||||
|
||||
class LiteralParsingFailure(ISortError):
    """Raised when one of isort's literal sorting comments is used but isort can't parse
    the given data structure.
    """

    def __init__(self, code: str, original_error: Union[Exception, Type[Exception]]):
        message = (
            f"isort failed to parse the given literal {code}. It's important to note "
            "that isort literal sorting only supports simple literals parsable by "
            f"ast.literal_eval which gave the exception of {original_error}."
        )
        super().__init__(message)
        self.code = code
        self.original_error = original_error
|
||||
|
||||
|
||||
class LiteralSortTypeMismatch(ISortError):
    """Raised when an isort literal sorting comment is used with a type that doesn't match
    the supplied data structure's type.
    """

    def __init__(self, kind: type, expected_kind: type):
        message = (
            f"isort was told to sort a literal of type {expected_kind} but was given "
            f"a literal of type {kind}."
        )
        super().__init__(message)
        self.kind = kind
        self.expected_kind = expected_kind
|
||||
|
||||
|
||||
class AssignmentsFormatMismatch(ISortError):
    """Raised when isort is told to sort assignments but the format of the assignment
    section doesn't match isort's expectation.
    """

    def __init__(self, code: str):
        # NOTE: the trailing braces are literal text shown to the user, not
        # f-string placeholders.
        message = (
            "isort was told to sort a section of assignments, however the given code:\n\n"
            f"{code}\n\n"
            "Does not match isort's strict single line formatting requirement for assignment "
            "sorting:\n\n"
            "{variable_name} = {value}\n"
            "{variable_name2} = {value2}\n"
            "...\n\n"
        )
        super().__init__(message)
        self.code = code
|
||||
|
||||
|
||||
class UnsupportedSettings(ISortError):
    """Raised when settings are passed into isort (either from config, CLI, or runtime)
    that it doesn't support.
    """

    @staticmethod
    def _format_option(name: str, value: Any, source: str) -> str:
        """Render a single unsupported option as an indented bullet line."""
        return f"\t- {name} = {value} (source: '{source}')"

    def __init__(self, unsupported_settings: Dict[str, Dict[str, str]]):
        formatted_options = [
            self._format_option(name, **option)
            for name, option in unsupported_settings.items()
        ]
        errors = "\n".join(formatted_options)

        super().__init__(
            "isort was provided settings that it doesn't support:\n\n"
            f"{errors}\n\n"
            "For a complete and up-to-date listing of supported settings see: "
            "https://pycqa.github.io/isort/docs/configuration/options.\n"
        )
        self.unsupported_settings = unsupported_settings
|
||||
|
||||
|
||||
class UnsupportedEncoding(ISortError):
    """Raised when isort encounters an encoding error while trying to read a file"""

    def __init__(self, filename: Union[str, Path]):
        # Bug fix: the message's f-string previously contained no placeholder,
        # so the failing file was never named. Interpolate the filename.
        super().__init__(f"Unknown or unsupported encoding in {filename}")
        self.filename = filename
|
||||
|
||||
|
||||
class MissingSection(ISortError):
    """Raised when isort encounters an import that matches a section that is not defined"""

    def __init__(self, import_module: str, section: str):
        message = (
            f"Found {import_module} import while parsing, but {section} was not included "
            "in the `sections` setting of your config. Please add it before continuing\n"
            "See https://pycqa.github.io/isort/#custom-sections-and-ordering "
            "for more info."
        )
        super().__init__(message)
|
||||
41
.venv/lib/python3.8/site-packages/isort/files.py
Normal file
41
.venv/lib/python3.8/site-packages/isort/files.py
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
import os
|
||||
from pathlib import Path
|
||||
from typing import Iterable, Iterator, List, Set
|
||||
|
||||
from isort.settings import Config
|
||||
|
||||
|
||||
def find(
    paths: Iterable[str], config: Config, skipped: List[str], broken: List[str]
) -> Iterator[str]:
    """Finds and provides an iterator for all Python source files defined in paths.

    Skipped entries are appended (by name) to ``skipped``; paths that do not
    exist are appended to ``broken``.
    """
    seen_directories: Set[Path] = set()

    for path in paths:
        if not os.path.isdir(path):
            # A plain file argument is yielded as-is; a missing path is broken.
            if not os.path.exists(path):
                broken.append(path)
            else:
                yield path
            continue

        for dirpath, dirnames, filenames in os.walk(
            path, topdown=True, followlinks=config.follow_links
        ):
            parent = Path(dirpath)
            # Iterate over a copy so entries can be pruned from `dirnames`
            # in place (topdown walk honours the pruning).
            for dirname in list(dirnames):
                child = parent / dirname
                resolved_child = child.resolve()
                if config.is_skipped(child):
                    skipped.append(dirname)
                    dirnames.remove(dirname)
                else:
                    # Avoid revisiting directories (e.g. via symlink cycles).
                    if resolved_child in seen_directories:  # pragma: no cover
                        dirnames.remove(dirname)
                    seen_directories.add(resolved_child)

            for filename in filenames:
                filepath = os.path.join(dirpath, filename)
                if not config.is_supported_filetype(filepath):
                    continue
                if config.is_skipped(Path(os.path.abspath(filepath))):
                    skipped.append(filename)
                else:
                    yield filepath
|
||||
156
.venv/lib/python3.8/site-packages/isort/format.py
Normal file
156
.venv/lib/python3.8/site-packages/isort/format.py
Normal file
|
|
@ -0,0 +1,156 @@
|
|||
import re
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from difflib import unified_diff
|
||||
from pathlib import Path
|
||||
from typing import Optional, TextIO
|
||||
|
||||
try:
|
||||
import colorama
|
||||
except ImportError:
|
||||
colorama_unavailable = True
|
||||
else:
|
||||
colorama_unavailable = False
|
||||
colorama.init(strip=False)
|
||||
|
||||
|
||||
# A unified-diff added line starts with exactly one "+" (the "+++ " file
# header has two); likewise removed lines start with exactly one "-".
# Used by ColoramaPrinter.diff_line to pick a color per line.
ADDED_LINE_PATTERN = re.compile(r"\+[^+]")
REMOVED_LINE_PATTERN = re.compile(r"-[^-]")
|
||||
|
||||
|
||||
def format_simplified(import_line: str) -> str:
    """Reduce a natural import statement to its dotted-path form.

    e.g. ``"from os import path"`` -> ``"os.path"``; ``"import os"`` -> ``"os"``.
    Any other line is returned stripped but otherwise unchanged.
    """
    simplified = import_line.strip()
    if simplified.startswith("from "):
        simplified = simplified.replace("from ", "").replace(" import ", ".")
    elif simplified.startswith("import "):
        simplified = simplified.replace("import ", "")
    return simplified
|
||||
|
||||
|
||||
def format_natural(import_line: str) -> str:
    """Expand a dotted-path import back into a natural import statement.

    e.g. ``"os.path"`` -> ``"from os import path"``; ``"os"`` -> ``"import os"``.
    Lines already starting with ``from `` or ``import `` pass through stripped.
    """
    natural = import_line.strip()
    if natural.startswith("from ") or natural.startswith("import "):
        return natural
    if "." not in natural:
        return f"import {natural}"
    *module_parts, attribute = natural.split(".")
    return f"from {'.'.join(module_parts)} import {attribute}"
|
||||
|
||||
|
||||
def show_unified_diff(
    *,
    file_input: str,
    file_output: str,
    file_path: Optional[Path],
    output: Optional[TextIO] = None,
    color_output: bool = False,
) -> None:
    """Shows a unified_diff for the provided input and output against the provided file path.

    - **file_input**: A string that represents the contents of a file before changes.
    - **file_output**: A string that represents the contents of a file after changes.
    - **file_path**: A Path object that represents the file path of the file being changed.
    - **output**: A stream to output the diff to. If none is provided uses sys.stdout.
    - **color_output**: Use color in output if True.
    """
    printer = create_terminal_printer(color_output, output)
    display_name = "" if file_path is None else str(file_path)
    # Use the file's mtime when we have a real path; fall back to "now".
    if file_path is None:
        before_timestamp = str(datetime.now())
    else:
        before_timestamp = str(datetime.fromtimestamp(file_path.stat().st_mtime))
    diff_lines = unified_diff(
        file_input.splitlines(keepends=True),
        file_output.splitlines(keepends=True),
        fromfile=display_name + ":before",
        tofile=display_name + ":after",
        fromfiledate=before_timestamp,
        tofiledate=str(datetime.now()),
    )
    for diff_line in diff_lines:
        printer.diff_line(diff_line)
|
||||
|
||||
|
||||
def ask_whether_to_apply_changes_to_file(file_path: str) -> bool:
    """Interactively ask whether to apply suggested changes to the given file.

    Returns True for yes, False for no; 'quit' exits the process with code 1.
    """
    valid_answers = ("yes", "y", "no", "n", "quit", "q")
    answer = None
    while answer not in valid_answers:
        answer = input(f"Apply suggested changes to '{file_path}' [y/n/q]? ")  # nosec
        answer = answer.lower()
    if answer in ("no", "n"):
        return False
    if answer in ("quit", "q"):
        sys.exit(1)
    return True
|
||||
|
||||
|
||||
def remove_whitespace(content: str, line_separator: str = "\n") -> str:
    """Strip line separators, spaces, and form-feed characters from content."""
    for removal in (line_separator, " ", "\x0c"):
        content = content.replace(removal, "")
    return content
|
||||
|
||||
|
||||
class BasicPrinter:
    """Plain (uncolored) printer for isort status messages and diff lines."""

    ERROR = "ERROR"
    SUCCESS = "SUCCESS"

    def __init__(self, error: str, success: str, output: Optional[TextIO] = None):
        # `error` and `success` are str.format templates taking
        # {error}/{success} and {message} placeholders.
        self.output = output or sys.stdout
        self.success_message = success
        self.error_message = error

    def success(self, message: str) -> None:
        formatted = self.success_message.format(success=self.SUCCESS, message=message)
        print(formatted, file=self.output)

    def error(self, message: str) -> None:
        # Errors always go to stderr regardless of the configured output stream.
        formatted = self.error_message.format(error=self.ERROR, message=message)
        print(formatted, file=sys.stderr)

    def diff_line(self, line: str) -> None:
        self.output.write(line)
|
||||
|
||||
|
||||
class ColoramaPrinter(BasicPrinter):
    """Colorized printer; requires the optional colorama dependency."""

    def __init__(self, error: str, success: str, output: Optional[TextIO]):
        super().__init__(error, success, output=output)

        # NOTE: these are instance attributes rather than class attributes
        # because they reference colorama, which might not be installed.
        self.ERROR = self.style_text("ERROR", colorama.Fore.RED)
        self.SUCCESS = self.style_text("SUCCESS", colorama.Fore.GREEN)
        self.ADDED_LINE = colorama.Fore.GREEN
        self.REMOVED_LINE = colorama.Fore.RED

    @staticmethod
    def style_text(text: str, style: Optional[str] = None) -> str:
        """Wrap text in the given ANSI style, resetting afterwards."""
        if style is None:
            return text
        return style + text + str(colorama.Style.RESET_ALL)

    def diff_line(self, line: str) -> None:
        # Color additions green and removals red; other lines are unstyled.
        chosen_style: Optional[str] = None
        if ADDED_LINE_PATTERN.match(line):
            chosen_style = self.ADDED_LINE
        elif REMOVED_LINE_PATTERN.match(line):
            chosen_style = self.REMOVED_LINE
        self.output.write(self.style_text(line, chosen_style))
|
||||
|
||||
|
||||
def create_terminal_printer(
    color: bool, output: Optional[TextIO] = None, error: str = "", success: str = ""
) -> BasicPrinter:
    """Return a color-capable printer when requested (and available), else a basic one.

    Exits the process with an explanatory message if color was requested but
    colorama is not installed.
    """
    if color and colorama_unavailable:
        print(
            "\n"
            "Sorry, but to use --color (color_output) the colorama python package is required.\n\n"
            "Reference: https://pypi.org/project/colorama/\n\n"
            "You can either install it separately on your system or as the colors extra "
            "for isort. Ex: \n\n"
            "$ pip install isort[colors]\n",
            file=sys.stderr,
        )
        sys.exit(1)

    if color:
        return ColoramaPrinter(error, success, output)
    return BasicPrinter(error, success, output)
|
||||
86
.venv/lib/python3.8/site-packages/isort/hooks.py
Normal file
86
.venv/lib/python3.8/site-packages/isort/hooks.py
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
"""Defines a git hook to allow pre-commit warnings and errors about import order.
|
||||
|
||||
usage:
|
||||
exit_code = git_hook(strict=True|False, modify=True|False)
|
||||
"""
|
||||
import os
|
||||
import subprocess # nosec - Needed for hook
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from isort import Config, api, exceptions
|
||||
|
||||
|
||||
def get_output(command: List[str]) -> str:
    """Run a command and return its raw stdout decoded as text.

    :param command: the command to run
    :returns: the stdout output of the command
    """
    completed = subprocess.run(command, stdout=subprocess.PIPE, check=True)  # nosec - trusted input
    return completed.stdout.decode()
|
||||
|
||||
|
||||
def get_lines(command: List[str]) -> List[str]:
    """Run a command and return its output as whitespace-stripped lines.

    :param command: the command to run
    :returns: list of whitespace-stripped lines output by command
    """
    raw_output = get_output(command)
    return [line.strip() for line in raw_output.splitlines()]
|
||||
|
||||
|
||||
def git_hook(
    strict: bool = False, modify: bool = False, lazy: bool = False, settings_file: str = ""
) -> int:
    """Git pre-commit hook to check staged files for isort errors

    :param bool strict - if True, return number of errors on exit,
        causing the hook to fail. If False, return zero so it will
        just act as a warning.
    :param bool modify - if True, fix the sources if they are not
        sorted properly. If False, only report result without
        modifying anything.
    :param bool lazy - if True, also check/fix unstaged files.
        This is useful if you frequently use ``git commit -a`` for example.
        If False, only check/fix the staged files for isort errors.
    :param str settings_file - A path to a file to be used as
        the configuration file for this run.
        When settings_file is the empty string, the configuration file
        will be searched starting at the directory containing the first
        staged file, if any, and going upward in the directory structure.

    :return number of errors if in strict mode, 0 otherwise.
    """
    # Get list of files modified and staged
    diff_cmd = ["git", "diff-index", "--cached", "--name-only", "--diff-filter=ACMRTUXB", "HEAD"]
    if lazy:
        diff_cmd.remove("--cached")

    files_modified = get_lines(diff_cmd)
    if not files_modified:
        return 0

    errors = 0
    config = Config(
        settings_file=settings_file,
        settings_path=os.path.dirname(os.path.abspath(files_modified[0])),
    )
    for filename in files_modified:
        if filename.endswith(".py"):
            # Get the staged contents of the file.
            # Bug fix: the pathspec previously contained no f-string
            # placeholder, so `git show` was asked for a literal, nonexistent
            # path instead of the staged blob ":<filename>".
            staged_cmd = ["git", "show", f":{filename}"]
            staged_contents = get_output(staged_cmd)

            try:
                if not api.check_code_string(
                    staged_contents, file_path=Path(filename), config=config
                ):
                    errors += 1
                    if modify:
                        api.sort_file(filename, config=config)
            except exceptions.FileSkipped:  # pragma: no cover
                pass

    return errors if strict else 0
|
||||
206
.venv/lib/python3.8/site-packages/isort/identify.py
Normal file
206
.venv/lib/python3.8/site-packages/isort/identify.py
Normal file
|
|
@ -0,0 +1,206 @@
|
|||
"""Fast stream based import identification.
|
||||
Eventually this will likely replace parse.py
|
||||
"""
|
||||
from functools import partial
|
||||
from pathlib import Path
|
||||
from typing import Iterator, NamedTuple, Optional, TextIO, Tuple
|
||||
|
||||
from isort.parse import _normalize_line, _strip_syntax, skip_line
|
||||
|
||||
from .comments import parse as parse_comments
|
||||
from .settings import DEFAULT_CONFIG, Config
|
||||
|
||||
# Line prefixes that begin executable declarations; imports() stops scanning
# at the first of these when called with top_only=True.
STATEMENT_DECLARATIONS: Tuple[str, ...] = ("def ", "cdef ", "cpdef ", "class ", "@", "async def")
|
||||
|
||||
|
||||
class Import(NamedTuple):
    """A single identified import: its source location plus parsed components."""

    line_number: int
    indented: bool
    module: str
    attribute: Optional[str] = None
    alias: Optional[str] = None
    cimport: bool = False
    file_path: Optional[Path] = None

    def statement(self) -> str:
        """Reconstruct the import statement this record represents."""
        keyword = "cimport" if self.cimport else "import"
        if self.attribute:
            rebuilt = f"from {self.module} {keyword} {self.attribute}"
        else:
            rebuilt = f"{keyword} {self.module}"
        if self.alias:
            rebuilt += f" as {self.alias}"
        return rebuilt

    def __str__(self) -> str:
        location = f"{self.file_path or ''}:{self.line_number}"
        indent_marker = "indented " if self.indented else ""
        return f"{location} {indent_marker}{self.statement()}"
|
||||
|
||||
|
||||
def imports(
    input_stream: TextIO,
    config: Config = DEFAULT_CONFIG,
    file_path: Optional[Path] = None,
    top_only: bool = False,
) -> Iterator[Import]:
    """Parses a python file taking out and categorizing imports.

    Yields one Import record per imported name found in the stream. With
    top_only=True, scanning stops at the first statement declaration
    (def/class/decorator/etc.).
    """
    in_quote = ""

    # Shared iterator: the body below advances it manually with next() to
    # consume continuation lines of multi-line statements.
    indexed_input = enumerate(input_stream)
    for index, raw_line in indexed_input:
        (skipping_line, in_quote) = skip_line(
            raw_line, in_quote=in_quote, index=index, section_comments=config.section_comments
        )

        if top_only and not in_quote and raw_line.startswith(STATEMENT_DECLARATIONS):
            break
        if skipping_line:
            continue

        # Comment-free version of the line for statement classification.
        stripped_line = raw_line.strip().split("#")[0]
        if stripped_line.startswith("raise") or stripped_line.startswith("yield"):
            # Consume any continuation lines of a raise/yield so embedded
            # strings such as "import" don't get misparsed as imports.
            if stripped_line == "yield":
                while not stripped_line or stripped_line == "yield":
                    try:
                        index, next_line = next(indexed_input)
                    except StopIteration:
                        break

                    stripped_line = next_line.strip().split("#")[0]
            while stripped_line.endswith("\\"):
                try:
                    index, next_line = next(indexed_input)
                except StopIteration:
                    break

                stripped_line = next_line.strip().split("#")[0]
            continue  # pragma: no cover

        # Split off the end-of-line comment, then split compound statements
        # on ";" and re-attach the comment to the final statement.
        line, *end_of_line_comment = raw_line.split("#", 1)
        statements = [line.strip() for line in line.split(";")]
        if end_of_line_comment:
            statements[-1] = f"{statements[-1]}#{end_of_line_comment[0]}"

        for statement in statements:
            line, _raw_line = _normalize_line(statement)
            if line.startswith(("import ", "cimport ")):
                type_of_import = "straight"
            elif line.startswith("from "):
                type_of_import = "from"
            else:
                continue  # pragma: no cover

            import_string, _ = parse_comments(line)
            normalized_import_string = (
                import_string.replace("import(", "import (").replace("\\", " ").replace("\n", " ")
            )
            cimports: bool = (
                " cimport " in normalized_import_string
                or normalized_import_string.startswith("cimport")
            )
            # Partial pre-binds location/flags; only module/attribute/alias
            # are supplied at yield time.
            identified_import = partial(
                Import,
                index + 1,  # line numbers use 1 based indexing
                raw_line.startswith((" ", "\t")),
                cimport=cimports,
                file_path=file_path,
            )

            if "(" in line.split("#", 1)[0]:
                # Parenthesized import: consume lines until the closing ")".
                while not line.split("#")[0].strip().endswith(")"):
                    try:
                        index, next_line = next(indexed_input)
                    except StopIteration:
                        break

                    line, _ = parse_comments(next_line)
                    import_string += "\n" + line
            else:
                # Backslash-continued import: consume continuation lines.
                while line.strip().endswith("\\"):
                    try:
                        index, next_line = next(indexed_input)
                    except StopIteration:
                        break

                    line, _ = parse_comments(next_line)

                    # Still need to check for parentheses after an escaped line
                    if "(" in line.split("#")[0] and ")" not in line.split("#")[0]:
                        import_string += "\n" + line

                        while not line.split("#")[0].strip().endswith(")"):
                            try:
                                index, next_line = next(indexed_input)
                            except StopIteration:
                                break
                            line, _ = parse_comments(next_line)
                            import_string += "\n" + line
                    else:
                        if import_string.strip().endswith(
                            (" import", " cimport")
                        ) or line.strip().startswith(("import ", "cimport ")):
                            import_string += "\n" + line
                        else:
                            import_string = (
                                import_string.rstrip().rstrip("\\") + " " + line.lstrip()
                            )

            if type_of_import == "from":
                # Flatten the (possibly multi-line) "from X import ..." into a
                # single-line, single-spaced form before tokenizing.
                import_string = (
                    import_string.replace("import(", "import (")
                    .replace("\\", " ")
                    .replace("\n", " ")
                )
                parts = import_string.split(" cimport " if cimports else " import ")

                from_import = parts[0].split(" ")
                import_string = (" cimport " if cimports else " import ").join(
                    [from_import[0] + " " + "".join(from_import[1:])] + parts[1:]
                )

            just_imports = [
                item.replace("{|", "{ ").replace("|}", " }")
                for item in _strip_syntax(import_string).split()
            ]

            direct_imports = just_imports[1:]
            top_level_module = ""
            # First yield any aliased names ("x as y"), removing them from the
            # token list as they are handled.
            if "as" in just_imports and (just_imports.index("as") + 1) < len(just_imports):
                while "as" in just_imports:
                    attribute = None
                    as_index = just_imports.index("as")
                    if type_of_import == "from":
                        attribute = just_imports[as_index - 1]
                        top_level_module = just_imports[0]
                        module = top_level_module + "." + attribute
                        alias = just_imports[as_index + 1]
                        direct_imports.remove(attribute)
                        direct_imports.remove(alias)
                        direct_imports.remove("as")
                        just_imports[1:] = direct_imports
                        if attribute == alias and config.remove_redundant_aliases:
                            yield identified_import(top_level_module, attribute)
                        else:
                            yield identified_import(top_level_module, attribute, alias=alias)

                    else:
                        module = just_imports[as_index - 1]
                        alias = just_imports[as_index + 1]
                        just_imports.remove(alias)
                        just_imports.remove("as")
                        just_imports.remove(module)
                        if module == alias and config.remove_redundant_aliases:
                            yield identified_import(module)
                        else:
                            yield identified_import(module, alias=alias)

            # Then yield the remaining (unaliased) imports.
            if just_imports:
                if type_of_import == "from":
                    module = just_imports.pop(0)
                    for attribute in just_imports:
                        yield identified_import(module, attribute)
                else:
                    for module in just_imports:
                        yield identified_import(module)
|
||||
73
.venv/lib/python3.8/site-packages/isort/io.py
Normal file
73
.venv/lib/python3.8/site-packages/isort/io.py
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
"""Defines any IO utilities used by isort"""
|
||||
import re
|
||||
import tokenize
|
||||
from contextlib import contextmanager
|
||||
from io import BytesIO, StringIO, TextIOWrapper
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Iterator, TextIO, Union
|
||||
|
||||
from isort._future import dataclass
|
||||
from isort.exceptions import UnsupportedEncoding
|
||||
|
||||
_ENCODING_PATTERN = re.compile(br"^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class File:
    """An opened source file: its text stream, resolved path, and encoding."""

    stream: TextIO
    path: Path
    encoding: str

    @staticmethod
    def detect_encoding(filename: Union[str, Path], readline: Callable[[], bytes]) -> str:
        """Detect the file's text encoding, raising UnsupportedEncoding on failure."""
        try:
            return tokenize.detect_encoding(readline)[0]
        except Exception:
            raise UnsupportedEncoding(filename)

    @staticmethod
    def from_contents(contents: str, filename: str) -> "File":
        """Build a File backed by an in-memory stream of the given contents."""
        byte_stream = BytesIO(contents.encode("utf-8"))
        detected = File.detect_encoding(filename, byte_stream.readline)
        return File(  # type: ignore
            stream=StringIO(contents), path=Path(filename).resolve(), encoding=detected
        )

    @property
    def extension(self) -> str:
        """The path's suffix without its leading dot."""
        return self.path.suffix.lstrip(".")

    @staticmethod
    def _open(filename: Union[str, Path]) -> TextIOWrapper:
        """Open a file in read only mode using the encoding detected by
        detect_encoding().
        """
        raw_buffer = open(filename, "rb")
        try:
            detected = File.detect_encoding(filename, raw_buffer.readline)
            # Rewind: detect_encoding consumed the first line(s).
            raw_buffer.seek(0)
            wrapper = TextIOWrapper(raw_buffer, detected, line_buffering=True, newline="")
            wrapper.mode = "r"  # type: ignore
            return wrapper
        except Exception:
            # Don't leak the underlying binary handle on failure.
            raw_buffer.close()
            raise

    @staticmethod
    @contextmanager
    def read(filename: Union[str, Path]) -> Iterator["File"]:
        """Context manager yielding an opened File; closes its stream on exit."""
        resolved = Path(filename).resolve()
        stream = None
        try:
            stream = File._open(resolved)
            yield File(stream=stream, path=resolved, encoding=stream.encoding)  # type: ignore
        finally:
            if stream is not None:
                stream.close()
|
||||
|
||||
|
||||
class _EmptyIO(StringIO):
|
||||
def write(self, *args: Any, **kwargs: Any) -> None: # type: ignore # skipcq: PTC-W0049
|
||||
pass
|
||||
|
||||
|
||||
Empty = _EmptyIO()
|
||||
113
.venv/lib/python3.8/site-packages/isort/literal.py
Normal file
113
.venv/lib/python3.8/site-packages/isort/literal.py
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
import ast
|
||||
from pprint import PrettyPrinter
|
||||
from typing import Any, Callable, Dict, List, Set, Tuple
|
||||
|
||||
from isort.exceptions import (
|
||||
AssignmentsFormatMismatch,
|
||||
LiteralParsingFailure,
|
||||
LiteralSortTypeMismatch,
|
||||
)
|
||||
from isort.settings import DEFAULT_CONFIG, Config
|
||||
|
||||
|
||||
class ISortPrettyPrinter(PrettyPrinter):
    """An isort customized pretty printer for sorted literals."""

    def __init__(self, config: Config):
        # Wrap output at the configured line length; compact mode packs as
        # many sequence items per line as will fit.
        wrap_width = config.line_length
        super().__init__(width=wrap_width, compact=True)
|
||||
|
||||
|
||||
# Registry of literal sort types: name -> (expected python type, formatter).
# Populated via the register_type decorator defined in this module.
type_mapping: Dict[str, Tuple[type, Callable[[Any, ISortPrettyPrinter], str]]] = {}
|
||||
|
||||
|
||||
def assignments(code: str) -> str:
    """Sort a block of single-line assignments alphabetically by variable name.

    Blank lines are dropped. Raises AssignmentsFormatMismatch when any
    non-blank line is not of the ``name = value`` form.
    """
    parsed: Dict[str, str] = {}
    for line in code.splitlines(keepends=True):
        if not line.strip():
            continue
        if " = " not in line:
            raise AssignmentsFormatMismatch(code)
        name, value = line.split(" = ", 1)
        parsed[name] = value

    return "".join(f"{name} = {parsed[name]}" for name in sorted(parsed))
|
||||
|
||||
|
||||
def assignment(code: str, sort_type: str, extension: str, config: Config = DEFAULT_CONFIG) -> str:
    """Sorts the literal present within the provided code against the provided sort type,
    returning the sorted representation of the source code.
    """
    if sort_type == "assignments":
        return assignments(code)
    if sort_type not in type_mapping:
        raise ValueError(
            "Trying to sort using an undefined sort_type. "
            f"Defined sort types are {', '.join(type_mapping.keys())}."
        )

    variable_name, literal_source = code.split(" = ")
    variable_name = variable_name.lstrip()
    try:
        value = ast.literal_eval(literal_source)
    except Exception as error:
        raise LiteralParsingFailure(code, error)

    expected_type, sort_function = type_mapping[sort_type]
    if type(value) != expected_type:
        raise LiteralSortTypeMismatch(type(value), expected_type)

    printer = ISortPrettyPrinter(config)
    sorted_value_code = f"{variable_name} = {sort_function(value, printer)}"
    if config.formatting_function:
        formatted = config.formatting_function(sorted_value_code, extension, config)
        sorted_value_code = formatted.rstrip()

    # Re-attach any trailing whitespace/newlines from the original code.
    sorted_value_code += code[len(code.rstrip()) :]
    return sorted_value_code
|
||||
|
||||
|
||||
def register_type(
    name: str, kind: type
) -> Callable[[Callable[[Any, ISortPrettyPrinter], str]], Callable[[Any, ISortPrettyPrinter], str]]:
    """Registers a new literal sort type under the given name."""

    def decorator(
        function: Callable[[Any, ISortPrettyPrinter], str]
    ) -> Callable[[Any, ISortPrettyPrinter], str]:
        # Record the expected python type alongside its formatter, then hand
        # the function back unchanged.
        type_mapping[name] = (kind, function)
        return function

    return decorator
|
||||
|
||||
|
||||
@register_type("dict", dict)
def _dict(value: Dict[Any, Any], printer: ISortPrettyPrinter) -> str:
    """Pretty-print a dict with its entries ordered by value."""
    by_value = sorted(value.items(), key=lambda item: item[1])
    return printer.pformat(dict(by_value))  # type: ignore
|
||||
|
||||
|
||||
@register_type("list", list)
def _list(value: List[Any], printer: ISortPrettyPrinter) -> str:
    """Pretty-print a list with its items sorted."""
    return printer.pformat(sorted(value))
|
||||
|
||||
|
||||
@register_type("unique-list", list)
def _unique_list(value: List[Any], printer: ISortPrettyPrinter) -> str:
    """Pretty-print a list with duplicates removed and items sorted."""
    # sorted() already returns a list; the previous list(...) wrapper was a
    # redundant copy (flake8-comprehensions C414).
    return printer.pformat(sorted(set(value)))
|
||||
|
||||
|
||||
@register_type("set", set)
def _set(value: Set[Any], printer: ISortPrettyPrinter) -> str:
    """Pretty-print a set with its items sorted, using set-literal braces."""
    # Format as a tuple for deterministic ordering, then swap the
    # parentheses for braces.
    inner = printer.pformat(tuple(sorted(value)))[1:-1]
    return "{" + inner + "}"
|
||||
|
||||
|
||||
@register_type("tuple", tuple)
def _tuple(value: Tuple[Any, ...], printer: ISortPrettyPrinter) -> str:
    """Pretty-print a tuple with its items sorted."""
    return printer.pformat(tuple(sorted(value)))
|
||||
|
||||
|
||||
@register_type("unique-tuple", tuple)
def _unique_tuple(value: Tuple[Any, ...], printer: ISortPrettyPrinter) -> str:
    """Pretty-print a tuple with duplicates removed and items sorted."""
    deduplicated = set(value)
    return printer.pformat(tuple(sorted(deduplicated)))
|
||||
19
.venv/lib/python3.8/site-packages/isort/logo.py
Normal file
19
.venv/lib/python3.8/site-packages/isort/logo.py
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
from ._version import __version__
|
||||
|
||||
ASCII_ART = rf"""
|
||||
_ _
|
||||
(_) ___ ___ _ __| |_
|
||||
| |/ _/ / _ \/ '__ _/
|
||||
| |\__ \/\_\/| | | |_
|
||||
|_|\___/\___/\_/ \_/
|
||||
|
||||
isort your imports, so you don't have to.
|
||||
|
||||
VERSION {__version__}
|
||||
"""
|
||||
|
||||
__doc__ = f"""
|
||||
```python
|
||||
{ASCII_ART}
|
||||
```
|
||||
"""
|
||||
1285
.venv/lib/python3.8/site-packages/isort/main.py
Normal file
1285
.venv/lib/python3.8/site-packages/isort/main.py
Normal file
File diff suppressed because it is too large
Load diff
655
.venv/lib/python3.8/site-packages/isort/output.py
Normal file
655
.venv/lib/python3.8/site-packages/isort/output.py
Normal file
|
|
@ -0,0 +1,655 @@
|
|||
import copy
|
||||
import itertools
|
||||
from functools import partial
|
||||
from typing import Any, Iterable, List, Optional, Set, Tuple, Type
|
||||
|
||||
from isort.format import format_simplified
|
||||
|
||||
from . import parse, sorting, wrap
|
||||
from .comments import add_to_line as with_comments
|
||||
from .identify import STATEMENT_DECLARATIONS
|
||||
from .settings import DEFAULT_CONFIG, Config
|
||||
|
||||
|
||||
def sorted_imports(
    parsed: parse.ParsedContent,
    config: Config = DEFAULT_CONFIG,
    extension: str = "py",
    import_type: str = "import",
) -> str:
    """Adds the imports back to the file.

    (at the index of the first import) sorted alphabetically and split between groups

    Returns the full reconstructed file contents as a single string.
    """
    # No imports found: return the input unchanged (modulo trailing-newline cleanup).
    if parsed.import_index == -1:
        return _output_as_string(parsed.lines_without_imports, parsed.line_separator)

    formatted_output: List[str] = parsed.lines_without_imports.copy()
    remove_imports = [format_simplified(removal) for removal in config.remove_imports]

    sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate)

    # With no_sections, merge everything into one pseudo-section ("no_sections"),
    # keeping only FUTURE separate since it must stay first.
    if config.no_sections:
        parsed.imports["no_sections"] = {"straight": {}, "from": {}}
        base_sections: Tuple[str, ...] = ()
        for section in sections:
            if section == "FUTURE":
                base_sections = ("FUTURE",)
                continue
            parsed.imports["no_sections"]["straight"].update(
                parsed.imports[section].get("straight", {})
            )
            parsed.imports["no_sections"]["from"].update(parsed.imports[section].get("from", {}))
        sections = base_sections + ("no_sections",)

    output: List[str] = []
    seen_headings: Set[str] = set()
    pending_lines_before = False
    for section in sections:
        straight_modules = parsed.imports[section]["straight"]
        if not config.only_sections:
            straight_modules = sorting.sort(
                config,
                straight_modules,
                key=lambda key: sorting.module_key(
                    key, config, section_name=section, straight_import=True
                ),
                reverse=config.reverse_sort,
            )

        from_modules = parsed.imports[section]["from"]
        if not config.only_sections:
            from_modules = sorting.sort(
                config,
                from_modules,
                key=lambda key: sorting.module_key(key, config, section_name=section),
                reverse=config.reverse_sort,
            )

        # star_first: modules that import "*" come before all others in the section.
        if config.star_first:
            star_modules = []
            other_modules = []
            for module in from_modules:
                if "*" in parsed.imports[section]["from"][module]:
                    star_modules.append(module)
                else:
                    other_modules.append(module)
            from_modules = star_modules + other_modules

        straight_imports = _with_straight_imports(
            parsed, config, straight_modules, section, remove_imports, import_type
        )
        from_imports = _with_from_imports(
            parsed, config, from_modules, section, remove_imports, import_type
        )

        lines_between = [""] * (
            config.lines_between_types if from_modules and straight_modules else 0
        )
        if config.from_first:
            section_output = from_imports + lines_between + straight_imports
        else:
            section_output = straight_imports + lines_between + from_imports

        if config.force_sort_within_sections:
            # collapse comments: attach each run of leading comment lines to the
            # statement that follows so the sort moves them together.
            comments_above = []
            new_section_output: List[str] = []
            for line in section_output:
                if not line:
                    continue
                if line.startswith("#"):
                    comments_above.append(line)
                elif comments_above:
                    new_section_output.append(_LineWithComments(line, comments_above))
                    comments_above = []
                else:
                    new_section_output.append(line)
            # only_sections options is not imposed if force_sort_within_sections is True
            new_section_output = sorting.sort(
                config,
                new_section_output,
                key=partial(sorting.section_key, config=config),
                reverse=config.reverse_sort,
            )

            # uncollapse comments: re-emit each statement's attached comments above it.
            section_output = []
            for line in new_section_output:
                comments = getattr(line, "comments", ())
                if comments:
                    section_output.extend(comments)
                section_output.append(str(line))

        section_name = section
        no_lines_before = section_name in config.no_lines_before

        if section_output:
            # Sections routed to an "isort:imports-<name>" marker are stashed for
            # later placement instead of being emitted here.
            if section_name in parsed.place_imports:
                parsed.place_imports[section_name] = section_output
                continue

            section_title = config.import_headings.get(section_name.lower(), "")
            if section_title and section_title not in seen_headings:
                if config.dedup_headings:
                    seen_headings.add(section_title)
                section_comment = f"# {section_title}"
                if section_comment not in parsed.lines_without_imports[0:1]:  # pragma: no branch
                    section_output.insert(0, section_comment)

            section_footer = config.import_footers.get(section_name.lower(), "")
            if section_footer and section_footer not in seen_headings:
                if config.dedup_headings:
                    seen_headings.add(section_footer)
                section_comment_end = f"# {section_footer}"
                if (
                    section_comment_end not in parsed.lines_without_imports[-1:]
                ):  # pragma: no branch
                    section_output.append("")  # Empty line for black compatibility
                    section_output.append(section_comment_end)

            if pending_lines_before or not no_lines_before:
                output += [""] * config.lines_between_sections

            output += section_output

            pending_lines_before = False
        else:
            # Empty section: remember that a separator is still owed before the
            # next non-empty section (unless suppressed via no_lines_before).
            pending_lines_before = pending_lines_before or not no_lines_before

    if config.ensure_newline_before_comments:
        output = _ensure_newline_before_comment(output)

    while output and output[-1].strip() == "":
        output.pop()  # pragma: no cover
    while output and output[0].strip() == "":
        output.pop(0)

    # Optional external formatter (e.g. black integration) over the import block.
    if config.formatting_function:
        output = config.formatting_function(
            parsed.line_separator.join(output), extension, config
        ).splitlines()

    # Splice the sorted import block back in at the original first-import index.
    output_at = 0
    if parsed.import_index < parsed.original_line_count:
        output_at = parsed.import_index
    formatted_output[output_at:0] = output

    if output:
        imports_tail = output_at + len(output)
        # Drop blank lines immediately following the import block; they are
        # re-added below according to configuration.
        while [
            character.strip() for character in formatted_output[imports_tail : imports_tail + 1]
        ] == [""]:
            formatted_output.pop(imports_tail)

        if len(formatted_output) > imports_tail:
            next_construct = ""
            tail = formatted_output[imports_tail:]

            # Find the first real statement after the imports to decide spacing.
            for index, line in enumerate(tail):  # pragma: no branch
                should_skip, in_quote, *_ = parse.skip_line(
                    line,
                    in_quote="",
                    index=len(formatted_output),
                    section_comments=config.section_comments,
                    needs_import=False,
                )
                if not should_skip and line.strip():
                    # A comment directly followed by code belongs to that code —
                    # keep scanning so the code line decides the spacing.
                    if (
                        line.strip().startswith("#")
                        and len(tail) > (index + 1)
                        and tail[index + 1].strip()
                    ):
                        continue
                    next_construct = line
                    break
                if in_quote:  # pragma: no branch
                    next_construct = line
                    break

            if config.lines_after_imports != -1:
                formatted_output[imports_tail:0] = [
                    "" for line in range(config.lines_after_imports)
                ]
            elif extension != "pyi" and next_construct.startswith(STATEMENT_DECLARATIONS):
                # PEP 8: two blank lines before a top-level def/class.
                formatted_output[imports_tail:0] = ["", ""]
            else:
                formatted_output[imports_tail:0] = [""]

    if config.lines_before_imports != -1:
        formatted_output[:0] = ["" for line in range(config.lines_before_imports)]

    # Insert any stashed sections right after their placement marker comments.
    if parsed.place_imports:
        new_out_lines = []
        for index, line in enumerate(formatted_output):
            new_out_lines.append(line)
            if line in parsed.import_placements:
                new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]])
                if (
                    len(formatted_output) <= (index + 1)
                    or formatted_output[index + 1].strip() != ""
                ):
                    new_out_lines.append("")
        formatted_output = new_out_lines

    return _output_as_string(formatted_output, parsed.line_separator)
|
||||
|
||||
|
||||
def _with_from_imports(
    parsed: parse.ParsedContent,
    config: Config,
    from_modules: Iterable[str],
    section: str,
    remove_imports: List[str],
    import_type: str,
) -> List[str]:
    """Render "from X import ..." statements for *section*, one module at a time.

    Handles aliases ("as" imports), star imports, comment re-attachment, and
    the various single-line / combined / wrapped output modes.
    """
    output: List[str] = []
    for module in from_modules:
        if module in remove_imports:
            continue

        import_start = f"from {module} {import_type} "
        from_imports = list(parsed.imports[section]["from"][module])
        if (
            not config.no_inline_sort
            or (config.force_single_line and module not in config.single_line_exclusions)
        ) and not config.only_sections:
            from_imports = sorting.sort(
                config,
                from_imports,
                key=lambda key: sorting.module_key(
                    key,
                    config,
                    True,
                    config.force_alphabetical_sort_within_sections,
                    section_name=section,
                ),
                reverse=config.reverse_sort,
            )
        if remove_imports:
            from_imports = [
                line for line in from_imports if f"{module}.{line}" not in remove_imports
            ]

        # Map each imported name to its rendered "<name> as <alias>" forms.
        sub_modules = [f"{module}.{from_import}" for from_import in from_imports]
        as_imports = {
            from_import: [
                f"{from_import} as {as_module}" for as_module in parsed.as_map["from"][sub_module]
            ]
            for from_import, sub_module in zip(from_imports, sub_modules)
            if sub_module in parsed.as_map["from"]
        }
        # combine_as_imports: splice alias forms directly into the name list so
        # they are emitted on the same statement.
        if config.combine_as_imports and not ("*" in from_imports and config.combine_star):
            if not config.no_inline_sort:
                for as_import in as_imports:
                    if not config.only_sections:
                        as_imports[as_import] = sorting.sort(config, as_imports[as_import])
            for from_import in copy.copy(from_imports):
                if from_import in as_imports:
                    idx = from_imports.index(from_import)
                    # A truthy entry means the plain (non-aliased) import was also
                    # used; keep it and insert the alias forms after it.
                    if parsed.imports[section]["from"][module][from_import]:
                        from_imports[(idx + 1) : (idx + 1)] = as_imports.pop(from_import)
                    else:
                        from_imports[idx : (idx + 1)] = as_imports.pop(from_import)

        only_show_as_imports = False
        comments = parsed.categorized_comments["from"].pop(module, ())
        above_comments = parsed.categorized_comments["above"]["from"].pop(module, None)
        while from_imports:
            if above_comments:
                output.extend(above_comments)
                above_comments = None

            if "*" in from_imports and config.combine_star:
                # Star import subsumes everything: emit only "*", then keep just
                # the aliased names (which "*" cannot replace).
                import_statement = wrap.line(
                    with_comments(
                        _with_star_comments(parsed, module, list(comments or ())),
                        f"{import_start}*",
                        removed=config.ignore_comments,
                        comment_prefix=config.comment_prefix,
                    ),
                    parsed.line_separator,
                    config,
                )
                from_imports = [
                    from_import for from_import in from_imports if from_import in as_imports
                ]
                only_show_as_imports = True
            elif config.force_single_line and module not in config.single_line_exclusions:
                # One statement per imported name.
                import_statement = ""
                while from_imports:
                    from_import = from_imports.pop(0)
                    single_import_line = with_comments(
                        comments,
                        import_start + from_import,
                        removed=config.ignore_comments,
                        comment_prefix=config.comment_prefix,
                    )
                    comment = (
                        parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None)
                    )
                    if comment:
                        single_import_line += (
                            f"{comments and ';' or config.comment_prefix} " f"{comment}"
                        )
                    if from_import in as_imports:
                        if (
                            parsed.imports[section]["from"][module][from_import]
                            and not only_show_as_imports
                        ):
                            output.append(
                                wrap.line(single_import_line, parsed.line_separator, config)
                            )
                        from_comments = parsed.categorized_comments["straight"].get(
                            f"{module}.{from_import}"
                        )

                        if not config.only_sections:
                            output.extend(
                                with_comments(
                                    from_comments,
                                    wrap.line(
                                        import_start + as_import, parsed.line_separator, config
                                    ),
                                    removed=config.ignore_comments,
                                    comment_prefix=config.comment_prefix,
                                )
                                for as_import in sorting.sort(config, as_imports[from_import])
                            )

                        else:
                            output.extend(
                                with_comments(
                                    from_comments,
                                    wrap.line(
                                        import_start + as_import, parsed.line_separator, config
                                    ),
                                    removed=config.ignore_comments,
                                    comment_prefix=config.comment_prefix,
                                )
                                for as_import in as_imports[from_import]
                            )
                    else:
                        output.append(wrap.line(single_import_line, parsed.line_separator, config))
                    comments = None
            else:
                # Default mode: leading aliased names get their own statements,
                # the remainder is combined into one (possibly wrapped) statement.
                while from_imports and from_imports[0] in as_imports:
                    from_import = from_imports.pop(0)

                    if not config.only_sections:
                        as_imports[from_import] = sorting.sort(config, as_imports[from_import])
                    from_comments = (
                        parsed.categorized_comments["straight"].get(f"{module}.{from_import}") or []
                    )
                    if (
                        parsed.imports[section]["from"][module][from_import]
                        and not only_show_as_imports
                    ):
                        specific_comment = (
                            parsed.categorized_comments["nested"]
                            .get(module, {})
                            .pop(from_import, None)
                        )
                        if specific_comment:
                            from_comments.append(specific_comment)
                        output.append(
                            wrap.line(
                                with_comments(
                                    from_comments,
                                    import_start + from_import,
                                    removed=config.ignore_comments,
                                    comment_prefix=config.comment_prefix,
                                ),
                                parsed.line_separator,
                                config,
                            )
                        )
                        from_comments = []

                    for as_import in as_imports[from_import]:
                        specific_comment = (
                            parsed.categorized_comments["nested"]
                            .get(module, {})
                            .pop(as_import, None)
                        )
                        if specific_comment:
                            from_comments.append(specific_comment)

                        output.append(
                            wrap.line(
                                with_comments(
                                    from_comments,
                                    import_start + as_import,
                                    removed=config.ignore_comments,
                                    comment_prefix=config.comment_prefix,
                                ),
                                parsed.line_separator,
                                config,
                            )
                        )

                        from_comments = []

                if "*" in from_imports:
                    output.append(
                        with_comments(
                            _with_star_comments(parsed, module, []),
                            f"{import_start}*",
                            removed=config.ignore_comments,
                            comment_prefix=config.comment_prefix,
                        )
                    )
                    from_imports.remove("*")

                # Names that carry a nested (per-name) comment are emitted on
                # their own line so the comment can follow them.
                for from_import in copy.copy(from_imports):
                    comment = (
                        parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None)
                    )
                    if comment:
                        from_imports.remove(from_import)
                        if from_imports:
                            use_comments = []
                        else:
                            use_comments = comments
                            comments = None
                        single_import_line = with_comments(
                            use_comments,
                            import_start + from_import,
                            removed=config.ignore_comments,
                            comment_prefix=config.comment_prefix,
                        )
                        single_import_line += (
                            f"{use_comments and ';' or config.comment_prefix} " f"{comment}"
                        )
                        output.append(wrap.line(single_import_line, parsed.line_separator, config))

                from_import_section = []
                while from_imports and (
                    from_imports[0] not in as_imports
                    or (
                        config.combine_as_imports
                        and parsed.imports[section]["from"][module][from_import]
                    )
                ):
                    from_import_section.append(from_imports.pop(0))
                if config.combine_as_imports:
                    comments = (comments or []) + list(
                        parsed.categorized_comments["from"].pop(f"{module}.__combined_as__", ())
                    )
                import_statement = with_comments(
                    comments,
                    import_start + (", ").join(from_import_section),
                    removed=config.ignore_comments,
                    comment_prefix=config.comment_prefix,
                )
                if not from_import_section:
                    import_statement = ""

                do_multiline_reformat = False

                force_grid_wrap = config.force_grid_wrap
                if force_grid_wrap and len(from_import_section) >= force_grid_wrap:
                    do_multiline_reformat = True

                if len(import_statement) > config.line_length and len(from_import_section) > 1:
                    do_multiline_reformat = True

                # If line too long AND have imports AND we are
                # NOT using GRID or VERTICAL wrap modes
                if (
                    len(import_statement) > config.line_length
                    and len(from_import_section) > 0
                    and config.multi_line_output
                    not in (wrap.Modes.GRID, wrap.Modes.VERTICAL)  # type: ignore
                ):
                    do_multiline_reformat = True

                if do_multiline_reformat:
                    import_statement = wrap.import_statement(
                        import_start=import_start,
                        from_imports=from_import_section,
                        comments=comments,
                        line_separator=parsed.line_separator,
                        config=config,
                    )
                    # GRID mode falls back to VERTICAL_GRID when any wrapped line
                    # still exceeds the configured line length.
                    if config.multi_line_output == wrap.Modes.GRID:  # type: ignore
                        other_import_statement = wrap.import_statement(
                            import_start=import_start,
                            from_imports=from_import_section,
                            comments=comments,
                            line_separator=parsed.line_separator,
                            config=config,
                            multi_line_output=wrap.Modes.VERTICAL_GRID,  # type: ignore
                        )
                        if (
                            max(
                                len(import_line)
                                for import_line in import_statement.split(parsed.line_separator)
                            )
                            > config.line_length
                        ):
                            import_statement = other_import_statement
                if not do_multiline_reformat and len(import_statement) > config.line_length:
                    import_statement = wrap.line(import_statement, parsed.line_separator, config)

            if import_statement:
                output.append(import_statement)
    return output
|
||||
|
||||
|
||||
def _with_straight_imports(
    parsed: parse.ParsedContent,
    config: Config,
    straight_modules: Iterable[str],
    section: str,
    remove_imports: List[str],
    import_type: str,
) -> List[str]:
    """Render "import X" (and "import X as Y") statements for *section*."""
    output: List[str] = []

    as_imports = any((module in parsed.as_map["straight"] for module in straight_modules))

    # combine_straight_imports only works for bare imports, 'as' imports not included
    if config.combine_straight_imports and not as_imports:
        if not straight_modules:
            return []

        above_comments: List[str] = []
        inline_comments: List[str] = []

        # Merge all attached comments so they survive the combination.
        for module in straight_modules:
            if module in parsed.categorized_comments["above"]["straight"]:
                above_comments.extend(parsed.categorized_comments["above"]["straight"].pop(module))
            if module in parsed.categorized_comments["straight"]:
                inline_comments.extend(parsed.categorized_comments["straight"][module])

        combined_straight_imports = ", ".join(straight_modules)
        if inline_comments:
            combined_inline_comments = " ".join(inline_comments)
        else:
            combined_inline_comments = ""

        output.extend(above_comments)

        if combined_inline_comments:
            output.append(
                f"{import_type} {combined_straight_imports} # {combined_inline_comments}"
            )
        else:
            output.append(f"{import_type} {combined_straight_imports}")

        return output

    for module in straight_modules:
        if module in remove_imports:
            continue

        # Each entry pairs (rendered statement, lookup key for inline comments).
        import_definition = []
        if module in parsed.as_map["straight"]:
            # A truthy entry means the plain (non-aliased) import was also used.
            if parsed.imports[section]["straight"][module]:
                import_definition.append((f"{import_type} {module}", module))
            import_definition.extend(
                (f"{import_type} {module} as {as_import}", f"{module} as {as_import}")
                for as_import in parsed.as_map["straight"][module]
            )
        else:
            import_definition.append((f"{import_type} {module}", module))

        comments_above = parsed.categorized_comments["above"]["straight"].pop(module, None)
        if comments_above:
            output.extend(comments_above)
        output.extend(
            with_comments(
                parsed.categorized_comments["straight"].get(imodule),
                idef,
                removed=config.ignore_comments,
                comment_prefix=config.comment_prefix,
            )
            for idef, imodule in import_definition
        )

    return output
|
||||
|
||||
|
||||
def _output_as_string(lines: List[str], line_separator: str) -> str:
    """Join *lines* with *line_separator* after normalizing trailing blank lines."""
    normalized = _normalize_empty_lines(lines)
    return line_separator.join(normalized)
|
||||
|
||||
|
||||
def _normalize_empty_lines(lines: List[str]) -> List[str]:
|
||||
while lines and lines[-1].strip() == "":
|
||||
lines.pop(-1)
|
||||
|
||||
lines.append("")
|
||||
return lines
|
||||
|
||||
|
||||
class _LineWithComments(str):
|
||||
comments: List[str]
|
||||
|
||||
def __new__(
|
||||
cls: Type["_LineWithComments"], value: Any, comments: List[str]
|
||||
) -> "_LineWithComments":
|
||||
instance = super().__new__(cls, value)
|
||||
instance.comments = comments
|
||||
return instance
|
||||
|
||||
|
||||
def _ensure_newline_before_comment(output: List[str]) -> List[str]:
|
||||
new_output: List[str] = []
|
||||
|
||||
def is_comment(line: Optional[str]) -> bool:
|
||||
return line.startswith("#") if line else False
|
||||
|
||||
for line, prev_line in zip(output, [None] + output): # type: ignore
|
||||
if is_comment(line) and prev_line != "" and not is_comment(prev_line):
|
||||
new_output.append("")
|
||||
new_output.append(line)
|
||||
return new_output
|
||||
|
||||
|
||||
def _with_star_comments(parsed: parse.ParsedContent, module: str, comments: List[str]) -> List[str]:
    """Return *comments* plus any comment attached to *module*'s star import."""
    star_comment = parsed.categorized_comments["nested"].get(module, {}).pop("*", None)
    if not star_comment:
        return comments
    return comments + [star_comment]
|
||||
590
.venv/lib/python3.8/site-packages/isort/parse.py
Normal file
590
.venv/lib/python3.8/site-packages/isort/parse.py
Normal file
|
|
@ -0,0 +1,590 @@
|
|||
"""Defines parsing functions used by isort for parsing import definitions"""
|
||||
from collections import OrderedDict, defaultdict
|
||||
from functools import partial
|
||||
from itertools import chain
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Tuple
|
||||
from warnings import warn
|
||||
|
||||
from . import place
|
||||
from .comments import parse as parse_comments
|
||||
from .exceptions import MissingSection
|
||||
from .settings import DEFAULT_CONFIG, Config
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from mypy_extensions import TypedDict
|
||||
|
||||
CommentsAboveDict = TypedDict(
|
||||
"CommentsAboveDict", {"straight": Dict[str, Any], "from": Dict[str, Any]}
|
||||
)
|
||||
|
||||
CommentsDict = TypedDict(
|
||||
"CommentsDict",
|
||||
{
|
||||
"from": Dict[str, Any],
|
||||
"straight": Dict[str, Any],
|
||||
"nested": Dict[str, Any],
|
||||
"above": CommentsAboveDict,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def _infer_line_separator(contents: str) -> str:
|
||||
if "\r\n" in contents:
|
||||
return "\r\n"
|
||||
if "\r" in contents:
|
||||
return "\r"
|
||||
return "\n"
|
||||
|
||||
|
||||
def _normalize_line(raw_line: str) -> Tuple[str, str]:
|
||||
"""Normalizes import related statements in the provided line.
|
||||
|
||||
Returns (normalized_line: str, raw_line: str)
|
||||
"""
|
||||
line = raw_line.replace("from.import ", "from . import ")
|
||||
line = line.replace("from.cimport ", "from . cimport ")
|
||||
line = line.replace("import*", "import *")
|
||||
line = line.replace(" .import ", " . import ")
|
||||
line = line.replace(" .cimport ", " . cimport ")
|
||||
line = line.replace("\t", " ")
|
||||
return (line, raw_line)
|
||||
|
||||
|
||||
def import_type(line: str, config: Config = DEFAULT_CONFIG) -> Optional[str]:
    """If the current line is an import line it will return its type (from or straight)"""
    # noqa-marked lines are left alone entirely when honor_noqa is on.
    if config.honor_noqa and line.lower().rstrip().endswith("noqa"):
        return None
    if any(marker in line for marker in ("isort:skip", "isort: skip", "isort: split")):
        return None
    if line.startswith(("import ", "cimport ")):
        return "straight"
    if line.startswith("from "):
        return "from"
    return None
|
||||
|
||||
|
||||
def _strip_syntax(import_string: str) -> str:
|
||||
import_string = import_string.replace("_import", "[[i]]")
|
||||
import_string = import_string.replace("_cimport", "[[ci]]")
|
||||
for remove_syntax in ["\\", "(", ")", ","]:
|
||||
import_string = import_string.replace(remove_syntax, " ")
|
||||
import_list = import_string.split()
|
||||
for key in ("from", "import", "cimport"):
|
||||
if key in import_list:
|
||||
import_list.remove(key)
|
||||
import_string = " ".join(import_list)
|
||||
import_string = import_string.replace("[[i]]", "_import")
|
||||
import_string = import_string.replace("[[ci]]", "_cimport")
|
||||
return import_string.replace("{ ", "{|").replace(" }", "|}")
|
||||
|
||||
|
||||
def skip_line(
    line: str,
    in_quote: str,
    index: int,
    section_comments: Tuple[str, ...],
    needs_import: bool = True,
) -> Tuple[bool, str]:
    """Determine if a given line should be skipped.

    Returns back a tuple containing:

    (skip_line: bool,
    in_quote: str,)
    """
    # A line started inside a string literal is always skipped.
    should_skip = bool(in_quote)
    if '"' in line or "'" in line:
        # Walk the line character by character, tracking string-literal state
        # across single quotes, double quotes, and triple-quoted strings.
        char_index = 0
        while char_index < len(line):
            if line[char_index] == "\\":
                # Skip the escaped character.
                char_index += 1
            elif in_quote:
                # Inside a string: only the matching closer ends it.
                if line[char_index : char_index + len(in_quote)] == in_quote:
                    in_quote = ""
            elif line[char_index] in ("'", '"'):
                long_quote = line[char_index : char_index + 3]
                if long_quote in ('"""', "'''"):
                    in_quote = long_quote
                    char_index += 2
                else:
                    in_quote = line[char_index]
            elif line[char_index] == "#":
                # Comment outside any string: rest of line is irrelevant.
                break
            char_index += 1

    # A ';'-joined compound line is skipped unless every part is an import.
    if ";" in line.split("#")[0] and needs_import:
        for part in (part.strip() for part in line.split(";")):
            if (
                part
                and not part.startswith("from ")
                and not part.startswith(("import ", "cimport "))
            ):
                should_skip = True

    return (bool(should_skip or in_quote), in_quote)
|
||||
|
||||
|
||||
class ParsedContent(NamedTuple):
    """Everything isort's parser extracted from one source file."""

    # Original input lines.
    in_lines: List[str]
    # The input with all recognized import statements removed.
    lines_without_imports: List[str]
    # Index at which sorted imports should be re-inserted; -1 when none were found.
    import_index: int
    # Section name -> rendered import lines destined for a placement marker.
    place_imports: Dict[str, List[str]]
    # "isort:imports-<name>" marker comment line -> section name it stands for.
    import_placements: Dict[str, str]
    # "straight"/"from" -> module (or module.attr) -> list of "as" aliases.
    as_map: Dict[str, Dict[str, List[str]]]
    # Section name -> {"straight": ..., "from": ...} categorized import data.
    imports: Dict[str, Dict[str, Any]]
    # Comments grouped by attachment point (from / straight / nested / above).
    categorized_comments: "CommentsDict"
    # Count of changes recorded during parsing — TODO confirm exact semantics.
    change_count: int
    # Line count of the original input.
    original_line_count: int
    # Line separator in use ("\n", "\r\n", or "\r").
    line_separator: str
    # Section names imports were classified into (plus forced_separate entries).
    sections: Any
    # Messages collected for verbose output.
    verbose_output: List[str]
|
||||
|
||||
|
||||
def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedContent:
|
||||
"""Parses a python file taking out and categorizing imports."""
|
||||
line_separator: str = config.line_ending or _infer_line_separator(contents)
|
||||
in_lines = contents.splitlines()
|
||||
if contents and contents[-1] in ("\n", "\r"):
|
||||
in_lines.append("")
|
||||
|
||||
out_lines = []
|
||||
original_line_count = len(in_lines)
|
||||
if config.old_finders:
|
||||
from .deprecated.finders import FindersManager
|
||||
|
||||
finder = FindersManager(config=config).find
|
||||
else:
|
||||
finder = partial(place.module, config=config)
|
||||
|
||||
line_count = len(in_lines)
|
||||
|
||||
place_imports: Dict[str, List[str]] = {}
|
||||
import_placements: Dict[str, str] = {}
|
||||
as_map: Dict[str, Dict[str, List[str]]] = {
|
||||
"straight": defaultdict(list),
|
||||
"from": defaultdict(list),
|
||||
}
|
||||
imports: OrderedDict[str, Dict[str, Any]] = OrderedDict()
|
||||
verbose_output: List[str] = []
|
||||
|
||||
for section in chain(config.sections, config.forced_separate):
|
||||
imports[section] = {"straight": OrderedDict(), "from": OrderedDict()}
|
||||
categorized_comments: CommentsDict = {
|
||||
"from": {},
|
||||
"straight": {},
|
||||
"nested": {},
|
||||
"above": {"straight": {}, "from": {}},
|
||||
}
|
||||
|
||||
index = 0
|
||||
import_index = -1
|
||||
in_quote = ""
|
||||
while index < line_count:
|
||||
line = in_lines[index]
|
||||
index += 1
|
||||
statement_index = index
|
||||
(skipping_line, in_quote) = skip_line(
|
||||
line, in_quote=in_quote, index=index, section_comments=config.section_comments
|
||||
)
|
||||
|
||||
if (
|
||||
line in config.section_comments or line in config.section_comments_end
|
||||
) and not skipping_line:
|
||||
if import_index == -1: # pragma: no branch
|
||||
import_index = index - 1
|
||||
continue
|
||||
|
||||
if "isort:imports-" in line and line.startswith("#"):
|
||||
section = line.split("isort:imports-")[-1].split()[0].upper()
|
||||
place_imports[section] = []
|
||||
import_placements[line] = section
|
||||
elif "isort: imports-" in line and line.startswith("#"):
|
||||
section = line.split("isort: imports-")[-1].split()[0].upper()
|
||||
place_imports[section] = []
|
||||
import_placements[line] = section
|
||||
|
||||
if skipping_line:
|
||||
out_lines.append(line)
|
||||
continue
|
||||
|
||||
lstripped_line = line.lstrip()
|
||||
if (
|
||||
config.float_to_top
|
||||
and import_index == -1
|
||||
and line
|
||||
and not in_quote
|
||||
and not lstripped_line.startswith("#")
|
||||
and not lstripped_line.startswith("'''")
|
||||
and not lstripped_line.startswith('"""')
|
||||
):
|
||||
if not lstripped_line.startswith("import") and not lstripped_line.startswith("from"):
|
||||
import_index = index - 1
|
||||
while import_index and not in_lines[import_index - 1]:
|
||||
import_index -= 1
|
||||
else:
|
||||
commentless = line.split("#", 1)[0].strip()
|
||||
if (
|
||||
("isort:skip" in line or "isort: skip" in line)
|
||||
and "(" in commentless
|
||||
and ")" not in commentless
|
||||
):
|
||||
import_index = index
|
||||
|
||||
starting_line = line
|
||||
while "isort:skip" in starting_line or "isort: skip" in starting_line:
|
||||
commentless = starting_line.split("#", 1)[0]
|
||||
if (
|
||||
"(" in commentless
|
||||
and not commentless.rstrip().endswith(")")
|
||||
and import_index < line_count
|
||||
):
|
||||
|
||||
while import_index < line_count and not commentless.rstrip().endswith(
|
||||
")"
|
||||
):
|
||||
commentless = in_lines[import_index].split("#", 1)[0]
|
||||
import_index += 1
|
||||
else:
|
||||
import_index += 1
|
||||
|
||||
if import_index >= line_count:
|
||||
break
|
||||
|
||||
starting_line = in_lines[import_index]
|
||||
|
||||
line, *end_of_line_comment = line.split("#", 1)
|
||||
if ";" in line:
|
||||
statements = [line.strip() for line in line.split(";")]
|
||||
else:
|
||||
statements = [line]
|
||||
if end_of_line_comment:
|
||||
statements[-1] = f"{statements[-1]}#{end_of_line_comment[0]}"
|
||||
|
||||
for statement in statements:
|
||||
line, raw_line = _normalize_line(statement)
|
||||
type_of_import = import_type(line, config) or ""
|
||||
raw_lines = [raw_line]
|
||||
if not type_of_import:
|
||||
out_lines.append(raw_line)
|
||||
continue
|
||||
|
||||
if import_index == -1:
|
||||
import_index = index - 1
|
||||
nested_comments = {}
|
||||
import_string, comment = parse_comments(line)
|
||||
comments = [comment] if comment else []
|
||||
line_parts = [part for part in _strip_syntax(import_string).strip().split(" ") if part]
|
||||
if type_of_import == "from" and len(line_parts) == 2 and comments:
|
||||
nested_comments[line_parts[-1]] = comments[0]
|
||||
|
||||
if "(" in line.split("#", 1)[0] and index < line_count:
|
||||
while not line.split("#")[0].strip().endswith(")") and index < line_count:
|
||||
line, new_comment = parse_comments(in_lines[index])
|
||||
index += 1
|
||||
if new_comment:
|
||||
comments.append(new_comment)
|
||||
stripped_line = _strip_syntax(line).strip()
|
||||
if (
|
||||
type_of_import == "from"
|
||||
and stripped_line
|
||||
and " " not in stripped_line.replace(" as ", "")
|
||||
and new_comment
|
||||
):
|
||||
nested_comments[stripped_line] = comments[-1]
|
||||
import_string += line_separator + line
|
||||
raw_lines.append(line)
|
||||
else:
|
||||
while line.strip().endswith("\\"):
|
||||
line, new_comment = parse_comments(in_lines[index])
|
||||
line = line.lstrip()
|
||||
index += 1
|
||||
if new_comment:
|
||||
comments.append(new_comment)
|
||||
|
||||
# Still need to check for parentheses after an escaped line
|
||||
if (
|
||||
"(" in line.split("#")[0]
|
||||
and ")" not in line.split("#")[0]
|
||||
and index < line_count
|
||||
):
|
||||
stripped_line = _strip_syntax(line).strip()
|
||||
if (
|
||||
type_of_import == "from"
|
||||
and stripped_line
|
||||
and " " not in stripped_line.replace(" as ", "")
|
||||
and new_comment
|
||||
):
|
||||
nested_comments[stripped_line] = comments[-1]
|
||||
import_string += line_separator + line
|
||||
raw_lines.append(line)
|
||||
|
||||
while not line.split("#")[0].strip().endswith(")") and index < line_count:
|
||||
line, new_comment = parse_comments(in_lines[index])
|
||||
index += 1
|
||||
if new_comment:
|
||||
comments.append(new_comment)
|
||||
stripped_line = _strip_syntax(line).strip()
|
||||
if (
|
||||
type_of_import == "from"
|
||||
and stripped_line
|
||||
and " " not in stripped_line.replace(" as ", "")
|
||||
and new_comment
|
||||
):
|
||||
nested_comments[stripped_line] = comments[-1]
|
||||
import_string += line_separator + line
|
||||
raw_lines.append(line)
|
||||
|
||||
stripped_line = _strip_syntax(line).strip()
|
||||
if (
|
||||
type_of_import == "from"
|
||||
and stripped_line
|
||||
and " " not in stripped_line.replace(" as ", "")
|
||||
and new_comment
|
||||
):
|
||||
nested_comments[stripped_line] = comments[-1]
|
||||
if import_string.strip().endswith(
|
||||
(" import", " cimport")
|
||||
) or line.strip().startswith(("import ", "cimport ")):
|
||||
import_string += line_separator + line
|
||||
else:
|
||||
import_string = import_string.rstrip().rstrip("\\") + " " + line.lstrip()
|
||||
|
||||
if type_of_import == "from":
|
||||
cimports: bool
|
||||
import_string = (
|
||||
import_string.replace("import(", "import (")
|
||||
.replace("\\", " ")
|
||||
.replace("\n", " ")
|
||||
)
|
||||
if "import " not in import_string:
|
||||
out_lines.extend(raw_lines)
|
||||
continue
|
||||
|
||||
if " cimport " in import_string:
|
||||
parts = import_string.split(" cimport ")
|
||||
cimports = True
|
||||
|
||||
else:
|
||||
parts = import_string.split(" import ")
|
||||
cimports = False
|
||||
|
||||
from_import = parts[0].split(" ")
|
||||
import_string = (" cimport " if cimports else " import ").join(
|
||||
[from_import[0] + " " + "".join(from_import[1:])] + parts[1:]
|
||||
)
|
||||
|
||||
just_imports = [
|
||||
item.replace("{|", "{ ").replace("|}", " }")
|
||||
for item in _strip_syntax(import_string).split()
|
||||
]
|
||||
|
||||
attach_comments_to: Optional[List[Any]] = None
|
||||
direct_imports = just_imports[1:]
|
||||
straight_import = True
|
||||
top_level_module = ""
|
||||
if "as" in just_imports and (just_imports.index("as") + 1) < len(just_imports):
|
||||
straight_import = False
|
||||
while "as" in just_imports:
|
||||
nested_module = None
|
||||
as_index = just_imports.index("as")
|
||||
if type_of_import == "from":
|
||||
nested_module = just_imports[as_index - 1]
|
||||
top_level_module = just_imports[0]
|
||||
module = top_level_module + "." + nested_module
|
||||
as_name = just_imports[as_index + 1]
|
||||
direct_imports.remove(nested_module)
|
||||
direct_imports.remove(as_name)
|
||||
direct_imports.remove("as")
|
||||
if nested_module == as_name and config.remove_redundant_aliases:
|
||||
pass
|
||||
elif as_name not in as_map["from"][module]: # pragma: no branch
|
||||
as_map["from"][module].append(as_name)
|
||||
|
||||
full_name = f"{nested_module} as {as_name}"
|
||||
associated_comment = nested_comments.get(full_name)
|
||||
if associated_comment:
|
||||
categorized_comments["nested"].setdefault(top_level_module, {})[
|
||||
full_name
|
||||
] = associated_comment
|
||||
if associated_comment in comments: # pragma: no branch
|
||||
comments.pop(comments.index(associated_comment))
|
||||
else:
|
||||
module = just_imports[as_index - 1]
|
||||
as_name = just_imports[as_index + 1]
|
||||
if module == as_name and config.remove_redundant_aliases:
|
||||
pass
|
||||
elif as_name not in as_map["straight"][module]:
|
||||
as_map["straight"][module].append(as_name)
|
||||
|
||||
if comments and attach_comments_to is None:
|
||||
if nested_module and config.combine_as_imports:
|
||||
attach_comments_to = categorized_comments["from"].setdefault(
|
||||
f"{top_level_module}.__combined_as__", []
|
||||
)
|
||||
else:
|
||||
if type_of_import == "from" or (
|
||||
config.remove_redundant_aliases and as_name == module.split(".")[-1]
|
||||
):
|
||||
attach_comments_to = categorized_comments["straight"].setdefault(
|
||||
module, []
|
||||
)
|
||||
else:
|
||||
attach_comments_to = categorized_comments["straight"].setdefault(
|
||||
f"{module} as {as_name}", []
|
||||
)
|
||||
del just_imports[as_index : as_index + 2]
|
||||
|
||||
if type_of_import == "from":
|
||||
import_from = just_imports.pop(0)
|
||||
placed_module = finder(import_from)
|
||||
if config.verbose and not config.only_modified:
|
||||
print(f"from-type place_module for {import_from} returned {placed_module}")
|
||||
|
||||
elif config.verbose:
|
||||
verbose_output.append(
|
||||
f"from-type place_module for {import_from} returned {placed_module}"
|
||||
)
|
||||
if placed_module == "":
|
||||
warn(
|
||||
f"could not place module {import_from} of line {line} --"
|
||||
" Do you need to define a default section?"
|
||||
)
|
||||
|
||||
if placed_module and placed_module not in imports:
|
||||
raise MissingSection(import_module=import_from, section=placed_module)
|
||||
|
||||
root = imports[placed_module][type_of_import] # type: ignore
|
||||
for import_name in just_imports:
|
||||
associated_comment = nested_comments.get(import_name)
|
||||
if associated_comment:
|
||||
categorized_comments["nested"].setdefault(import_from, {})[
|
||||
import_name
|
||||
] = associated_comment
|
||||
if associated_comment in comments: # pragma: no branch
|
||||
comments.pop(comments.index(associated_comment))
|
||||
if (
|
||||
config.force_single_line
|
||||
and comments
|
||||
and attach_comments_to is None
|
||||
and len(just_imports) == 1
|
||||
):
|
||||
nested_from_comments = categorized_comments["nested"].setdefault(
|
||||
import_from, {}
|
||||
)
|
||||
existing_comment = nested_from_comments.get(just_imports[0], "")
|
||||
nested_from_comments[
|
||||
just_imports[0]
|
||||
] = f"{existing_comment}{'; ' if existing_comment else ''}{'; '.join(comments)}"
|
||||
comments = []
|
||||
|
||||
if comments and attach_comments_to is None:
|
||||
attach_comments_to = categorized_comments["from"].setdefault(import_from, [])
|
||||
|
||||
if len(out_lines) > max(import_index, 1) - 1:
|
||||
last = out_lines[-1].rstrip() if out_lines else ""
|
||||
while (
|
||||
last.startswith("#")
|
||||
and not last.endswith('"""')
|
||||
and not last.endswith("'''")
|
||||
and "isort:imports-" not in last
|
||||
and "isort: imports-" not in last
|
||||
and not config.treat_all_comments_as_code
|
||||
and not last.strip() in config.treat_comments_as_code
|
||||
):
|
||||
categorized_comments["above"]["from"].setdefault(import_from, []).insert(
|
||||
0, out_lines.pop(-1)
|
||||
)
|
||||
if out_lines:
|
||||
last = out_lines[-1].rstrip()
|
||||
else:
|
||||
last = ""
|
||||
if statement_index - 1 == import_index: # pragma: no cover
|
||||
import_index -= len(
|
||||
categorized_comments["above"]["from"].get(import_from, [])
|
||||
)
|
||||
|
||||
if import_from not in root:
|
||||
root[import_from] = OrderedDict(
|
||||
(module, module in direct_imports) for module in just_imports
|
||||
)
|
||||
else:
|
||||
root[import_from].update(
|
||||
(module, root[import_from].get(module, False) or module in direct_imports)
|
||||
for module in just_imports
|
||||
)
|
||||
|
||||
if comments and attach_comments_to is not None:
|
||||
attach_comments_to.extend(comments)
|
||||
else:
|
||||
if comments and attach_comments_to is not None:
|
||||
attach_comments_to.extend(comments)
|
||||
comments = []
|
||||
|
||||
for module in just_imports:
|
||||
if comments:
|
||||
categorized_comments["straight"][module] = comments
|
||||
comments = []
|
||||
|
||||
if len(out_lines) > max(import_index, +1, 1) - 1:
|
||||
|
||||
last = out_lines[-1].rstrip() if out_lines else ""
|
||||
while (
|
||||
last.startswith("#")
|
||||
and not last.endswith('"""')
|
||||
and not last.endswith("'''")
|
||||
and "isort:imports-" not in last
|
||||
and "isort: imports-" not in last
|
||||
and not config.treat_all_comments_as_code
|
||||
and not last.strip() in config.treat_comments_as_code
|
||||
):
|
||||
categorized_comments["above"]["straight"].setdefault(module, []).insert(
|
||||
0, out_lines.pop(-1)
|
||||
)
|
||||
if out_lines:
|
||||
last = out_lines[-1].rstrip()
|
||||
else:
|
||||
last = ""
|
||||
if index - 1 == import_index:
|
||||
import_index -= len(
|
||||
categorized_comments["above"]["straight"].get(module, [])
|
||||
)
|
||||
placed_module = finder(module)
|
||||
if config.verbose and not config.only_modified:
|
||||
print(f"else-type place_module for {module} returned {placed_module}")
|
||||
|
||||
elif config.verbose:
|
||||
verbose_output.append(
|
||||
f"else-type place_module for {module} returned {placed_module}"
|
||||
)
|
||||
if placed_module == "":
|
||||
warn(
|
||||
f"could not place module {module} of line {line} --"
|
||||
" Do you need to define a default section?"
|
||||
)
|
||||
imports.setdefault("", {"straight": OrderedDict(), "from": OrderedDict()})
|
||||
|
||||
if placed_module and placed_module not in imports:
|
||||
raise MissingSection(import_module=module, section=placed_module)
|
||||
|
||||
straight_import |= imports[placed_module][type_of_import].get( # type: ignore
|
||||
module, False
|
||||
)
|
||||
imports[placed_module][type_of_import][module] = straight_import # type: ignore
|
||||
|
||||
change_count = len(out_lines) - original_line_count
|
||||
|
||||
return ParsedContent(
|
||||
in_lines=in_lines,
|
||||
lines_without_imports=out_lines,
|
||||
import_index=import_index,
|
||||
place_imports=place_imports,
|
||||
import_placements=import_placements,
|
||||
as_map=as_map,
|
||||
imports=imports,
|
||||
categorized_comments=categorized_comments,
|
||||
change_count=change_count,
|
||||
original_line_count=original_line_count,
|
||||
line_separator=line_separator,
|
||||
sections=config.sections,
|
||||
verbose_output=verbose_output,
|
||||
)
|
||||
145
.venv/lib/python3.8/site-packages/isort/place.py
Normal file
145
.venv/lib/python3.8/site-packages/isort/place.py
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
"""Contains all logic related to placing an import within a certain section."""
|
||||
import importlib
|
||||
from fnmatch import fnmatch
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import FrozenSet, Iterable, Optional, Tuple
|
||||
|
||||
from isort import sections
|
||||
from isort.settings import DEFAULT_CONFIG, Config
|
||||
from isort.utils import exists_case_sensitive
|
||||
|
||||
LOCAL = "LOCALFOLDER"
|
||||
|
||||
|
||||
def module(name: str, config: Config = DEFAULT_CONFIG) -> str:
    """Return only the section name for *name*, discarding the reasoning.

    Thin convenience wrapper around ``module_with_reason``.
    """
    placement, _reason = module_with_reason(name, config)
    return placement
|
||||
|
||||
|
||||
@lru_cache(maxsize=1000)
def module_with_reason(name: str, config: Config = DEFAULT_CONFIG) -> Tuple[str, str]:
    """Return the section placement for *name* together with the reasoning.

    Placement strategies are consulted in priority order; the first one that
    yields a result wins, falling back to the configured default section.
    Results are memoized (up to 1000 entries) since placement is pure.
    """
    for strategy in (_forced_separate, _local, _known_pattern, _src_path):
        placement = strategy(name, config)
        if placement is not None:
            return placement
    return (config.default_section, "Default option in Config or universal default.")
|
||||
|
||||
|
||||
def _forced_separate(name: str, config: Config) -> Optional[Tuple[str, str]]:
|
||||
for forced_separate in config.forced_separate:
|
||||
# Ensure all forced_separate patterns will match to end of string
|
||||
path_glob = forced_separate
|
||||
if not forced_separate.endswith("*"):
|
||||
path_glob = "%s*" % forced_separate
|
||||
|
||||
if fnmatch(name, path_glob) or fnmatch(name, "." + path_glob):
|
||||
return (forced_separate, f"Matched forced_separate ({forced_separate}) config value.")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _local(name: str, config: Config) -> Optional[Tuple[str, str]]:
|
||||
if name.startswith("."):
|
||||
return (LOCAL, "Module name started with a dot.")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _known_pattern(name: str, config: Config) -> Optional[Tuple[str, str]]:
|
||||
parts = name.split(".")
|
||||
module_names_to_check = (".".join(parts[:first_k]) for first_k in range(len(parts), 0, -1))
|
||||
for module_name_to_check in module_names_to_check:
|
||||
for pattern, placement in config.known_patterns:
|
||||
if placement in config.sections and pattern.match(module_name_to_check):
|
||||
return (placement, f"Matched configured known pattern {pattern}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _src_path(
    name: str,
    config: Config,
    src_paths: Optional[Iterable[Path]] = None,
    prefix: Tuple[str, ...] = (),
) -> Optional[Tuple[str, str]]:
    """Place *name* as FIRSTPARTY when it resolves to a module inside one of the
    configured ``src_paths``; recurses one package level at a time to follow
    namespace packages.

    *prefix* accumulates the package components already matched on earlier
    recursion levels; it is empty on the initial call.
    """
    if src_paths is None:
        src_paths = config.src_paths

    # Split off only the first dotted component; the remainder (if any) is
    # handled by recursion when a namespace package is detected.
    root_module_name, *nested_module = name.split(".", 1)
    new_prefix = prefix + (root_module_name,)
    namespace = ".".join(new_prefix)

    for src_path in src_paths:
        module_path = (src_path / root_module_name).resolve()
        # Top-level special case: the src_path itself may *be* the module
        # directory (e.g. src_paths=["mypkg"] for module "mypkg").
        if not prefix and not module_path.is_dir() and src_path.name == root_module_name:
            module_path = src_path.resolve()
        # Descend into namespace packages (explicitly configured, or
        # auto-detected from the directory contents) instead of matching here.
        if nested_module and (
            namespace in config.namespace_packages
            or (
                config.auto_identify_namespace_packages
                and _is_namespace_package(module_path, config.supported_extensions)
            )
        ):
            return _src_path(nested_module[0], config, (module_path,), new_prefix)
        if (
            _is_module(module_path)
            or _is_package(module_path)
            or _src_path_is_module(src_path, root_module_name)
        ):
            return (sections.FIRSTPARTY, f"Found in one of the configured src_paths: {src_path}.")

    # Not found under any src_path: let the caller fall through to other strategies.
    return None
|
||||
|
||||
|
||||
def _is_module(path: Path) -> bool:
    """True when *path* resolves to an importable module.

    Checks, in order: a ``.py`` source file, a compiled extension module
    (any suffix from ``importlib.machinery.EXTENSION_SUFFIXES``), or a
    package directory containing ``__init__.py``.
    """
    if exists_case_sensitive(str(path.with_suffix(".py"))):
        return True
    for extension_suffix in importlib.machinery.EXTENSION_SUFFIXES:
        if exists_case_sensitive(str(path.with_suffix(extension_suffix))):
            return True
    return exists_case_sensitive(str(path / "__init__.py"))
|
||||
|
||||
|
||||
def _is_package(path: Path) -> bool:
    """True when *path* exists (case-sensitively) and is a directory."""
    if not exists_case_sensitive(str(path)):
        return False
    return path.is_dir()
|
||||
|
||||
|
||||
def _is_namespace_package(path: Path, src_extensions: FrozenSet[str]) -> bool:
    """Heuristically decide whether *path* is a namespace package directory.

    A directory with no ``__init__.py`` and no source/config files is treated
    as an (implicit, PEP 420 style) namespace package.  A directory whose
    ``__init__.py`` consists of one of the classic ``pkg_resources`` /
    ``pkgutil`` namespace declarations is treated as a legacy namespace
    package.  Anything else is a regular package.
    """
    if not _is_package(path):
        return False

    init_file = path / "__init__.py"
    if not init_file.exists():
        # No __init__.py: it is only a namespace package if the directory
        # contains no recognisable source or project-config files.
        filenames = [
            filepath
            for filepath in path.iterdir()
            if filepath.suffix.lstrip(".") in src_extensions
            or filepath.name.lower() in ("setup.cfg", "pyproject.toml")
        ]
        if filenames:
            return False
    else:
        # Sniff only the first 4 KiB of __init__.py for the well-known
        # pkg_resources / pkgutil namespace declarations (both quote styles).
        with init_file.open("rb") as open_init_file:
            file_start = open_init_file.read(4096)

            if (
                b"__import__('pkg_resources').declare_namespace(__name__)" not in file_start
                and b'__import__("pkg_resources").declare_namespace(__name__)' not in file_start
                and b"__path__ = __import__('pkgutil').extend_path(__path__, __name__)"
                not in file_start
                and b'__path__ = __import__("pkgutil").extend_path(__path__, __name__)'
                not in file_start
            ):
                return False
    return True
|
||||
|
||||
|
||||
def _src_path_is_module(src_path: Path, module_name: str) -> bool:
    """True when the configured src_path directory itself is the module."""
    if src_path.name != module_name:
        return False
    return src_path.is_dir() and exists_case_sensitive(str(src_path))
|
||||
86
.venv/lib/python3.8/site-packages/isort/profiles.py
Normal file
86
.venv/lib/python3.8/site-packages/isort/profiles.py
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
"""Common profiles are defined here to be easily used within a project using --profile {name}"""
|
||||
from typing import Any, Dict
|
||||
|
||||
black = {
|
||||
"multi_line_output": 3,
|
||||
"include_trailing_comma": True,
|
||||
"force_grid_wrap": 0,
|
||||
"use_parentheses": True,
|
||||
"ensure_newline_before_comments": True,
|
||||
"line_length": 88,
|
||||
}
|
||||
django = {
|
||||
"combine_as_imports": True,
|
||||
"include_trailing_comma": True,
|
||||
"multi_line_output": 5,
|
||||
"line_length": 79,
|
||||
}
|
||||
pycharm = {
|
||||
"multi_line_output": 3,
|
||||
"force_grid_wrap": 2,
|
||||
"lines_after_imports": 2,
|
||||
}
|
||||
google = {
|
||||
"force_single_line": True,
|
||||
"force_sort_within_sections": True,
|
||||
"lexicographical": True,
|
||||
"single_line_exclusions": ("typing",),
|
||||
"order_by_type": False,
|
||||
"group_by_package": True,
|
||||
}
|
||||
open_stack = {
|
||||
"force_single_line": True,
|
||||
"force_sort_within_sections": True,
|
||||
"lexicographical": True,
|
||||
}
|
||||
plone = {
|
||||
"force_alphabetical_sort": True,
|
||||
"force_single_line": True,
|
||||
"lines_after_imports": 2,
|
||||
"line_length": 200,
|
||||
}
|
||||
attrs = {
|
||||
"atomic": True,
|
||||
"force_grid_wrap": 0,
|
||||
"include_trailing_comma": True,
|
||||
"lines_after_imports": 2,
|
||||
"lines_between_types": 1,
|
||||
"multi_line_output": 3,
|
||||
"use_parentheses": True,
|
||||
}
|
||||
hug = {
|
||||
"multi_line_output": 3,
|
||||
"include_trailing_comma": True,
|
||||
"force_grid_wrap": 0,
|
||||
"use_parentheses": True,
|
||||
"line_length": 100,
|
||||
}
|
||||
wemake = {
|
||||
"multi_line_output": 3,
|
||||
"include_trailing_comma": True,
|
||||
"use_parentheses": True,
|
||||
"line_length": 80,
|
||||
}
|
||||
appnexus = {
|
||||
**black,
|
||||
"force_sort_within_sections": True,
|
||||
"order_by_type": False,
|
||||
"case_sensitive": False,
|
||||
"reverse_relative": True,
|
||||
"sort_relative_in_force_sorted_sections": True,
|
||||
"sections": ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "APPLICATION", "LOCALFOLDER"],
|
||||
"no_lines_before": "LOCALFOLDER",
|
||||
}
|
||||
|
||||
profiles: Dict[str, Dict[str, Any]] = {
|
||||
"black": black,
|
||||
"django": django,
|
||||
"pycharm": pycharm,
|
||||
"google": google,
|
||||
"open_stack": open_stack,
|
||||
"plone": plone,
|
||||
"attrs": attrs,
|
||||
"hug": hug,
|
||||
"wemake": wemake,
|
||||
"appnexus": appnexus,
|
||||
}
|
||||
0
.venv/lib/python3.8/site-packages/isort/py.typed
Normal file
0
.venv/lib/python3.8/site-packages/isort/py.typed
Normal file
45
.venv/lib/python3.8/site-packages/isort/pylama_isort.py
Normal file
45
.venv/lib/python3.8/site-packages/isort/pylama_isort.py
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
import os
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
from typing import Any, Dict, Iterator, List, Optional
|
||||
|
||||
from pylama.lint import Linter as BaseLinter # type: ignore
|
||||
|
||||
from isort.exceptions import FileSkipped
|
||||
|
||||
from . import api
|
||||
|
||||
|
||||
@contextmanager
def suppress_stdout() -> Iterator[None]:
    """Temporarily redirect ``sys.stdout`` to ``os.devnull``.

    Fix: the original implementation restored ``sys.stdout`` only on the
    success path; if the wrapped block raised, stdout stayed pointed at the
    (then closed) devnull handle.  Wrapping the ``yield`` in ``try/finally``
    guarantees restoration even when an exception propagates.
    """
    stdout = sys.stdout
    with open(os.devnull, "w") as devnull:
        sys.stdout = devnull
        try:
            yield
        finally:
            sys.stdout = stdout
|
||||
|
||||
|
||||
class Linter(BaseLinter):  # type: ignore
    """pylama linter plugin reporting files whose imports are incorrectly sorted."""

    def allow(self, path: str) -> bool:
        """Determine if this path should be linted."""
        return path.endswith(".py")

    def run(
        self, path: str, params: Optional[Dict[str, Any]] = None, **meta: Any
    ) -> List[Dict[str, Any]]:
        """Lint the file. Return an array of error dicts if appropriate."""
        sorted_correctly = True
        with suppress_stdout():
            try:
                sorted_correctly = api.check_file(path, disregard_skip=False, **params or {})
            except FileSkipped:
                # Skipped files are never reported as errors.
                sorted_correctly = True
        if sorted_correctly:
            return []
        return [
            {
                "lnum": 0,
                "col": 0,
                "text": "Incorrectly sorted imports.",
                "type": "ISORT",
            }
        ]
|
||||
9
.venv/lib/python3.8/site-packages/isort/sections.py
Normal file
9
.venv/lib/python3.8/site-packages/isort/sections.py
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
"""Defines all sections isort uses by default"""
|
||||
from typing import Tuple
|
||||
|
||||
FUTURE: str = "FUTURE"
|
||||
STDLIB: str = "STDLIB"
|
||||
THIRDPARTY: str = "THIRDPARTY"
|
||||
FIRSTPARTY: str = "FIRSTPARTY"
|
||||
LOCALFOLDER: str = "LOCALFOLDER"
|
||||
DEFAULT: Tuple[str, ...] = (FUTURE, STDLIB, THIRDPARTY, FIRSTPARTY, LOCALFOLDER)
|
||||
925
.venv/lib/python3.8/site-packages/isort/settings.py
Normal file
925
.venv/lib/python3.8/site-packages/isort/settings.py
Normal file
|
|
@ -0,0 +1,925 @@
|
|||
"""isort/settings.py.
|
||||
|
||||
Defines how the default settings for isort should be loaded
|
||||
"""
|
||||
import configparser
|
||||
import fnmatch
|
||||
import os
|
||||
import posixpath
|
||||
import re
|
||||
import stat
|
||||
import subprocess # nosec: Needed for gitignore support.
|
||||
import sys
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
FrozenSet,
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Pattern,
|
||||
Set,
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
)
|
||||
from warnings import warn
|
||||
|
||||
from . import sorting, stdlibs
|
||||
from ._future import dataclass, field
|
||||
from .exceptions import (
|
||||
FormattingPluginDoesNotExist,
|
||||
InvalidSettingsPath,
|
||||
ProfileDoesNotExist,
|
||||
SortingFunctionDoesNotExist,
|
||||
UnsupportedSettings,
|
||||
)
|
||||
from .profiles import profiles
|
||||
from .sections import DEFAULT as SECTION_DEFAULTS
|
||||
from .sections import FIRSTPARTY, FUTURE, LOCALFOLDER, STDLIB, THIRDPARTY
|
||||
from .utils import Trie
|
||||
from .wrap_modes import WrapModes
|
||||
from .wrap_modes import from_string as wrap_mode_from_string
|
||||
|
||||
if TYPE_CHECKING:
|
||||
tomli: Any
|
||||
else:
|
||||
from ._vendored import tomli
|
||||
|
||||
# Detects python shebang lines so extensionless scripts can be recognised.
_SHEBANG_RE = re.compile(br"^#!.*\bpython[23w]?\b")
CYTHON_EXTENSIONS = frozenset({"pyx", "pxd"})
SUPPORTED_EXTENSIONS = frozenset({"py", "pyi", *CYTHON_EXTENSIONS})
BLOCKED_EXTENSIONS = frozenset({"pex"})
FILE_SKIP_COMMENTS: Tuple[str, ...] = (
    "isort:" + "skip_file",
    "isort: " + "skip_file",
)  # Concatenated to avoid this file being skipped
MAX_CONFIG_SEARCH_DEPTH: int = 25  # The number of parent directories to search for a config file within
STOP_CONFIG_SEARCH_ON_DIRS: Tuple[str, ...] = (".git", ".hg")
# Python version targets derived from the stdlibs submodules (e.g. "38", "3", "all").
VALID_PY_TARGETS: Tuple[str, ...] = tuple(
    target.replace("py", "") for target in dir(stdlibs) if not target.startswith("_")
)
# Config filenames probed, in priority order, in each searched directory.
CONFIG_SOURCES: Tuple[str, ...] = (
    ".isort.cfg",
    "pyproject.toml",
    "setup.cfg",
    "tox.ini",
    ".editorconfig",
)
# Directories skipped by default when recursing for files to sort.
DEFAULT_SKIP: FrozenSet[str] = frozenset(
    {
        ".venv",
        "venv",
        ".tox",
        ".eggs",
        ".git",
        ".hg",
        ".mypy_cache",
        ".nox",
        ".svn",
        ".bzr",
        "_build",
        "buck-out",
        "build",
        "dist",
        ".pants.d",
        ".direnv",
        "node_modules",
        "__pypackages__",
    }
)

# Maps each config filename to the section header(s) isort settings live under.
CONFIG_SECTIONS: Dict[str, Tuple[str, ...]] = {
    ".isort.cfg": ("settings", "isort"),
    "pyproject.toml": ("tool.isort",),
    "setup.cfg": ("isort", "tool:isort"),
    "tox.ini": ("isort", "tool:isort"),
    ".editorconfig": ("*", "*.py", "**.py", "*.{py}"),
}
FALLBACK_CONFIG_SECTIONS: Tuple[str, ...] = ("isort", "tool:isort", "tool.isort")

# Prefixes for per-section dynamic settings (import_heading_stdlib, known_django, ...).
IMPORT_HEADING_PREFIX = "import_heading_"
IMPORT_FOOTER_PREFIX = "import_footer_"
KNOWN_PREFIX = "known_"
# Maps section identifiers to the legacy names used in known_* setting keys.
KNOWN_SECTION_MAPPING: Dict[str, str] = {
    STDLIB: "STANDARD_LIBRARY",
    FUTURE: "FUTURE_LIBRARY",
    FIRSTPARTY: "FIRST_PARTY",
    THIRDPARTY: "THIRD_PARTY",
    LOCALFOLDER: "LOCAL_FOLDER",
}

# Source tag attached to settings supplied programmatically/CLI at runtime.
RUNTIME_SOURCE = "runtime"

DEPRECATED_SETTINGS = ("not_skip", "keep_direct_and_as_imports")

# Accepted textual spellings for boolean config values.
_STR_BOOLEAN_MAPPING = {
    "y": True,
    "yes": True,
    "t": True,
    "on": True,
    "1": True,
    "true": True,
    "n": False,
    "no": False,
    "f": False,
    "off": False,
    "0": False,
    "false": False,
}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class _Config:
    """Defines the data schema and defaults used for isort configuration.

    NOTE: known lists, such as known_standard_library, are intentionally not complete as they are
    dynamically determined later on.
    """

    # -- python target / file selection --
    py_version: str = "3"
    force_to_top: FrozenSet[str] = frozenset()
    skip: FrozenSet[str] = DEFAULT_SKIP
    extend_skip: FrozenSet[str] = frozenset()
    skip_glob: FrozenSet[str] = frozenset()
    extend_skip_glob: FrozenSet[str] = frozenset()
    skip_gitignore: bool = False
    # -- line layout --
    line_length: int = 79
    wrap_length: int = 0
    line_ending: str = ""
    # -- section definitions and membership --
    sections: Tuple[str, ...] = SECTION_DEFAULTS
    no_sections: bool = False
    known_future_library: FrozenSet[str] = frozenset(("__future__",))
    known_third_party: FrozenSet[str] = frozenset()
    known_first_party: FrozenSet[str] = frozenset()
    known_local_folder: FrozenSet[str] = frozenset()
    known_standard_library: FrozenSet[str] = frozenset()
    extra_standard_library: FrozenSet[str] = frozenset()
    known_other: Dict[str, FrozenSet[str]] = field(default_factory=dict)
    # -- wrapping and formatting --
    multi_line_output: WrapModes = WrapModes.GRID  # type: ignore
    forced_separate: Tuple[str, ...] = ()
    indent: str = " " * 4
    comment_prefix: str = " #"
    # -- sorting behaviour --
    length_sort: bool = False
    length_sort_straight: bool = False
    length_sort_sections: FrozenSet[str] = frozenset()
    add_imports: FrozenSet[str] = frozenset()
    remove_imports: FrozenSet[str] = frozenset()
    append_only: bool = False
    reverse_relative: bool = False
    force_single_line: bool = False
    single_line_exclusions: Tuple[str, ...] = ()
    default_section: str = THIRDPARTY
    import_headings: Dict[str, str] = field(default_factory=dict)
    import_footers: Dict[str, str] = field(default_factory=dict)
    balanced_wrapping: bool = False
    use_parentheses: bool = False
    order_by_type: bool = True
    atomic: bool = False
    # -1 means "not configured" for the line-count settings below.
    lines_before_imports: int = -1
    lines_after_imports: int = -1
    lines_between_sections: int = 1
    lines_between_types: int = 0
    combine_as_imports: bool = False
    combine_star: bool = False
    include_trailing_comma: bool = False
    from_first: bool = False
    # -- output / reporting --
    verbose: bool = False
    quiet: bool = False
    force_adds: bool = False
    force_alphabetical_sort_within_sections: bool = False
    force_alphabetical_sort: bool = False
    force_grid_wrap: int = 0
    force_sort_within_sections: bool = False
    lexicographical: bool = False
    group_by_package: bool = False
    ignore_whitespace: bool = False
    no_lines_before: FrozenSet[str] = frozenset()
    no_inline_sort: bool = False
    ignore_comments: bool = False
    case_sensitive: bool = False
    # Layered origin of each setting value (defaults, profile, file, runtime).
    sources: Tuple[Dict[str, Any], ...] = ()
    virtual_env: str = ""
    conda_env: str = ""
    ensure_newline_before_comments: bool = False
    directory: str = ""
    profile: str = ""
    honor_noqa: bool = False
    src_paths: Tuple[Path, ...] = ()
    old_finders: bool = False
    remove_redundant_aliases: bool = False
    float_to_top: bool = False
    filter_files: bool = False
    formatter: str = ""
    formatting_function: Optional[Callable[[str, str, object], str]] = None
    color_output: bool = False
    treat_comments_as_code: FrozenSet[str] = frozenset()
    treat_all_comments_as_code: bool = False
    supported_extensions: FrozenSet[str] = SUPPORTED_EXTENSIONS
    blocked_extensions: FrozenSet[str] = BLOCKED_EXTENSIONS
    constants: FrozenSet[str] = frozenset()
    classes: FrozenSet[str] = frozenset()
    variables: FrozenSet[str] = frozenset()
    dedup_headings: bool = False
    only_sections: bool = False
    only_modified: bool = False
    combine_straight_imports: bool = False
    auto_identify_namespace_packages: bool = True
    namespace_packages: FrozenSet[str] = frozenset()
    follow_links: bool = True
    indented_import_headings: bool = True
    honor_case_in_force_sorted_sections: bool = False
    sort_relative_in_force_sorted_sections: bool = False
    overwrite_in_place: bool = False
    reverse_sort: bool = False
    star_first: bool = False
    # NOTE(review): missing ':' — this assigns the *type object* as a plain
    # class attribute rather than declaring a dataclass field; presumably
    # intended as `import_dependencies: Dict[str, str]` — confirm upstream
    # before changing, since "fixing" it would add a new dataclass field.
    import_dependencies = Dict[str, str]
    git_ignore: Dict[Path, Set[Path]] = field(default_factory=dict)
    format_error: str = "{error}: {message}"
    format_success: str = "{success}: {message}"
    sort_order: str = "natural"

    def __post_init__(self) -> None:
        """Normalize and validate settings; uses object.__setattr__ because the
        dataclass is frozen."""
        py_version = self.py_version
        if py_version == "auto":  # pragma: no cover
            if sys.version_info.major == 2 and sys.version_info.minor <= 6:
                py_version = "2"
            elif sys.version_info.major == 3 and (
                sys.version_info.minor <= 5 or sys.version_info.minor >= 10
            ):
                # Versions without a dedicated stdlibs submodule fall back to
                # the generic "3" target.
                py_version = "3"
            else:
                py_version = f"{sys.version_info.major}{sys.version_info.minor}"

        if py_version not in VALID_PY_TARGETS:
            raise ValueError(
                f"The python version {py_version} is not supported. "
                "You can set a python version with the -py or --python-version flag. "
                f"The following versions are supported: {VALID_PY_TARGETS}"
            )

        # Normalize "38" -> "py38" to match the stdlibs submodule names.
        if py_version != "all":
            object.__setattr__(self, "py_version", f"py{py_version}")

        if not self.known_standard_library:
            object.__setattr__(
                self, "known_standard_library", frozenset(getattr(stdlibs, self.py_version).stdlib)
            )

        # VERTICAL_GRID_GROUPED_NO_COMMA is a deprecated alias of VERTICAL_GRID_GROUPED.
        if self.multi_line_output == WrapModes.VERTICAL_GRID_GROUPED_NO_COMMA:  # type: ignore
            vertical_grid_grouped = WrapModes.VERTICAL_GRID_GROUPED  # type: ignore
            object.__setattr__(self, "multi_line_output", vertical_grid_grouped)
        # force_alphabetical_sort is a macro option implying several others.
        if self.force_alphabetical_sort:
            object.__setattr__(self, "force_alphabetical_sort_within_sections", True)
            object.__setattr__(self, "no_sections", True)
            object.__setattr__(self, "lines_between_types", 1)
            object.__setattr__(self, "from_first", True)
        if self.wrap_length > self.line_length:
            raise ValueError(
                "wrap_length must be set lower than or equal to line_length: "
                f"{self.wrap_length} > {self.line_length}."
            )

    def __hash__(self) -> int:
        # Hash by object identity rather than by field values.
        return id(self)
|
||||
|
||||
|
||||
# Snapshot of the default configuration values, tagged with a "source" key so
# later setting layers (config files, runtime overrides) can be distinguished.
_DEFAULT_SETTINGS = {**vars(_Config()), "source": "defaults"}
|
||||
|
||||
|
||||
class Config(_Config):
|
||||
def __init__(
|
||||
self,
|
||||
settings_file: str = "",
|
||||
settings_path: str = "",
|
||||
config: Optional[_Config] = None,
|
||||
**config_overrides: Any,
|
||||
):
|
||||
self._known_patterns: Optional[List[Tuple[Pattern[str], str]]] = None
|
||||
self._section_comments: Optional[Tuple[str, ...]] = None
|
||||
self._section_comments_end: Optional[Tuple[str, ...]] = None
|
||||
self._skips: Optional[FrozenSet[str]] = None
|
||||
self._skip_globs: Optional[FrozenSet[str]] = None
|
||||
self._sorting_function: Optional[Callable[..., List[str]]] = None
|
||||
|
||||
if config:
|
||||
config_vars = vars(config).copy()
|
||||
config_vars.update(config_overrides)
|
||||
config_vars["py_version"] = config_vars["py_version"].replace("py", "")
|
||||
config_vars.pop("_known_patterns")
|
||||
config_vars.pop("_section_comments")
|
||||
config_vars.pop("_section_comments_end")
|
||||
config_vars.pop("_skips")
|
||||
config_vars.pop("_skip_globs")
|
||||
config_vars.pop("_sorting_function")
|
||||
super().__init__(**config_vars) # type: ignore
|
||||
return
|
||||
|
||||
# We can't use self.quiet to conditionally show warnings before super.__init__() is called
|
||||
# at the end of this method. _Config is also frozen so setting self.quiet isn't possible.
|
||||
# Therefore we extract quiet early here in a variable and use that in warning conditions.
|
||||
quiet = config_overrides.get("quiet", False)
|
||||
|
||||
sources: List[Dict[str, Any]] = [_DEFAULT_SETTINGS]
|
||||
|
||||
config_settings: Dict[str, Any]
|
||||
project_root: str
|
||||
if settings_file:
|
||||
config_settings = _get_config_data(
|
||||
settings_file,
|
||||
CONFIG_SECTIONS.get(os.path.basename(settings_file), FALLBACK_CONFIG_SECTIONS),
|
||||
)
|
||||
project_root = os.path.dirname(settings_file)
|
||||
if not config_settings and not quiet:
|
||||
warn(
|
||||
f"A custom settings file was specified: {settings_file} but no configuration "
|
||||
"was found inside. This can happen when [settings] is used as the config "
|
||||
"header instead of [isort]. "
|
||||
"See: https://pycqa.github.io/isort/docs/configuration/config_files"
|
||||
"/#custom_config_files for more information."
|
||||
)
|
||||
elif settings_path:
|
||||
if not os.path.exists(settings_path):
|
||||
raise InvalidSettingsPath(settings_path)
|
||||
|
||||
settings_path = os.path.abspath(settings_path)
|
||||
project_root, config_settings = _find_config(settings_path)
|
||||
else:
|
||||
config_settings = {}
|
||||
project_root = os.getcwd()
|
||||
|
||||
profile_name = config_overrides.get("profile", config_settings.get("profile", ""))
|
||||
profile: Dict[str, Any] = {}
|
||||
if profile_name:
|
||||
if profile_name not in profiles:
|
||||
import pkg_resources
|
||||
|
||||
for plugin in pkg_resources.iter_entry_points("isort.profiles"):
|
||||
profiles.setdefault(plugin.name, plugin.load())
|
||||
|
||||
if profile_name not in profiles:
|
||||
raise ProfileDoesNotExist(profile_name)
|
||||
|
||||
profile = profiles[profile_name].copy()
|
||||
profile["source"] = f"{profile_name} profile"
|
||||
sources.append(profile)
|
||||
|
||||
if config_settings:
|
||||
sources.append(config_settings)
|
||||
if config_overrides:
|
||||
config_overrides["source"] = RUNTIME_SOURCE
|
||||
sources.append(config_overrides)
|
||||
|
||||
combined_config = {**profile, **config_settings, **config_overrides}
|
||||
if "indent" in combined_config:
|
||||
indent = str(combined_config["indent"])
|
||||
if indent.isdigit():
|
||||
indent = " " * int(indent)
|
||||
else:
|
||||
indent = indent.strip("'").strip('"')
|
||||
if indent.lower() == "tab":
|
||||
indent = "\t"
|
||||
combined_config["indent"] = indent
|
||||
|
||||
known_other = {}
|
||||
import_headings = {}
|
||||
import_footers = {}
|
||||
for key, value in tuple(combined_config.items()):
|
||||
# Collect all known sections beyond those that have direct entries
|
||||
if key.startswith(KNOWN_PREFIX) and key not in (
|
||||
"known_standard_library",
|
||||
"known_future_library",
|
||||
"known_third_party",
|
||||
"known_first_party",
|
||||
"known_local_folder",
|
||||
):
|
||||
import_heading = key[len(KNOWN_PREFIX) :].lower()
|
||||
maps_to_section = import_heading.upper()
|
||||
combined_config.pop(key)
|
||||
if maps_to_section in KNOWN_SECTION_MAPPING:
|
||||
section_name = f"known_{KNOWN_SECTION_MAPPING[maps_to_section].lower()}"
|
||||
if section_name in combined_config and not quiet:
|
||||
warn(
|
||||
f"Can't set both {key} and {section_name} in the same config file.\n"
|
||||
f"Default to {section_name} if unsure."
|
||||
"\n\n"
|
||||
"See: https://pycqa.github.io/isort/"
|
||||
"#custom-sections-and-ordering."
|
||||
)
|
||||
else:
|
||||
combined_config[section_name] = frozenset(value)
|
||||
else:
|
||||
known_other[import_heading] = frozenset(value)
|
||||
if maps_to_section not in combined_config.get("sections", ()) and not quiet:
|
||||
warn(
|
||||
f"`{key}` setting is defined, but {maps_to_section} is not"
|
||||
" included in `sections` config option:"
|
||||
f" {combined_config.get('sections', SECTION_DEFAULTS)}.\n\n"
|
||||
"See: https://pycqa.github.io/isort/"
|
||||
"#custom-sections-and-ordering."
|
||||
)
|
||||
if key.startswith(IMPORT_HEADING_PREFIX):
|
||||
import_headings[key[len(IMPORT_HEADING_PREFIX) :].lower()] = str(value)
|
||||
if key.startswith(IMPORT_FOOTER_PREFIX):
|
||||
import_footers[key[len(IMPORT_FOOTER_PREFIX) :].lower()] = str(value)
|
||||
|
||||
# Coerce all provided config values into their correct type
|
||||
default_value = _DEFAULT_SETTINGS.get(key, None)
|
||||
if default_value is None:
|
||||
continue
|
||||
|
||||
combined_config[key] = type(default_value)(value)
|
||||
|
||||
for section in combined_config.get("sections", ()):
|
||||
if section in SECTION_DEFAULTS:
|
||||
continue
|
||||
|
||||
if not section.lower() in known_other:
|
||||
config_keys = ", ".join(known_other.keys())
|
||||
warn(
|
||||
f"`sections` setting includes {section}, but no known_{section.lower()} "
|
||||
"is defined. "
|
||||
f"The following known_SECTION config options are defined: {config_keys}."
|
||||
)
|
||||
|
||||
if "directory" not in combined_config:
|
||||
combined_config["directory"] = (
|
||||
os.path.dirname(config_settings["source"])
|
||||
if config_settings.get("source", None)
|
||||
else os.getcwd()
|
||||
)
|
||||
|
||||
path_root = Path(combined_config.get("directory", project_root)).resolve()
|
||||
path_root = path_root if path_root.is_dir() else path_root.parent
|
||||
if "src_paths" not in combined_config:
|
||||
combined_config["src_paths"] = (path_root / "src", path_root)
|
||||
else:
|
||||
src_paths: List[Path] = []
|
||||
for src_path in combined_config.get("src_paths", ()):
|
||||
full_paths = (
|
||||
path_root.glob(src_path) if "*" in str(src_path) else [path_root / src_path]
|
||||
)
|
||||
for path in full_paths:
|
||||
if path not in src_paths:
|
||||
src_paths.append(path)
|
||||
|
||||
combined_config["src_paths"] = tuple(src_paths)
|
||||
|
||||
if "formatter" in combined_config:
|
||||
import pkg_resources
|
||||
|
||||
for plugin in pkg_resources.iter_entry_points("isort.formatters"):
|
||||
if plugin.name == combined_config["formatter"]:
|
||||
combined_config["formatting_function"] = plugin.load()
|
||||
break
|
||||
else:
|
||||
raise FormattingPluginDoesNotExist(combined_config["formatter"])
|
||||
|
||||
# Remove any config values that are used for creating config object but
|
||||
# aren't defined in dataclass
|
||||
combined_config.pop("source", None)
|
||||
combined_config.pop("sources", None)
|
||||
combined_config.pop("runtime_src_paths", None)
|
||||
|
||||
deprecated_options_used = [
|
||||
option for option in combined_config if option in DEPRECATED_SETTINGS
|
||||
]
|
||||
if deprecated_options_used:
|
||||
for deprecated_option in deprecated_options_used:
|
||||
combined_config.pop(deprecated_option)
|
||||
if not quiet:
|
||||
warn(
|
||||
"W0503: Deprecated config options were used: "
|
||||
f"{', '.join(deprecated_options_used)}."
|
||||
"Please see the 5.0.0 upgrade guide: "
|
||||
"https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0.html"
|
||||
)
|
||||
|
||||
if known_other:
|
||||
combined_config["known_other"] = known_other
|
||||
if import_headings:
|
||||
for import_heading_key in import_headings:
|
||||
combined_config.pop(f"{IMPORT_HEADING_PREFIX}{import_heading_key}")
|
||||
combined_config["import_headings"] = import_headings
|
||||
if import_footers:
|
||||
for import_footer_key in import_footers:
|
||||
combined_config.pop(f"{IMPORT_FOOTER_PREFIX}{import_footer_key}")
|
||||
combined_config["import_footers"] = import_footers
|
||||
|
||||
unsupported_config_errors = {}
|
||||
for option in set(combined_config.keys()).difference(
|
||||
getattr(_Config, "__dataclass_fields__", {}).keys()
|
||||
):
|
||||
for source in reversed(sources):
|
||||
if option in source:
|
||||
unsupported_config_errors[option] = {
|
||||
"value": source[option],
|
||||
"source": source["source"],
|
||||
}
|
||||
if unsupported_config_errors:
|
||||
raise UnsupportedSettings(unsupported_config_errors)
|
||||
|
||||
super().__init__(sources=tuple(sources), **combined_config) # type: ignore
|
||||
|
||||
def is_supported_filetype(self, file_name: str) -> bool:
    """Return True when *file_name* looks like a file isort can process."""
    extension = os.path.splitext(file_name)[1].lstrip(".")
    if extension in self.supported_extensions:
        return True
    if extension in self.blocked_extensions:
        return False

    # Editor backup files (e.g. "foo.py~") are never processed.
    if file_name.endswith("~"):
        return False

    # A FIFO would make the read below block forever; treat it as unsupported.
    try:
        if stat.S_ISFIFO(os.stat(file_name).st_mode):
            return False
    except OSError:
        pass

    # Unknown extension: sniff the first bytes for a shebang line.
    try:
        with open(file_name, "rb") as stream:
            first_line = stream.readline(100)
    except OSError:
        return False
    return bool(_SHEBANG_RE.match(first_line))
|
||||
|
||||
def _check_folder_gitignore(self, folder: str) -> Optional[Path]:
    """Populate ``self.git_ignore`` for the git repository containing *folder*.

    Returns the repository root, or None when *folder* is not inside a git
    repository (or when git itself fails).
    """
    env = {**os.environ, "LANG": "C.UTF-8"}
    try:
        toplevel = subprocess.check_output(  # nosec # skipcq: PYL-W1510
            ["git", "-C", folder, "rev-parse", "--show-toplevel"], encoding="utf-8", env=env
        )
    except subprocess.CalledProcessError:
        return None

    git_folder = Path(toplevel.rstrip()).resolve()

    # don't check symlinks; either part of the repo and would be checked
    # twice, or is external to the repo and git won't know anything about it
    repo_files: List[str] = []
    for root, dir_names, file_names in os.walk(git_folder, followlinks=False):
        if ".git" in dir_names:
            dir_names.remove(".git")
        repo_files.extend(os.path.join(root, file_name) for file_name in file_names)

    git_options = ["-C", str(git_folder), "-c", "core.quotePath="]
    try:
        ignored = subprocess.check_output(  # nosec # skipcq: PYL-W1510
            ["git", *git_options, "check-ignore", "-z", "--stdin", "--no-index"],
            encoding="utf-8",
            env=env,
            input="\0".join(repo_files),
        )
    except subprocess.CalledProcessError:
        return None

    self.git_ignore[git_folder] = {
        Path(ignored_file) for ignored_file in ignored.rstrip("\0").split("\0")
    }
    return git_folder
|
||||
|
||||
def is_skipped(self, file_path: Path) -> bool:
    """Returns True if the file and/or folder should be skipped based on current settings."""
    resolved = file_path.resolve()
    if self.directory and Path(self.directory) in resolved.parents:
        relative_name = os.path.relpath(resolved, self.directory)
    else:
        relative_name = str(file_path)

    os_path = str(file_path)

    # Normalise to forward slashes and drop any Windows drive prefix ("C:").
    normalized_path = os_path.replace("\\", "/")
    if normalized_path[1:2] == ":":
        normalized_path = normalized_path[2:]

    for skip_path in self.skips:
        if posixpath.abspath(normalized_path) == posixpath.abspath(
            skip_path.replace("\\", "/")
        ):
            return True

    # Any path component matching a skip entry causes the file to be skipped.
    head, tail = os.path.split(relative_name)
    while tail:
        if tail in self.skips:
            return True
        head, tail = os.path.split(head)

    for pattern in self.skip_globs:
        if fnmatch.fnmatch(relative_name, pattern) or fnmatch.fnmatch(
            "/" + relative_name, pattern
        ):
            return True

    if not (os.path.isfile(os_path) or os.path.isdir(os_path) or os.path.islink(os_path)):
        return True

    if self.skip_gitignore:
        if file_path.name == ".git":  # pragma: no cover
            return True

        candidate_paths = [file_path, resolved]
        git_folder = None
        for folder in self.git_ignore:
            if any(folder in path.parents for path in candidate_paths):
                git_folder = folder
                break
        else:
            git_folder = self._check_folder_gitignore(str(file_path.parent))

        if git_folder and any(path in self.git_ignore[git_folder] for path in candidate_paths):
            return True

    return False
|
||||
|
||||
@property
def known_patterns(self) -> List[Tuple[Pattern[str], str]]:
    """Compiled (regex, section) pairs for module placement, built lazily once."""
    if self._known_patterns is not None:
        return self._known_patterns

    self._known_patterns = []
    # STDLIB is pulled to the front; the remaining sections keep their order.
    ordered_sections = [STDLIB] + [section for section in self.sections if section != STDLIB]
    for placement in reversed(ordered_sections):
        mapped_name = KNOWN_SECTION_MAPPING.get(placement, placement).lower()
        configured = getattr(
            self, f"{KNOWN_PREFIX}{mapped_name}", self.known_other.get(mapped_name, ())
        )
        extras = getattr(self, f"extra_{mapped_name}", ())
        expanded = [
            pattern
            for module in set(extras).union(configured)
            for pattern in self._parse_known_pattern(module)
        ]
        for known_pattern in expanded:
            # Translate shell-style wildcards into an anchored regex.
            regexp = "^" + known_pattern.replace("*", ".*").replace("?", ".?") + "$"
            self._known_patterns.append((re.compile(regexp), placement))

    return self._known_patterns
|
||||
|
||||
@property
def section_comments(self) -> Tuple[str, ...]:
    """Heading comments ("# <heading>") derived from import_headings, cached."""
    if self._section_comments is None:
        self._section_comments = tuple(
            f"# {heading}" for heading in self.import_headings.values()
        )
    return self._section_comments
|
||||
|
||||
@property
def section_comments_end(self) -> Tuple[str, ...]:
    """Footer comments ("# <footer>") derived from import_footers, cached."""
    if self._section_comments_end is None:
        self._section_comments_end = tuple(
            f"# {footer}" for footer in self.import_footers.values()
        )
    return self._section_comments_end
|
||||
|
||||
@property
def skips(self) -> FrozenSet[str]:
    """Union of ``skip`` and ``extend_skip``, computed once and cached."""
    if self._skips is None:
        self._skips = self.skip.union(self.extend_skip)
    return self._skips
|
||||
|
||||
@property
def skip_globs(self) -> FrozenSet[str]:
    """Union of ``skip_glob`` and ``extend_skip_glob``, computed once and cached."""
    if self._skip_globs is None:
        self._skip_globs = self.skip_glob.union(self.extend_skip_glob)
    return self._skip_globs
|
||||
|
||||
@property
def sorting_function(self) -> Callable[..., List[str]]:
    """Resolve ``sort_order`` to a sorting callable, caching the result.

    "natural" and "native" are built in; anything else is looked up among
    ``isort.sort_function`` entry points.
    """
    if self._sorting_function is not None:
        return self._sorting_function

    if self.sort_order == "natural":
        self._sorting_function = sorting.naturally
    elif self.sort_order == "native":
        self._sorting_function = sorted
    else:
        choices = ["natural", "native"]
        import pkg_resources

        for entry_point in pkg_resources.iter_entry_points("isort.sort_function"):
            choices.append(entry_point.name)
            if entry_point.name == self.sort_order:
                self._sorting_function = entry_point.load()
                break
        else:
            raise SortingFunctionDoesNotExist(self.sort_order, choices)

    return self._sorting_function
|
||||
|
||||
def _parse_known_pattern(self, pattern: str) -> List[str]:
    """Expand pattern if identified as a directory and return found sub packages"""
    if not pattern.endswith(os.path.sep):
        return [pattern]

    # Trailing separator: the pattern names a directory relative to
    # self.directory; expand it into the sub-directories it contains.
    base = os.path.join(self.directory, pattern)
    return [entry for entry in os.listdir(base) if os.path.isdir(os.path.join(base, entry))]
|
||||
|
||||
|
||||
def _get_str_to_type_converter(setting_name: str) -> Union[Callable[[str], Any], Type[Any]]:
    """Return the callable used to coerce a raw string value for *setting_name*.

    Defaults to the type of the setting's default value; WrapModes settings get
    the dedicated wrap_mode_from_string parser.
    """
    converter: Union[Callable[[str], Any], Type[Any]] = type(
        _DEFAULT_SETTINGS.get(setting_name, "")
    )
    return wrap_mode_from_string if converter == WrapModes else converter
|
||||
|
||||
|
||||
def _as_list(value: str) -> List[str]:
|
||||
if isinstance(value, list):
|
||||
return [item.strip() for item in value]
|
||||
filtered = [item.strip() for item in value.replace("\n", ",").split(",") if item.strip()]
|
||||
return filtered
|
||||
|
||||
|
||||
def _abspaths(cwd: str, values: Iterable[str]) -> Set[str]:
|
||||
paths = {
|
||||
os.path.join(cwd, value)
|
||||
if not value.startswith(os.path.sep) and value.endswith(os.path.sep)
|
||||
else value
|
||||
for value in values
|
||||
}
|
||||
return paths
|
||||
|
||||
|
||||
@lru_cache()
def _find_config(path: str) -> Tuple[str, Dict[str, Any]]:
    """Walk upward from *path* until a usable isort configuration is found.

    Returns (directory, settings). The search stops at a directory containing
    one of STOP_CONFIG_SEARCH_ON_DIRS (e.g. a VCS root), at the filesystem
    root, or after MAX_CONFIG_SEARCH_DEPTH parent hops.
    """
    current = path
    for _attempt in range(MAX_CONFIG_SEARCH_DEPTH):
        if not current:
            break

        for candidate_name in CONFIG_SOURCES:
            candidate = os.path.join(current, candidate_name)
            if not os.path.isfile(candidate):
                continue
            found: Dict[str, Any]
            try:
                found = _get_config_data(candidate, CONFIG_SECTIONS[candidate_name])
            except Exception:
                warn(f"Failed to pull configuration information from {candidate}")
                found = {}
            if found:
                return (current, found)

        if any(
            os.path.isdir(os.path.join(current, stop_dir))
            for stop_dir in STOP_CONFIG_SEARCH_ON_DIRS
        ):
            return (current, {})

        parent = os.path.split(current)[0]
        if parent == current:
            break
        current = parent

    return (path, {})
|
||||
|
||||
|
||||
@lru_cache()
def find_all_configs(path: str) -> Trie:
    """
    Looks for config files in the path provided and in all of its sub-directories.
    Parses and stores any config file encountered in a trie and returns the root of
    the trie
    """
    trie_root = Trie("default", {})

    for dirpath, _, _ in os.walk(path):
        for candidate_name in CONFIG_SOURCES:
            candidate = os.path.join(dirpath, candidate_name)
            if not os.path.isfile(candidate):
                continue

            found: Dict[str, Any]
            try:
                found = _get_config_data(candidate, CONFIG_SECTIONS[candidate_name])
            except Exception:
                warn(f"Failed to pull configuration information from {candidate}")
                found = {}

            if found:
                trie_root.insert(candidate, found)
                # Only the first config file with settings counts per directory.
                break

    return trie_root
|
||||
|
||||
|
||||
@lru_cache()
def _get_config_data(file_path: str, sections: Tuple[str]) -> Dict[str, Any]:
    """Read *file_path* and return the isort settings found in *sections*.

    Supports TOML files, .editorconfig files, and INI-style config files;
    raw values are coerced to the types of the corresponding defaults.
    """
    settings: Dict[str, Any] = {}

    if file_path.endswith(".toml"):
        with open(file_path, "rb") as toml_stream:
            document = tomli.load(toml_stream)
            for section in sections:
                table: Any = document
                # Sections may be dotted paths such as "tool.isort".
                for part in section.split("."):
                    table = table.get(part, {})
                settings.update(table)
    else:
        with open(file_path, encoding="utf-8") as config_file:
            if file_path.endswith(".editorconfig"):
                # .editorconfig files may begin with a preamble before the
                # first "[...]" header; skip ahead so configparser only sees
                # section content.
                line = "\n"
                last_position = config_file.tell()
                while line:
                    line = config_file.readline()
                    if "[" in line:
                        config_file.seek(last_position)
                        break
                    last_position = config_file.tell()

            parser = configparser.ConfigParser(strict=False)
            parser.read_file(config_file)
            for section in sections:
                if section.startswith("*.{") and section.endswith("}"):
                    # Editorconfig-style multi-extension headers such as
                    # "*.{py,pyi}": match when our extension is listed.
                    extension = section[len("*.{") : -1]
                    for parser_section in parser.keys():
                        if (
                            parser_section.startswith("*.{")
                            and parser_section.endswith("}")
                            and extension
                            in map(
                                lambda text: text.strip(), parser_section[len("*.{") : -1].split(",")  # type: ignore # noqa
                            )
                        ):
                            settings.update(parser.items(parser_section))

                elif parser.has_section(section):
                    settings.update(parser.items(section))

    if settings:
        settings["source"] = file_path

        if file_path.endswith(".editorconfig"):
            indent_style = settings.pop("indent_style", "").strip()
            indent_size = settings.pop("indent_size", "").strip()
            if indent_size == "tab":
                indent_size = settings.pop("tab_width", "").strip()

            if indent_style == "space":
                settings["indent"] = " " * (indent_size and int(indent_size) or 4)

            elif indent_style == "tab":
                settings["indent"] = "\t" * (indent_size and int(indent_size) or 1)

            max_line_length = settings.pop("max_line_length", "").strip()
            if max_line_length and (max_line_length == "off" or max_line_length.isdigit()):
                settings["line_length"] = (
                    float("inf") if max_line_length == "off" else int(max_line_length)
                )
            # Editorconfig files carry many non-isort keys; keep only ours.
            settings = {
                key: value
                for key, value in settings.items()
                if key in _DEFAULT_SETTINGS.keys() or key.startswith(KNOWN_PREFIX)
            }

        for key, value in settings.items():
            expected_type = _get_str_to_type_converter(key)
            if expected_type == tuple:
                settings[key] = tuple(_as_list(value))
            elif expected_type == frozenset:
                settings[key] = frozenset(_as_list(settings.get(key)))  # type: ignore
            elif expected_type == bool:
                # Only some configuration formats support native boolean values.
                if not isinstance(value, bool):
                    value = _as_bool(value)
                settings[key] = value
            elif key.startswith(KNOWN_PREFIX):
                settings[key] = _abspaths(os.path.dirname(file_path), _as_list(value))
            elif key == "force_grid_wrap":
                try:
                    coerced = expected_type(value)
                except ValueError:  # backwards compatibility for true / false force grid wrap
                    coerced = 0 if value.lower().strip() == "false" else 2
                settings[key] = coerced
            elif key == "comment_prefix":
                settings[key] = str(value).strip("'").strip('"')
            else:
                settings[key] = expected_type(value)

    return settings
|
||||
|
||||
|
||||
def _as_bool(value: str) -> bool:
    """Given a string value that represents True or False, returns the Boolean equivalent.
    Heavily inspired from distutils strtobool.
    """
    normalized = value.lower()
    if normalized not in _STR_BOOLEAN_MAPPING:
        raise ValueError(f"invalid truth value {value}")
    return _STR_BOOLEAN_MAPPING[normalized]
|
||||
|
||||
|
||||
DEFAULT_CONFIG = Config()
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
import glob
|
||||
import os
|
||||
import sys
|
||||
from typing import Any, Dict, Iterator, List
|
||||
from warnings import warn
|
||||
|
||||
import setuptools # type: ignore
|
||||
|
||||
from . import api
|
||||
from .settings import DEFAULT_CONFIG
|
||||
|
||||
|
||||
class ISortCommand(setuptools.Command):  # type: ignore
    """The :class:`ISortCommand` class is used by setuptools to perform
    imports checks on registered modules.
    """

    description = "Run isort on modules registered in setuptools"
    user_options: List[Any] = []

    def initialize_options(self) -> None:
        # Mirror every default isort configuration value onto the command.
        for option_name, option_value in vars(DEFAULT_CONFIG).copy().items():
            setattr(self, option_name, option_value)

    def finalize_options(self) -> None:
        """Get options from config files."""
        self.arguments: Dict[str, Any] = {}  # skipcq: PYL-W0201
        self.arguments["settings_path"] = os.getcwd()

    def distribution_files(self) -> Iterator[str]:
        """Find distribution packages."""
        # This is verbatim from flake8
        if self.distribution.packages:  # pragma: no cover
            package_dirs = self.distribution.package_dir or {}
            for package in self.distribution.packages:
                pkg_dir = package
                if package in package_dirs:
                    pkg_dir = package_dirs[package]
                elif "" in package_dirs:  # pragma: no cover
                    pkg_dir = package_dirs[""] + os.path.sep + pkg_dir
                yield pkg_dir.replace(".", os.path.sep)

        if self.distribution.py_modules:
            for filename in self.distribution.py_modules:
                yield "%s.py" % filename
        # Don't miss the setup.py file itself
        yield "setup.py"

    def run(self) -> None:
        arguments = self.arguments
        found_unsorted = False
        for path in self.distribution_files():
            for python_file in glob.iglob(os.path.join(path, "*.py")):
                try:
                    if not api.check_file(python_file, **arguments):
                        found_unsorted = True  # pragma: no cover
                except OSError as error:  # pragma: no cover
                    warn(f"Unable to parse file {python_file} due to {error}")
        if found_unsorted:
            sys.exit(1)  # pragma: no cover
|
||||
130
.venv/lib/python3.8/site-packages/isort/sorting.py
Normal file
130
.venv/lib/python3.8/site-packages/isort/sorting.py
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
import re
|
||||
from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .settings import Config
|
||||
else:
|
||||
Config = Any
|
||||
|
||||
_import_line_intro_re = re.compile("^(?:from|import) ")
|
||||
_import_line_midline_import_re = re.compile(" import ")
|
||||
|
||||
|
||||
def module_key(
    module_name: str,
    config: Config,
    sub_imports: bool = False,
    ignore_case: bool = False,
    section_name: Optional[Any] = None,
    straight_import: Optional[bool] = False,
) -> str:
    """Build the sort key used to order *module_name* within its section."""
    # Relative imports ("..mod") become "dots<sep>name" so they sort together.
    relative_match = re.match(r"^(\.+)\s*(.*)", module_name)
    if relative_match:
        separator = " " if config.reverse_relative else "_"
        module_name = separator.join(relative_match.groups())

    prefix = ""
    module_name = str(module_name).lower() if ignore_case else str(module_name)

    if sub_imports and config.order_by_type:
        # Rank sub-imports: constants first, then classes, then variables.
        if module_name in config.constants:
            prefix = "A"
        elif module_name in config.classes:
            prefix = "B"
        elif module_name in config.variables:
            prefix = "C"
        elif module_name.isupper() and len(module_name) > 1:  # see issue #376
            prefix = "A"
        elif module_name in config.classes or module_name[0:1].isupper():
            prefix = "B"
        else:
            prefix = "C"
    if not config.case_sensitive:
        module_name = module_name.lower()

    use_length_sort = (
        config.length_sort
        or (config.length_sort_straight and straight_import)
        or str(section_name).lower() in config.length_sort_sections
    )
    sortable_name = str(len(module_name)) + ":" + module_name if use_length_sort else module_name
    top_marker = "A" if module_name in config.force_to_top else "B"
    return f"{top_marker}{prefix}{sortable_name}"
|
||||
|
||||
|
||||
def section_key(line: str, config: Config) -> str:
    """Build the sort key for a full import *line* within a force-sorted section."""
    section = "B"

    if (
        not config.sort_relative_in_force_sorted_sections
        and config.reverse_relative
        and line.startswith("from .")
    ):
        relative_match = re.match(r"^from (\.+)\s*(.*)", line)
        if relative_match:  # pragma: no cover - regex always matches if line starts with "from ."
            line = f"from {' '.join(relative_match.groups())}"
    if config.group_by_package and line.strip().startswith("from"):
        line = line.split(" import", 1)[0]

    if config.lexicographical:
        line = _import_line_intro_re.sub("", _import_line_midline_import_re.sub(".", line))
    else:
        line = re.sub("^from ", "", line)
        line = re.sub("^import ", "", line)
    if config.sort_relative_in_force_sorted_sections:
        separator = " " if config.reverse_relative else "_"
        line = re.sub(r"^(\.+)", fr"\1{separator}", line)
    if line.split(" ")[0] in config.force_to_top:
        section = "A"
    # * If honor_case_in_force_sorted_sections is true, and case_sensitive and
    #   order_by_type are different, only ignore case in part of the line.
    # * Otherwise, let order_by_type decide the sorting of the whole line. This
    #   is only "correct" if case_sensitive and order_by_type have the same value.
    if config.honor_case_in_force_sorted_sections and config.case_sensitive != config.order_by_type:
        pieces = line.split(" import ", 1)
        if len(pieces) > 1:
            module_part, names_part = pieces
            if not config.case_sensitive:
                module_part = module_part.lower()
            if not config.order_by_type:
                names_part = names_part.lower()
            line = " import ".join([module_part, names_part])
    elif not config.case_sensitive:
        line = line.lower()
    elif not config.order_by_type:
        line = line.lower()

    return f"{section}{len(line) if config.length_sort else ''}{line}"
|
||||
|
||||
|
||||
def sort(
    config: Config,
    to_sort: Iterable[str],
    key: Optional[Callable[[str], Any]] = None,
    reverse: bool = False,
) -> List[str]:
    """Sort *to_sort* using the sorting function configured on *config*."""
    sorting_function = config.sorting_function
    return sorting_function(to_sort, key=key, reverse=reverse)
|
||||
|
||||
|
||||
def naturally(
    to_sort: Iterable[str], key: Optional[Callable[[str], Any]] = None, reverse: bool = False
) -> List[str]:
    """Returns a naturally sorted list"""

    def natural_key_of(text: str) -> List[Any]:
        # Apply the caller-supplied key first (if any), then split the result
        # into natural-ordering chunks.
        return _natural_keys(key(text)) if key is not None else _natural_keys(text)  # type: ignore

    return sorted(to_sort, key=natural_key_of, reverse=reverse)
|
||||
|
||||
|
||||
def _atoi(text: str) -> Any:
|
||||
return int(text) if text.isdigit() else text
|
||||
|
||||
|
||||
def _natural_keys(text: str) -> List[Any]:
|
||||
return [_atoi(c) for c in re.split(r"(\d+)", text)]
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
from . import all as _all
|
||||
from . import py2, py3, py27, py35, py36, py37, py38, py39, py310
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
3
.venv/lib/python3.8/site-packages/isort/stdlibs/all.py
Normal file
3
.venv/lib/python3.8/site-packages/isort/stdlibs/all.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
from . import py2, py3
|
||||
|
||||
stdlib = py2.stdlib | py3.stdlib
|
||||
3
.venv/lib/python3.8/site-packages/isort/stdlibs/py2.py
Normal file
3
.venv/lib/python3.8/site-packages/isort/stdlibs/py2.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
from . import py27
|
||||
|
||||
stdlib = py27.stdlib
|
||||
301
.venv/lib/python3.8/site-packages/isort/stdlibs/py27.py
Normal file
301
.venv/lib/python3.8/site-packages/isort/stdlibs/py27.py
Normal file
|
|
@ -0,0 +1,301 @@
|
|||
"""
|
||||
File contains the standard library of Python 2.7.
|
||||
|
||||
DO NOT EDIT. If the standard library changes, a new list should be created
|
||||
using the mkstdlibs.py script.
|
||||
"""
|
||||
|
||||
stdlib = {
|
||||
"AL",
|
||||
"BaseHTTPServer",
|
||||
"Bastion",
|
||||
"CGIHTTPServer",
|
||||
"Carbon",
|
||||
"ColorPicker",
|
||||
"ConfigParser",
|
||||
"Cookie",
|
||||
"DEVICE",
|
||||
"DocXMLRPCServer",
|
||||
"EasyDialogs",
|
||||
"FL",
|
||||
"FrameWork",
|
||||
"GL",
|
||||
"HTMLParser",
|
||||
"MacOS",
|
||||
"MimeWriter",
|
||||
"MiniAEFrame",
|
||||
"Nav",
|
||||
"PixMapWrapper",
|
||||
"Queue",
|
||||
"SUNAUDIODEV",
|
||||
"ScrolledText",
|
||||
"SimpleHTTPServer",
|
||||
"SimpleXMLRPCServer",
|
||||
"SocketServer",
|
||||
"StringIO",
|
||||
"Tix",
|
||||
"Tkinter",
|
||||
"UserDict",
|
||||
"UserList",
|
||||
"UserString",
|
||||
"W",
|
||||
"__builtin__",
|
||||
"_ast",
|
||||
"_winreg",
|
||||
"abc",
|
||||
"aepack",
|
||||
"aetools",
|
||||
"aetypes",
|
||||
"aifc",
|
||||
"al",
|
||||
"anydbm",
|
||||
"applesingle",
|
||||
"argparse",
|
||||
"array",
|
||||
"ast",
|
||||
"asynchat",
|
||||
"asyncore",
|
||||
"atexit",
|
||||
"audioop",
|
||||
"autoGIL",
|
||||
"base64",
|
||||
"bdb",
|
||||
"binascii",
|
||||
"binhex",
|
||||
"bisect",
|
||||
"bsddb",
|
||||
"buildtools",
|
||||
"bz2",
|
||||
"cPickle",
|
||||
"cProfile",
|
||||
"cStringIO",
|
||||
"calendar",
|
||||
"cd",
|
||||
"cfmfile",
|
||||
"cgi",
|
||||
"cgitb",
|
||||
"chunk",
|
||||
"cmath",
|
||||
"cmd",
|
||||
"code",
|
||||
"codecs",
|
||||
"codeop",
|
||||
"collections",
|
||||
"colorsys",
|
||||
"commands",
|
||||
"compileall",
|
||||
"compiler",
|
||||
"contextlib",
|
||||
"cookielib",
|
||||
"copy",
|
||||
"copy_reg",
|
||||
"crypt",
|
||||
"csv",
|
||||
"ctypes",
|
||||
"curses",
|
||||
"datetime",
|
||||
"dbhash",
|
||||
"dbm",
|
||||
"decimal",
|
||||
"difflib",
|
||||
"dircache",
|
||||
"dis",
|
||||
"distutils",
|
||||
"dl",
|
||||
"doctest",
|
||||
"dumbdbm",
|
||||
"dummy_thread",
|
||||
"dummy_threading",
|
||||
"email",
|
||||
"encodings",
|
||||
"ensurepip",
|
||||
"errno",
|
||||
"exceptions",
|
||||
"fcntl",
|
||||
"filecmp",
|
||||
"fileinput",
|
||||
"findertools",
|
||||
"fl",
|
||||
"flp",
|
||||
"fm",
|
||||
"fnmatch",
|
||||
"formatter",
|
||||
"fpectl",
|
||||
"fpformat",
|
||||
"fractions",
|
||||
"ftplib",
|
||||
"functools",
|
||||
"future_builtins",
|
||||
"gc",
|
||||
"gdbm",
|
||||
"gensuitemodule",
|
||||
"getopt",
|
||||
"getpass",
|
||||
"gettext",
|
||||
"gl",
|
||||
"glob",
|
||||
"grp",
|
||||
"gzip",
|
||||
"hashlib",
|
||||
"heapq",
|
||||
"hmac",
|
||||
"hotshot",
|
||||
"htmlentitydefs",
|
||||
"htmllib",
|
||||
"httplib",
|
||||
"ic",
|
||||
"icopen",
|
||||
"imageop",
|
||||
"imaplib",
|
||||
"imgfile",
|
||||
"imghdr",
|
||||
"imp",
|
||||
"importlib",
|
||||
"imputil",
|
||||
"inspect",
|
||||
"io",
|
||||
"itertools",
|
||||
"jpeg",
|
||||
"json",
|
||||
"keyword",
|
||||
"lib2to3",
|
||||
"linecache",
|
||||
"locale",
|
||||
"logging",
|
||||
"macerrors",
|
||||
"macostools",
|
||||
"macpath",
|
||||
"macresource",
|
||||
"mailbox",
|
||||
"mailcap",
|
||||
"marshal",
|
||||
"math",
|
||||
"md5",
|
||||
"mhlib",
|
||||
"mimetools",
|
||||
"mimetypes",
|
||||
"mimify",
|
||||
"mmap",
|
||||
"modulefinder",
|
||||
"msilib",
|
||||
"msvcrt",
|
||||
"multifile",
|
||||
"multiprocessing",
|
||||
"mutex",
|
||||
"netrc",
|
||||
"new",
|
||||
"nis",
|
||||
"nntplib",
|
||||
"ntpath",
|
||||
"numbers",
|
||||
"operator",
|
||||
"optparse",
|
||||
"os",
|
||||
"ossaudiodev",
|
||||
"parser",
|
||||
"pdb",
|
||||
"pickle",
|
||||
"pickletools",
|
||||
"pipes",
|
||||
"pkgutil",
|
||||
"platform",
|
||||
"plistlib",
|
||||
"popen2",
|
||||
"poplib",
|
||||
"posix",
|
||||
"posixfile",
|
||||
"posixpath",
|
||||
"pprint",
|
||||
"profile",
|
||||
"pstats",
|
||||
"pty",
|
||||
"pwd",
|
||||
"py_compile",
|
||||
"pyclbr",
|
||||
"pydoc",
|
||||
"quopri",
|
||||
"random",
|
||||
"re",
|
||||
"readline",
|
||||
"resource",
|
||||
"rexec",
|
||||
"rfc822",
|
||||
"rlcompleter",
|
||||
"robotparser",
|
||||
"runpy",
|
||||
"sched",
|
||||
"select",
|
||||
"sets",
|
||||
"sgmllib",
|
||||
"sha",
|
||||
"shelve",
|
||||
"shlex",
|
||||
"shutil",
|
||||
"signal",
|
||||
"site",
|
||||
"smtpd",
|
||||
"smtplib",
|
||||
"sndhdr",
|
||||
"socket",
|
||||
"spwd",
|
||||
"sqlite3",
|
||||
"sre",
|
||||
"sre_compile",
|
||||
"sre_constants",
|
||||
"sre_parse",
|
||||
"ssl",
|
||||
"stat",
|
||||
"statvfs",
|
||||
"string",
|
||||
"stringprep",
|
||||
"struct",
|
||||
"subprocess",
|
||||
"sunau",
|
||||
"sunaudiodev",
|
||||
"symbol",
|
||||
"symtable",
|
||||
"sys",
|
||||
"sysconfig",
|
||||
"syslog",
|
||||
"tabnanny",
|
||||
"tarfile",
|
||||
"telnetlib",
|
||||
"tempfile",
|
||||
"termios",
|
||||
"test",
|
||||
"textwrap",
|
||||
"thread",
|
||||
"threading",
|
||||
"time",
|
||||
"timeit",
|
||||
"token",
|
||||
"tokenize",
|
||||
"trace",
|
||||
"traceback",
|
||||
"ttk",
|
||||
"tty",
|
||||
"turtle",
|
||||
"types",
|
||||
"unicodedata",
|
||||
"unittest",
|
||||
"urllib",
|
||||
"urllib2",
|
||||
"urlparse",
|
||||
"user",
|
||||
"uu",
|
||||
"uuid",
|
||||
"videoreader",
|
||||
"warnings",
|
||||
"wave",
|
||||
"weakref",
|
||||
"webbrowser",
|
||||
"whichdb",
|
||||
"winsound",
|
||||
"wsgiref",
|
||||
"xdrlib",
|
||||
"xml",
|
||||
"xmlrpclib",
|
||||
"zipfile",
|
||||
"zipimport",
|
||||
"zlib",
|
||||
}
|
||||
3
.venv/lib/python3.8/site-packages/isort/stdlibs/py3.py
Normal file
3
.venv/lib/python3.8/site-packages/isort/stdlibs/py3.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
from . import py35, py36, py37, py38, py39, py310
|
||||
|
||||
stdlib = py35.stdlib | py36.stdlib | py37.stdlib | py38.stdlib | py39.stdlib | py310.stdlib
|
||||
221
.venv/lib/python3.8/site-packages/isort/stdlibs/py310.py
Normal file
221
.venv/lib/python3.8/site-packages/isort/stdlibs/py310.py
Normal file
|
|
@ -0,0 +1,221 @@
|
|||
"""
|
||||
File contains the standard library of Python 3.10.
|
||||
|
||||
DO NOT EDIT. If the standard library changes, a new list should be created
|
||||
using the mkstdlibs.py script.
|
||||
"""
|
||||
|
||||
stdlib = {
|
||||
"_ast",
|
||||
"_thread",
|
||||
"abc",
|
||||
"aifc",
|
||||
"argparse",
|
||||
"array",
|
||||
"ast",
|
||||
"asynchat",
|
||||
"asyncio",
|
||||
"asyncore",
|
||||
"atexit",
|
||||
"audioop",
|
||||
"base64",
|
||||
"bdb",
|
||||
"binascii",
|
||||
"binhex",
|
||||
"bisect",
|
||||
"builtins",
|
||||
"bz2",
|
||||
"cProfile",
|
||||
"calendar",
|
||||
"cgi",
|
||||
"cgitb",
|
||||
"chunk",
|
||||
"cmath",
|
||||
"cmd",
|
||||
"code",
|
||||
"codecs",
|
||||
"codeop",
|
||||
"collections",
|
||||
"colorsys",
|
||||
"compileall",
|
||||
"concurrent",
|
||||
"configparser",
|
||||
"contextlib",
|
||||
"contextvars",
|
||||
"copy",
|
||||
"copyreg",
|
||||
"crypt",
|
||||
"csv",
|
||||
"ctypes",
|
||||
"curses",
|
||||
"dataclasses",
|
||||
"datetime",
|
||||
"dbm",
|
||||
"decimal",
|
||||
"difflib",
|
||||
"dis",
|
||||
"distutils",
|
||||
"doctest",
|
||||
"email",
|
||||
"encodings",
|
||||
"ensurepip",
|
||||
"enum",
|
||||
"errno",
|
||||
"faulthandler",
|
||||
"fcntl",
|
||||
"filecmp",
|
||||
"fileinput",
|
||||
"fnmatch",
|
||||
"fractions",
|
||||
"ftplib",
|
||||
"functools",
|
||||
"gc",
|
||||
"getopt",
|
||||
"getpass",
|
||||
"gettext",
|
||||
"glob",
|
||||
"graphlib",
|
||||
"grp",
|
||||
"gzip",
|
||||
"hashlib",
|
||||
"heapq",
|
||||
"hmac",
|
||||
"html",
|
||||
"http",
|
||||
"imaplib",
|
||||
"imghdr",
|
||||
"imp",
|
||||
"importlib",
|
||||
"inspect",
|
||||
"io",
|
||||
"ipaddress",
|
||||
"itertools",
|
||||
"json",
|
||||
"keyword",
|
||||
"lib2to3",
|
||||
"linecache",
|
||||
"locale",
|
||||
"logging",
|
||||
"lzma",
|
||||
"mailbox",
|
||||
"mailcap",
|
||||
"marshal",
|
||||
"math",
|
||||
"mimetypes",
|
||||
"mmap",
|
||||
"modulefinder",
|
||||
"msilib",
|
||||
"msvcrt",
|
||||
"multiprocessing",
|
||||
"netrc",
|
||||
"nis",
|
||||
"nntplib",
|
||||
"ntpath",
|
||||
"numbers",
|
||||
"operator",
|
||||
"optparse",
|
||||
"os",
|
||||
"ossaudiodev",
|
||||
"pathlib",
|
||||
"pdb",
|
||||
"pickle",
|
||||
"pickletools",
|
||||
"pipes",
|
||||
"pkgutil",
|
||||
"platform",
|
||||
"plistlib",
|
||||
"poplib",
|
||||
"posix",
|
||||
"posixpath",
|
||||
"pprint",
|
||||
"profile",
|
||||
"pstats",
|
||||
"pty",
|
||||
"pwd",
|
||||
"py_compile",
|
||||
"pyclbr",
|
||||
"pydoc",
|
||||
"queue",
|
||||
"quopri",
|
||||
"random",
|
||||
"re",
|
||||
"readline",
|
||||
"reprlib",
|
||||
"resource",
|
||||
"rlcompleter",
|
||||
"runpy",
|
||||
"sched",
|
||||
"secrets",
|
||||
"select",
|
||||
"selectors",
|
||||
"shelve",
|
||||
"shlex",
|
||||
"shutil",
|
||||
"signal",
|
||||
"site",
|
||||
"smtpd",
|
||||
"smtplib",
|
||||
"sndhdr",
|
||||
"socket",
|
||||
"socketserver",
|
||||
"spwd",
|
||||
"sqlite3",
|
||||
"sre",
|
||||
"sre_compile",
|
||||
"sre_constants",
|
||||
"sre_parse",
|
||||
"ssl",
|
||||
"stat",
|
||||
"statistics",
|
||||
"string",
|
||||
"stringprep",
|
||||
"struct",
|
||||
"subprocess",
|
||||
"sunau",
|
||||
"symtable",
|
||||
"sys",
|
||||
"sysconfig",
|
||||
"syslog",
|
||||
"tabnanny",
|
||||
"tarfile",
|
||||
"telnetlib",
|
||||
"tempfile",
|
||||
"termios",
|
||||
"test",
|
||||
"textwrap",
|
||||
"threading",
|
||||
"time",
|
||||
"timeit",
|
||||
"tkinter",
|
||||
"token",
|
||||
"tokenize",
|
||||
"trace",
|
||||
"traceback",
|
||||
"tracemalloc",
|
||||
"tty",
|
||||
"turtle",
|
||||
"turtledemo",
|
||||
"types",
|
||||
"typing",
|
||||
"unicodedata",
|
||||
"unittest",
|
||||
"urllib",
|
||||
"uu",
|
||||
"uuid",
|
||||
"venv",
|
||||
"warnings",
|
||||
"wave",
|
||||
"weakref",
|
||||
"webbrowser",
|
||||
"winreg",
|
||||
"winsound",
|
||||
"wsgiref",
|
||||
"xdrlib",
|
||||
"xml",
|
||||
"xmlrpc",
|
||||
"zipapp",
|
||||
"zipfile",
|
||||
"zipimport",
|
||||
"zlib",
|
||||
"zoneinfo",
|
||||
}
|
||||
223
.venv/lib/python3.8/site-packages/isort/stdlibs/py35.py
Normal file
223
.venv/lib/python3.8/site-packages/isort/stdlibs/py35.py
Normal file
|
|
@ -0,0 +1,223 @@
|
|||
"""
|
||||
File contains the standard library of Python 3.5.
|
||||
|
||||
DO NOT EDIT. If the standard library changes, a new list should be created
|
||||
using the mkstdlibs.py script.
|
||||
"""
|
||||
|
||||
stdlib = {
|
||||
"_ast",
|
||||
"_dummy_thread",
|
||||
"_thread",
|
||||
"abc",
|
||||
"aifc",
|
||||
"argparse",
|
||||
"array",
|
||||
"ast",
|
||||
"asynchat",
|
||||
"asyncio",
|
||||
"asyncore",
|
||||
"atexit",
|
||||
"audioop",
|
||||
"base64",
|
||||
"bdb",
|
||||
"binascii",
|
||||
"binhex",
|
||||
"bisect",
|
||||
"builtins",
|
||||
"bz2",
|
||||
"cProfile",
|
||||
"calendar",
|
||||
"cgi",
|
||||
"cgitb",
|
||||
"chunk",
|
||||
"cmath",
|
||||
"cmd",
|
||||
"code",
|
||||
"codecs",
|
||||
"codeop",
|
||||
"collections",
|
||||
"colorsys",
|
||||
"compileall",
|
||||
"concurrent",
|
||||
"configparser",
|
||||
"contextlib",
|
||||
"copy",
|
||||
"copyreg",
|
||||
"crypt",
|
||||
"csv",
|
||||
"ctypes",
|
||||
"curses",
|
||||
"datetime",
|
||||
"dbm",
|
||||
"decimal",
|
||||
"difflib",
|
||||
"dis",
|
||||
"distutils",
|
||||
"doctest",
|
||||
"dummy_threading",
|
||||
"email",
|
||||
"encodings",
|
||||
"ensurepip",
|
||||
"enum",
|
||||
"errno",
|
||||
"faulthandler",
|
||||
"fcntl",
|
||||
"filecmp",
|
||||
"fileinput",
|
||||
"fnmatch",
|
||||
"formatter",
|
||||
"fpectl",
|
||||
"fractions",
|
||||
"ftplib",
|
||||
"functools",
|
||||
"gc",
|
||||
"getopt",
|
||||
"getpass",
|
||||
"gettext",
|
||||
"glob",
|
||||
"grp",
|
||||
"gzip",
|
||||
"hashlib",
|
||||
"heapq",
|
||||
"hmac",
|
||||
"html",
|
||||
"http",
|
||||
"imaplib",
|
||||
"imghdr",
|
||||
"imp",
|
||||
"importlib",
|
||||
"inspect",
|
||||
"io",
|
||||
"ipaddress",
|
||||
"itertools",
|
||||
"json",
|
||||
"keyword",
|
||||
"lib2to3",
|
||||
"linecache",
|
||||
"locale",
|
||||
"logging",
|
||||
"lzma",
|
||||
"macpath",
|
||||
"mailbox",
|
||||
"mailcap",
|
||||
"marshal",
|
||||
"math",
|
||||
"mimetypes",
|
||||
"mmap",
|
||||
"modulefinder",
|
||||
"msilib",
|
||||
"msvcrt",
|
||||
"multiprocessing",
|
||||
"netrc",
|
||||
"nis",
|
||||
"nntplib",
|
||||
"ntpath",
|
||||
"numbers",
|
||||
"operator",
|
||||
"optparse",
|
||||
"os",
|
||||
"ossaudiodev",
|
||||
"parser",
|
||||
"pathlib",
|
||||
"pdb",
|
||||
"pickle",
|
||||
"pickletools",
|
||||
"pipes",
|
||||
"pkgutil",
|
||||
"platform",
|
||||
"plistlib",
|
||||
"poplib",
|
||||
"posix",
|
||||
"posixpath",
|
||||
"pprint",
|
||||
"profile",
|
||||
"pstats",
|
||||
"pty",
|
||||
"pwd",
|
||||
"py_compile",
|
||||
"pyclbr",
|
||||
"pydoc",
|
||||
"queue",
|
||||
"quopri",
|
||||
"random",
|
||||
"re",
|
||||
"readline",
|
||||
"reprlib",
|
||||
"resource",
|
||||
"rlcompleter",
|
||||
"runpy",
|
||||
"sched",
|
||||
"select",
|
||||
"selectors",
|
||||
"shelve",
|
||||
"shlex",
|
||||
"shutil",
|
||||
"signal",
|
||||
"site",
|
||||
"smtpd",
|
||||
"smtplib",
|
||||
"sndhdr",
|
||||
"socket",
|
||||
"socketserver",
|
||||
"spwd",
|
||||
"sqlite3",
|
||||
"sre",
|
||||
"sre_compile",
|
||||
"sre_constants",
|
||||
"sre_parse",
|
||||
"ssl",
|
||||
"stat",
|
||||
"statistics",
|
||||
"string",
|
||||
"stringprep",
|
||||
"struct",
|
||||
"subprocess",
|
||||
"sunau",
|
||||
"symbol",
|
||||
"symtable",
|
||||
"sys",
|
||||
"sysconfig",
|
||||
"syslog",
|
||||
"tabnanny",
|
||||
"tarfile",
|
||||
"telnetlib",
|
||||
"tempfile",
|
||||
"termios",
|
||||
"test",
|
||||
"textwrap",
|
||||
"threading",
|
||||
"time",
|
||||
"timeit",
|
||||
"tkinter",
|
||||
"token",
|
||||
"tokenize",
|
||||
"trace",
|
||||
"traceback",
|
||||
"tracemalloc",
|
||||
"tty",
|
||||
"turtle",
|
||||
"turtledemo",
|
||||
"types",
|
||||
"typing",
|
||||
"unicodedata",
|
||||
"unittest",
|
||||
"urllib",
|
||||
"uu",
|
||||
"uuid",
|
||||
"venv",
|
||||
"warnings",
|
||||
"wave",
|
||||
"weakref",
|
||||
"webbrowser",
|
||||
"winreg",
|
||||
"winsound",
|
||||
"wsgiref",
|
||||
"xdrlib",
|
||||
"xml",
|
||||
"xmlrpc",
|
||||
"zipapp",
|
||||
"zipfile",
|
||||
"zipimport",
|
||||
"zlib",
|
||||
}
|
||||
224
.venv/lib/python3.8/site-packages/isort/stdlibs/py36.py
Normal file
224
.venv/lib/python3.8/site-packages/isort/stdlibs/py36.py
Normal file
|
|
@ -0,0 +1,224 @@
|
|||
"""
|
||||
File contains the standard library of Python 3.6.
|
||||
|
||||
DO NOT EDIT. If the standard library changes, a new list should be created
|
||||
using the mkstdlibs.py script.
|
||||
"""
|
||||
|
||||
stdlib = {
|
||||
"_ast",
|
||||
"_dummy_thread",
|
||||
"_thread",
|
||||
"abc",
|
||||
"aifc",
|
||||
"argparse",
|
||||
"array",
|
||||
"ast",
|
||||
"asynchat",
|
||||
"asyncio",
|
||||
"asyncore",
|
||||
"atexit",
|
||||
"audioop",
|
||||
"base64",
|
||||
"bdb",
|
||||
"binascii",
|
||||
"binhex",
|
||||
"bisect",
|
||||
"builtins",
|
||||
"bz2",
|
||||
"cProfile",
|
||||
"calendar",
|
||||
"cgi",
|
||||
"cgitb",
|
||||
"chunk",
|
||||
"cmath",
|
||||
"cmd",
|
||||
"code",
|
||||
"codecs",
|
||||
"codeop",
|
||||
"collections",
|
||||
"colorsys",
|
||||
"compileall",
|
||||
"concurrent",
|
||||
"configparser",
|
||||
"contextlib",
|
||||
"copy",
|
||||
"copyreg",
|
||||
"crypt",
|
||||
"csv",
|
||||
"ctypes",
|
||||
"curses",
|
||||
"datetime",
|
||||
"dbm",
|
||||
"decimal",
|
||||
"difflib",
|
||||
"dis",
|
||||
"distutils",
|
||||
"doctest",
|
||||
"dummy_threading",
|
||||
"email",
|
||||
"encodings",
|
||||
"ensurepip",
|
||||
"enum",
|
||||
"errno",
|
||||
"faulthandler",
|
||||
"fcntl",
|
||||
"filecmp",
|
||||
"fileinput",
|
||||
"fnmatch",
|
||||
"formatter",
|
||||
"fpectl",
|
||||
"fractions",
|
||||
"ftplib",
|
||||
"functools",
|
||||
"gc",
|
||||
"getopt",
|
||||
"getpass",
|
||||
"gettext",
|
||||
"glob",
|
||||
"grp",
|
||||
"gzip",
|
||||
"hashlib",
|
||||
"heapq",
|
||||
"hmac",
|
||||
"html",
|
||||
"http",
|
||||
"imaplib",
|
||||
"imghdr",
|
||||
"imp",
|
||||
"importlib",
|
||||
"inspect",
|
||||
"io",
|
||||
"ipaddress",
|
||||
"itertools",
|
||||
"json",
|
||||
"keyword",
|
||||
"lib2to3",
|
||||
"linecache",
|
||||
"locale",
|
||||
"logging",
|
||||
"lzma",
|
||||
"macpath",
|
||||
"mailbox",
|
||||
"mailcap",
|
||||
"marshal",
|
||||
"math",
|
||||
"mimetypes",
|
||||
"mmap",
|
||||
"modulefinder",
|
||||
"msilib",
|
||||
"msvcrt",
|
||||
"multiprocessing",
|
||||
"netrc",
|
||||
"nis",
|
||||
"nntplib",
|
||||
"ntpath",
|
||||
"numbers",
|
||||
"operator",
|
||||
"optparse",
|
||||
"os",
|
||||
"ossaudiodev",
|
||||
"parser",
|
||||
"pathlib",
|
||||
"pdb",
|
||||
"pickle",
|
||||
"pickletools",
|
||||
"pipes",
|
||||
"pkgutil",
|
||||
"platform",
|
||||
"plistlib",
|
||||
"poplib",
|
||||
"posix",
|
||||
"posixpath",
|
||||
"pprint",
|
||||
"profile",
|
||||
"pstats",
|
||||
"pty",
|
||||
"pwd",
|
||||
"py_compile",
|
||||
"pyclbr",
|
||||
"pydoc",
|
||||
"queue",
|
||||
"quopri",
|
||||
"random",
|
||||
"re",
|
||||
"readline",
|
||||
"reprlib",
|
||||
"resource",
|
||||
"rlcompleter",
|
||||
"runpy",
|
||||
"sched",
|
||||
"secrets",
|
||||
"select",
|
||||
"selectors",
|
||||
"shelve",
|
||||
"shlex",
|
||||
"shutil",
|
||||
"signal",
|
||||
"site",
|
||||
"smtpd",
|
||||
"smtplib",
|
||||
"sndhdr",
|
||||
"socket",
|
||||
"socketserver",
|
||||
"spwd",
|
||||
"sqlite3",
|
||||
"sre",
|
||||
"sre_compile",
|
||||
"sre_constants",
|
||||
"sre_parse",
|
||||
"ssl",
|
||||
"stat",
|
||||
"statistics",
|
||||
"string",
|
||||
"stringprep",
|
||||
"struct",
|
||||
"subprocess",
|
||||
"sunau",
|
||||
"symbol",
|
||||
"symtable",
|
||||
"sys",
|
||||
"sysconfig",
|
||||
"syslog",
|
||||
"tabnanny",
|
||||
"tarfile",
|
||||
"telnetlib",
|
||||
"tempfile",
|
||||
"termios",
|
||||
"test",
|
||||
"textwrap",
|
||||
"threading",
|
||||
"time",
|
||||
"timeit",
|
||||
"tkinter",
|
||||
"token",
|
||||
"tokenize",
|
||||
"trace",
|
||||
"traceback",
|
||||
"tracemalloc",
|
||||
"tty",
|
||||
"turtle",
|
||||
"turtledemo",
|
||||
"types",
|
||||
"typing",
|
||||
"unicodedata",
|
||||
"unittest",
|
||||
"urllib",
|
||||
"uu",
|
||||
"uuid",
|
||||
"venv",
|
||||
"warnings",
|
||||
"wave",
|
||||
"weakref",
|
||||
"webbrowser",
|
||||
"winreg",
|
||||
"winsound",
|
||||
"wsgiref",
|
||||
"xdrlib",
|
||||
"xml",
|
||||
"xmlrpc",
|
||||
"zipapp",
|
||||
"zipfile",
|
||||
"zipimport",
|
||||
"zlib",
|
||||
}
|
||||
225
.venv/lib/python3.8/site-packages/isort/stdlibs/py37.py
Normal file
225
.venv/lib/python3.8/site-packages/isort/stdlibs/py37.py
Normal file
|
|
@ -0,0 +1,225 @@
|
|||
"""
|
||||
File contains the standard library of Python 3.7.
|
||||
|
||||
DO NOT EDIT. If the standard library changes, a new list should be created
|
||||
using the mkstdlibs.py script.
|
||||
"""
|
||||
|
||||
stdlib = {
|
||||
"_ast",
|
||||
"_dummy_thread",
|
||||
"_thread",
|
||||
"abc",
|
||||
"aifc",
|
||||
"argparse",
|
||||
"array",
|
||||
"ast",
|
||||
"asynchat",
|
||||
"asyncio",
|
||||
"asyncore",
|
||||
"atexit",
|
||||
"audioop",
|
||||
"base64",
|
||||
"bdb",
|
||||
"binascii",
|
||||
"binhex",
|
||||
"bisect",
|
||||
"builtins",
|
||||
"bz2",
|
||||
"cProfile",
|
||||
"calendar",
|
||||
"cgi",
|
||||
"cgitb",
|
||||
"chunk",
|
||||
"cmath",
|
||||
"cmd",
|
||||
"code",
|
||||
"codecs",
|
||||
"codeop",
|
||||
"collections",
|
||||
"colorsys",
|
||||
"compileall",
|
||||
"concurrent",
|
||||
"configparser",
|
||||
"contextlib",
|
||||
"contextvars",
|
||||
"copy",
|
||||
"copyreg",
|
||||
"crypt",
|
||||
"csv",
|
||||
"ctypes",
|
||||
"curses",
|
||||
"dataclasses",
|
||||
"datetime",
|
||||
"dbm",
|
||||
"decimal",
|
||||
"difflib",
|
||||
"dis",
|
||||
"distutils",
|
||||
"doctest",
|
||||
"dummy_threading",
|
||||
"email",
|
||||
"encodings",
|
||||
"ensurepip",
|
||||
"enum",
|
||||
"errno",
|
||||
"faulthandler",
|
||||
"fcntl",
|
||||
"filecmp",
|
||||
"fileinput",
|
||||
"fnmatch",
|
||||
"formatter",
|
||||
"fractions",
|
||||
"ftplib",
|
||||
"functools",
|
||||
"gc",
|
||||
"getopt",
|
||||
"getpass",
|
||||
"gettext",
|
||||
"glob",
|
||||
"grp",
|
||||
"gzip",
|
||||
"hashlib",
|
||||
"heapq",
|
||||
"hmac",
|
||||
"html",
|
||||
"http",
|
||||
"imaplib",
|
||||
"imghdr",
|
||||
"imp",
|
||||
"importlib",
|
||||
"inspect",
|
||||
"io",
|
||||
"ipaddress",
|
||||
"itertools",
|
||||
"json",
|
||||
"keyword",
|
||||
"lib2to3",
|
||||
"linecache",
|
||||
"locale",
|
||||
"logging",
|
||||
"lzma",
|
||||
"macpath",
|
||||
"mailbox",
|
||||
"mailcap",
|
||||
"marshal",
|
||||
"math",
|
||||
"mimetypes",
|
||||
"mmap",
|
||||
"modulefinder",
|
||||
"msilib",
|
||||
"msvcrt",
|
||||
"multiprocessing",
|
||||
"netrc",
|
||||
"nis",
|
||||
"nntplib",
|
||||
"ntpath",
|
||||
"numbers",
|
||||
"operator",
|
||||
"optparse",
|
||||
"os",
|
||||
"ossaudiodev",
|
||||
"parser",
|
||||
"pathlib",
|
||||
"pdb",
|
||||
"pickle",
|
||||
"pickletools",
|
||||
"pipes",
|
||||
"pkgutil",
|
||||
"platform",
|
||||
"plistlib",
|
||||
"poplib",
|
||||
"posix",
|
||||
"posixpath",
|
||||
"pprint",
|
||||
"profile",
|
||||
"pstats",
|
||||
"pty",
|
||||
"pwd",
|
||||
"py_compile",
|
||||
"pyclbr",
|
||||
"pydoc",
|
||||
"queue",
|
||||
"quopri",
|
||||
"random",
|
||||
"re",
|
||||
"readline",
|
||||
"reprlib",
|
||||
"resource",
|
||||
"rlcompleter",
|
||||
"runpy",
|
||||
"sched",
|
||||
"secrets",
|
||||
"select",
|
||||
"selectors",
|
||||
"shelve",
|
||||
"shlex",
|
||||
"shutil",
|
||||
"signal",
|
||||
"site",
|
||||
"smtpd",
|
||||
"smtplib",
|
||||
"sndhdr",
|
||||
"socket",
|
||||
"socketserver",
|
||||
"spwd",
|
||||
"sqlite3",
|
||||
"sre",
|
||||
"sre_compile",
|
||||
"sre_constants",
|
||||
"sre_parse",
|
||||
"ssl",
|
||||
"stat",
|
||||
"statistics",
|
||||
"string",
|
||||
"stringprep",
|
||||
"struct",
|
||||
"subprocess",
|
||||
"sunau",
|
||||
"symbol",
|
||||
"symtable",
|
||||
"sys",
|
||||
"sysconfig",
|
||||
"syslog",
|
||||
"tabnanny",
|
||||
"tarfile",
|
||||
"telnetlib",
|
||||
"tempfile",
|
||||
"termios",
|
||||
"test",
|
||||
"textwrap",
|
||||
"threading",
|
||||
"time",
|
||||
"timeit",
|
||||
"tkinter",
|
||||
"token",
|
||||
"tokenize",
|
||||
"trace",
|
||||
"traceback",
|
||||
"tracemalloc",
|
||||
"tty",
|
||||
"turtle",
|
||||
"turtledemo",
|
||||
"types",
|
||||
"typing",
|
||||
"unicodedata",
|
||||
"unittest",
|
||||
"urllib",
|
||||
"uu",
|
||||
"uuid",
|
||||
"venv",
|
||||
"warnings",
|
||||
"wave",
|
||||
"weakref",
|
||||
"webbrowser",
|
||||
"winreg",
|
||||
"winsound",
|
||||
"wsgiref",
|
||||
"xdrlib",
|
||||
"xml",
|
||||
"xmlrpc",
|
||||
"zipapp",
|
||||
"zipfile",
|
||||
"zipimport",
|
||||
"zlib",
|
||||
}
|
||||
224
.venv/lib/python3.8/site-packages/isort/stdlibs/py38.py
Normal file
224
.venv/lib/python3.8/site-packages/isort/stdlibs/py38.py
Normal file
|
|
@ -0,0 +1,224 @@
|
|||
"""
|
||||
File contains the standard library of Python 3.8.
|
||||
|
||||
DO NOT EDIT. If the standard library changes, a new list should be created
|
||||
using the mkstdlibs.py script.
|
||||
"""
|
||||
|
||||
stdlib = {
|
||||
"_ast",
|
||||
"_dummy_thread",
|
||||
"_thread",
|
||||
"abc",
|
||||
"aifc",
|
||||
"argparse",
|
||||
"array",
|
||||
"ast",
|
||||
"asynchat",
|
||||
"asyncio",
|
||||
"asyncore",
|
||||
"atexit",
|
||||
"audioop",
|
||||
"base64",
|
||||
"bdb",
|
||||
"binascii",
|
||||
"binhex",
|
||||
"bisect",
|
||||
"builtins",
|
||||
"bz2",
|
||||
"cProfile",
|
||||
"calendar",
|
||||
"cgi",
|
||||
"cgitb",
|
||||
"chunk",
|
||||
"cmath",
|
||||
"cmd",
|
||||
"code",
|
||||
"codecs",
|
||||
"codeop",
|
||||
"collections",
|
||||
"colorsys",
|
||||
"compileall",
|
||||
"concurrent",
|
||||
"configparser",
|
||||
"contextlib",
|
||||
"contextvars",
|
||||
"copy",
|
||||
"copyreg",
|
||||
"crypt",
|
||||
"csv",
|
||||
"ctypes",
|
||||
"curses",
|
||||
"dataclasses",
|
||||
"datetime",
|
||||
"dbm",
|
||||
"decimal",
|
||||
"difflib",
|
||||
"dis",
|
||||
"distutils",
|
||||
"doctest",
|
||||
"dummy_threading",
|
||||
"email",
|
||||
"encodings",
|
||||
"ensurepip",
|
||||
"enum",
|
||||
"errno",
|
||||
"faulthandler",
|
||||
"fcntl",
|
||||
"filecmp",
|
||||
"fileinput",
|
||||
"fnmatch",
|
||||
"formatter",
|
||||
"fractions",
|
||||
"ftplib",
|
||||
"functools",
|
||||
"gc",
|
||||
"getopt",
|
||||
"getpass",
|
||||
"gettext",
|
||||
"glob",
|
||||
"grp",
|
||||
"gzip",
|
||||
"hashlib",
|
||||
"heapq",
|
||||
"hmac",
|
||||
"html",
|
||||
"http",
|
||||
"imaplib",
|
||||
"imghdr",
|
||||
"imp",
|
||||
"importlib",
|
||||
"inspect",
|
||||
"io",
|
||||
"ipaddress",
|
||||
"itertools",
|
||||
"json",
|
||||
"keyword",
|
||||
"lib2to3",
|
||||
"linecache",
|
||||
"locale",
|
||||
"logging",
|
||||
"lzma",
|
||||
"mailbox",
|
||||
"mailcap",
|
||||
"marshal",
|
||||
"math",
|
||||
"mimetypes",
|
||||
"mmap",
|
||||
"modulefinder",
|
||||
"msilib",
|
||||
"msvcrt",
|
||||
"multiprocessing",
|
||||
"netrc",
|
||||
"nis",
|
||||
"nntplib",
|
||||
"ntpath",
|
||||
"numbers",
|
||||
"operator",
|
||||
"optparse",
|
||||
"os",
|
||||
"ossaudiodev",
|
||||
"parser",
|
||||
"pathlib",
|
||||
"pdb",
|
||||
"pickle",
|
||||
"pickletools",
|
||||
"pipes",
|
||||
"pkgutil",
|
||||
"platform",
|
||||
"plistlib",
|
||||
"poplib",
|
||||
"posix",
|
||||
"posixpath",
|
||||
"pprint",
|
||||
"profile",
|
||||
"pstats",
|
||||
"pty",
|
||||
"pwd",
|
||||
"py_compile",
|
||||
"pyclbr",
|
||||
"pydoc",
|
||||
"queue",
|
||||
"quopri",
|
||||
"random",
|
||||
"re",
|
||||
"readline",
|
||||
"reprlib",
|
||||
"resource",
|
||||
"rlcompleter",
|
||||
"runpy",
|
||||
"sched",
|
||||
"secrets",
|
||||
"select",
|
||||
"selectors",
|
||||
"shelve",
|
||||
"shlex",
|
||||
"shutil",
|
||||
"signal",
|
||||
"site",
|
||||
"smtpd",
|
||||
"smtplib",
|
||||
"sndhdr",
|
||||
"socket",
|
||||
"socketserver",
|
||||
"spwd",
|
||||
"sqlite3",
|
||||
"sre",
|
||||
"sre_compile",
|
||||
"sre_constants",
|
||||
"sre_parse",
|
||||
"ssl",
|
||||
"stat",
|
||||
"statistics",
|
||||
"string",
|
||||
"stringprep",
|
||||
"struct",
|
||||
"subprocess",
|
||||
"sunau",
|
||||
"symbol",
|
||||
"symtable",
|
||||
"sys",
|
||||
"sysconfig",
|
||||
"syslog",
|
||||
"tabnanny",
|
||||
"tarfile",
|
||||
"telnetlib",
|
||||
"tempfile",
|
||||
"termios",
|
||||
"test",
|
||||
"textwrap",
|
||||
"threading",
|
||||
"time",
|
||||
"timeit",
|
||||
"tkinter",
|
||||
"token",
|
||||
"tokenize",
|
||||
"trace",
|
||||
"traceback",
|
||||
"tracemalloc",
|
||||
"tty",
|
||||
"turtle",
|
||||
"turtledemo",
|
||||
"types",
|
||||
"typing",
|
||||
"unicodedata",
|
||||
"unittest",
|
||||
"urllib",
|
||||
"uu",
|
||||
"uuid",
|
||||
"venv",
|
||||
"warnings",
|
||||
"wave",
|
||||
"weakref",
|
||||
"webbrowser",
|
||||
"winreg",
|
||||
"winsound",
|
||||
"wsgiref",
|
||||
"xdrlib",
|
||||
"xml",
|
||||
"xmlrpc",
|
||||
"zipapp",
|
||||
"zipfile",
|
||||
"zipimport",
|
||||
"zlib",
|
||||
}
|
||||
224
.venv/lib/python3.8/site-packages/isort/stdlibs/py39.py
Normal file
224
.venv/lib/python3.8/site-packages/isort/stdlibs/py39.py
Normal file
|
|
@ -0,0 +1,224 @@
|
|||
"""
|
||||
File contains the standard library of Python 3.9.
|
||||
|
||||
DO NOT EDIT. If the standard library changes, a new list should be created
|
||||
using the mkstdlibs.py script.
|
||||
"""
|
||||
|
||||
stdlib = {
|
||||
"_ast",
|
||||
"_thread",
|
||||
"abc",
|
||||
"aifc",
|
||||
"argparse",
|
||||
"array",
|
||||
"ast",
|
||||
"asynchat",
|
||||
"asyncio",
|
||||
"asyncore",
|
||||
"atexit",
|
||||
"audioop",
|
||||
"base64",
|
||||
"bdb",
|
||||
"binascii",
|
||||
"binhex",
|
||||
"bisect",
|
||||
"builtins",
|
||||
"bz2",
|
||||
"cProfile",
|
||||
"calendar",
|
||||
"cgi",
|
||||
"cgitb",
|
||||
"chunk",
|
||||
"cmath",
|
||||
"cmd",
|
||||
"code",
|
||||
"codecs",
|
||||
"codeop",
|
||||
"collections",
|
||||
"colorsys",
|
||||
"compileall",
|
||||
"concurrent",
|
||||
"configparser",
|
||||
"contextlib",
|
||||
"contextvars",
|
||||
"copy",
|
||||
"copyreg",
|
||||
"crypt",
|
||||
"csv",
|
||||
"ctypes",
|
||||
"curses",
|
||||
"dataclasses",
|
||||
"datetime",
|
||||
"dbm",
|
||||
"decimal",
|
||||
"difflib",
|
||||
"dis",
|
||||
"distutils",
|
||||
"doctest",
|
||||
"email",
|
||||
"encodings",
|
||||
"ensurepip",
|
||||
"enum",
|
||||
"errno",
|
||||
"faulthandler",
|
||||
"fcntl",
|
||||
"filecmp",
|
||||
"fileinput",
|
||||
"fnmatch",
|
||||
"formatter",
|
||||
"fractions",
|
||||
"ftplib",
|
||||
"functools",
|
||||
"gc",
|
||||
"getopt",
|
||||
"getpass",
|
||||
"gettext",
|
||||
"glob",
|
||||
"graphlib",
|
||||
"grp",
|
||||
"gzip",
|
||||
"hashlib",
|
||||
"heapq",
|
||||
"hmac",
|
||||
"html",
|
||||
"http",
|
||||
"imaplib",
|
||||
"imghdr",
|
||||
"imp",
|
||||
"importlib",
|
||||
"inspect",
|
||||
"io",
|
||||
"ipaddress",
|
||||
"itertools",
|
||||
"json",
|
||||
"keyword",
|
||||
"lib2to3",
|
||||
"linecache",
|
||||
"locale",
|
||||
"logging",
|
||||
"lzma",
|
||||
"mailbox",
|
||||
"mailcap",
|
||||
"marshal",
|
||||
"math",
|
||||
"mimetypes",
|
||||
"mmap",
|
||||
"modulefinder",
|
||||
"msilib",
|
||||
"msvcrt",
|
||||
"multiprocessing",
|
||||
"netrc",
|
||||
"nis",
|
||||
"nntplib",
|
||||
"ntpath",
|
||||
"numbers",
|
||||
"operator",
|
||||
"optparse",
|
||||
"os",
|
||||
"ossaudiodev",
|
||||
"parser",
|
||||
"pathlib",
|
||||
"pdb",
|
||||
"pickle",
|
||||
"pickletools",
|
||||
"pipes",
|
||||
"pkgutil",
|
||||
"platform",
|
||||
"plistlib",
|
||||
"poplib",
|
||||
"posix",
|
||||
"posixpath",
|
||||
"pprint",
|
||||
"profile",
|
||||
"pstats",
|
||||
"pty",
|
||||
"pwd",
|
||||
"py_compile",
|
||||
"pyclbr",
|
||||
"pydoc",
|
||||
"queue",
|
||||
"quopri",
|
||||
"random",
|
||||
"re",
|
||||
"readline",
|
||||
"reprlib",
|
||||
"resource",
|
||||
"rlcompleter",
|
||||
"runpy",
|
||||
"sched",
|
||||
"secrets",
|
||||
"select",
|
||||
"selectors",
|
||||
"shelve",
|
||||
"shlex",
|
||||
"shutil",
|
||||
"signal",
|
||||
"site",
|
||||
"smtpd",
|
||||
"smtplib",
|
||||
"sndhdr",
|
||||
"socket",
|
||||
"socketserver",
|
||||
"spwd",
|
||||
"sqlite3",
|
||||
"sre",
|
||||
"sre_compile",
|
||||
"sre_constants",
|
||||
"sre_parse",
|
||||
"ssl",
|
||||
"stat",
|
||||
"statistics",
|
||||
"string",
|
||||
"stringprep",
|
||||
"struct",
|
||||
"subprocess",
|
||||
"sunau",
|
||||
"symbol",
|
||||
"symtable",
|
||||
"sys",
|
||||
"sysconfig",
|
||||
"syslog",
|
||||
"tabnanny",
|
||||
"tarfile",
|
||||
"telnetlib",
|
||||
"tempfile",
|
||||
"termios",
|
||||
"test",
|
||||
"textwrap",
|
||||
"threading",
|
||||
"time",
|
||||
"timeit",
|
||||
"tkinter",
|
||||
"token",
|
||||
"tokenize",
|
||||
"trace",
|
||||
"traceback",
|
||||
"tracemalloc",
|
||||
"tty",
|
||||
"turtle",
|
||||
"turtledemo",
|
||||
"types",
|
||||
"typing",
|
||||
"unicodedata",
|
||||
"unittest",
|
||||
"urllib",
|
||||
"uu",
|
||||
"uuid",
|
||||
"venv",
|
||||
"warnings",
|
||||
"wave",
|
||||
"weakref",
|
||||
"webbrowser",
|
||||
"winreg",
|
||||
"winsound",
|
||||
"wsgiref",
|
||||
"xdrlib",
|
||||
"xml",
|
||||
"xmlrpc",
|
||||
"zipapp",
|
||||
"zipfile",
|
||||
"zipimport",
|
||||
"zlib",
|
||||
"zoneinfo",
|
||||
}
|
||||
72
.venv/lib/python3.8/site-packages/isort/utils.py
Normal file
72
.venv/lib/python3.8/site-packages/isort/utils.py
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional, Tuple
|
||||
|
||||
|
||||
class TrieNode:
|
||||
def __init__(self, config_file: str = "", config_data: Optional[Dict[str, Any]] = None) -> None:
|
||||
if not config_data:
|
||||
config_data = {}
|
||||
|
||||
self.nodes: Dict[str, TrieNode] = {}
|
||||
self.config_info: Tuple[str, Dict[str, Any]] = (config_file, config_data)
|
||||
|
||||
|
||||
class Trie:
|
||||
"""
|
||||
A prefix tree to store the paths of all config files and to search the nearest config
|
||||
associated with each file
|
||||
"""
|
||||
|
||||
def __init__(self, config_file: str = "", config_data: Optional[Dict[str, Any]] = None) -> None:
|
||||
self.root: TrieNode = TrieNode(config_file, config_data)
|
||||
|
||||
def insert(self, config_file: str, config_data: Dict[str, Any]) -> None:
|
||||
resolved_config_path_as_tuple = Path(config_file).parent.resolve().parts
|
||||
|
||||
temp = self.root
|
||||
|
||||
for path in resolved_config_path_as_tuple:
|
||||
if path not in temp.nodes:
|
||||
temp.nodes[path] = TrieNode()
|
||||
|
||||
temp = temp.nodes[path]
|
||||
|
||||
temp.config_info = (config_file, config_data)
|
||||
|
||||
def search(self, filename: str) -> Tuple[str, Dict[str, Any]]:
|
||||
"""
|
||||
Returns the closest config relative to filename by doing a depth
|
||||
first search on the prefix tree.
|
||||
"""
|
||||
resolved_file_path_as_tuple = Path(filename).resolve().parts
|
||||
|
||||
temp = self.root
|
||||
|
||||
last_stored_config: Tuple[str, Dict[str, Any]] = ("", {})
|
||||
|
||||
for path in resolved_file_path_as_tuple:
|
||||
if temp.config_info[0]:
|
||||
last_stored_config = temp.config_info
|
||||
|
||||
if path not in temp.nodes:
|
||||
break
|
||||
|
||||
temp = temp.nodes[path]
|
||||
|
||||
return last_stored_config
|
||||
|
||||
|
||||
def exists_case_sensitive(path: str) -> bool:
|
||||
"""Returns if the given path exists and also matches the case on Windows.
|
||||
|
||||
When finding files that can be imported, it is important for the cases to match because while
|
||||
file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows,
|
||||
Python can only import using the case of the real file.
|
||||
"""
|
||||
result = os.path.exists(path)
|
||||
if (sys.platform.startswith("win") or sys.platform == "darwin") and result: # pragma: no cover
|
||||
directory, basename = os.path.split(path)
|
||||
result = basename in os.listdir(directory)
|
||||
return result
|
||||
139
.venv/lib/python3.8/site-packages/isort/wrap.py
Normal file
139
.venv/lib/python3.8/site-packages/isort/wrap.py
Normal file
|
|
@ -0,0 +1,139 @@
|
|||
import copy
|
||||
import re
|
||||
from typing import List, Optional, Sequence
|
||||
|
||||
from .settings import DEFAULT_CONFIG, Config
|
||||
from .wrap_modes import WrapModes as Modes
|
||||
from .wrap_modes import formatter_from_string
|
||||
|
||||
|
||||
def import_statement(
|
||||
import_start: str,
|
||||
from_imports: List[str],
|
||||
comments: Sequence[str] = (),
|
||||
line_separator: str = "\n",
|
||||
config: Config = DEFAULT_CONFIG,
|
||||
multi_line_output: Optional[Modes] = None,
|
||||
) -> str:
|
||||
"""Returns a multi-line wrapped form of the provided from import statement."""
|
||||
formatter = formatter_from_string((multi_line_output or config.multi_line_output).name)
|
||||
dynamic_indent = " " * (len(import_start) + 1)
|
||||
indent = config.indent
|
||||
line_length = config.wrap_length or config.line_length
|
||||
statement = formatter(
|
||||
statement=import_start,
|
||||
imports=copy.copy(from_imports),
|
||||
white_space=dynamic_indent,
|
||||
indent=indent,
|
||||
line_length=line_length,
|
||||
comments=comments,
|
||||
line_separator=line_separator,
|
||||
comment_prefix=config.comment_prefix,
|
||||
include_trailing_comma=config.include_trailing_comma,
|
||||
remove_comments=config.ignore_comments,
|
||||
)
|
||||
if config.balanced_wrapping:
|
||||
lines = statement.split(line_separator)
|
||||
line_count = len(lines)
|
||||
if len(lines) > 1:
|
||||
minimum_length = min(len(line) for line in lines[:-1])
|
||||
else:
|
||||
minimum_length = 0
|
||||
new_import_statement = statement
|
||||
while len(lines[-1]) < minimum_length and len(lines) == line_count and line_length > 10:
|
||||
statement = new_import_statement
|
||||
line_length -= 1
|
||||
new_import_statement = formatter(
|
||||
statement=import_start,
|
||||
imports=copy.copy(from_imports),
|
||||
white_space=dynamic_indent,
|
||||
indent=indent,
|
||||
line_length=line_length,
|
||||
comments=comments,
|
||||
line_separator=line_separator,
|
||||
comment_prefix=config.comment_prefix,
|
||||
include_trailing_comma=config.include_trailing_comma,
|
||||
remove_comments=config.ignore_comments,
|
||||
)
|
||||
lines = new_import_statement.split(line_separator)
|
||||
if statement.count(line_separator) == 0:
|
||||
return _wrap_line(statement, line_separator, config)
|
||||
return statement
|
||||
|
||||
|
||||
def line(content: str, line_separator: str, config: Config = DEFAULT_CONFIG) -> str:
|
||||
"""Returns a line wrapped to the specified line-length, if possible."""
|
||||
wrap_mode = config.multi_line_output
|
||||
if len(content) > config.line_length and wrap_mode != Modes.NOQA: # type: ignore
|
||||
line_without_comment = content
|
||||
comment = None
|
||||
if "#" in content:
|
||||
line_without_comment, comment = content.split("#", 1)
|
||||
for splitter in ("import ", ".", "as "):
|
||||
exp = r"\b" + re.escape(splitter) + r"\b"
|
||||
if re.search(exp, line_without_comment) and not line_without_comment.strip().startswith(
|
||||
splitter
|
||||
):
|
||||
line_parts = re.split(exp, line_without_comment)
|
||||
if comment and not (config.use_parentheses and "noqa" in comment):
|
||||
_comma_maybe = (
|
||||
","
|
||||
if (
|
||||
config.include_trailing_comma
|
||||
and config.use_parentheses
|
||||
and not line_without_comment.rstrip().endswith(",")
|
||||
)
|
||||
else ""
|
||||
)
|
||||
line_parts[
|
||||
-1
|
||||
] = f"{line_parts[-1].strip()}{_comma_maybe}{config.comment_prefix}{comment}"
|
||||
next_line = []
|
||||
while (len(content) + 2) > (
|
||||
config.wrap_length or config.line_length
|
||||
) and line_parts:
|
||||
next_line.append(line_parts.pop())
|
||||
content = splitter.join(line_parts)
|
||||
if not content:
|
||||
content = next_line.pop()
|
||||
|
||||
cont_line = _wrap_line(
|
||||
config.indent + splitter.join(next_line).lstrip(),
|
||||
line_separator,
|
||||
config,
|
||||
)
|
||||
if config.use_parentheses:
|
||||
if splitter == "as ":
|
||||
output = f"{content}{splitter}{cont_line.lstrip()}"
|
||||
else:
|
||||
_comma = "," if config.include_trailing_comma and not comment else ""
|
||||
|
||||
if wrap_mode in (
|
||||
Modes.VERTICAL_HANGING_INDENT, # type: ignore
|
||||
Modes.VERTICAL_GRID_GROUPED, # type: ignore
|
||||
):
|
||||
_separator = line_separator
|
||||
else:
|
||||
_separator = ""
|
||||
_comment = ""
|
||||
if comment and "noqa" in comment:
|
||||
_comment = f"{config.comment_prefix}{comment}"
|
||||
cont_line = cont_line.rstrip()
|
||||
_comma = "," if config.include_trailing_comma else ""
|
||||
output = (
|
||||
f"{content}{splitter}({_comment}"
|
||||
f"{line_separator}{cont_line}{_comma}{_separator})"
|
||||
)
|
||||
lines = output.split(line_separator)
|
||||
if config.comment_prefix in lines[-1] and lines[-1].endswith(")"):
|
||||
content, comment = lines[-1].split(config.comment_prefix, 1)
|
||||
lines[-1] = content + ")" + config.comment_prefix + comment[:-1]
|
||||
return line_separator.join(lines)
|
||||
return f"{content}{splitter}\\{line_separator}{cont_line}"
|
||||
elif len(content) > config.line_length and wrap_mode == Modes.NOQA and "# NOQA" not in content: # type: ignore
|
||||
return f"{content}{config.comment_prefix} NOQA"
|
||||
|
||||
return content
|
||||
|
||||
|
||||
_wrap_line = line
|
||||
376
.venv/lib/python3.8/site-packages/isort/wrap_modes.py
Normal file
376
.venv/lib/python3.8/site-packages/isort/wrap_modes.py
Normal file
|
|
@ -0,0 +1,376 @@
|
|||
"""Defines all wrap modes that can be used when outputting formatted imports"""
|
||||
import enum
|
||||
from inspect import signature
|
||||
from typing import Any, Callable, Dict, List
|
||||
|
||||
import isort.comments
|
||||
|
||||
_wrap_modes: Dict[str, Callable[..., str]] = {}
|
||||
|
||||
|
||||
def from_string(value: str) -> "WrapModes":
    """Resolve a ``WrapModes`` member from its name or its integer value."""
    by_name = getattr(WrapModes, str(value), None)
    if by_name is not None:
        return by_name
    return WrapModes(int(value))
|
||||
|
||||
|
||||
def formatter_from_string(name: str) -> Callable[..., str]:
    """Look up a registered wrap-mode formatter by name, defaulting to ``grid``."""
    key = name.upper()
    if key in _wrap_modes:
        return _wrap_modes[key]
    return grid
|
||||
|
||||
|
||||
def _wrap_mode_interface(
|
||||
statement: str,
|
||||
imports: List[str],
|
||||
white_space: str,
|
||||
indent: str,
|
||||
line_length: int,
|
||||
comments: List[str],
|
||||
line_separator: str,
|
||||
comment_prefix: str,
|
||||
include_trailing_comma: bool,
|
||||
remove_comments: bool,
|
||||
) -> str:
|
||||
"""Defines the common interface used by all wrap mode functions"""
|
||||
return ""
|
||||
|
||||
|
||||
def _wrap_mode(function: Callable[..., str]) -> Callable[..., str]:
    """Register *function* as a wrap mode.

    The function's name and its registration order are significant: both are
    later used to construct the ``WrapModes`` enum.
    """
    function.__signature__ = signature(_wrap_mode_interface)  # type: ignore
    function.__annotations__ = _wrap_mode_interface.__annotations__
    _wrap_modes[function.__name__.upper()] = function
    return function
|
||||
|
||||
|
||||
@_wrap_mode
def grid(**interface: Any) -> str:
    """Wrap imports in grid form: imports stay comma-separated on one line and
    spill onto new lines (prefixed with ``white_space``) whenever the current
    physical line would exceed ``line_length``.
    """
    if not interface["imports"]:
        return ""

    # Open the parenthesized list with the first import on the statement line.
    interface["statement"] += "(" + interface["imports"].pop(0)
    while interface["imports"]:
        next_import = interface["imports"].pop(0)
        # Tentatively append the next import (plus any pending comments) to
        # check whether the resulting last physical line still fits.
        next_statement = isort.comments.add_to_line(
            interface["comments"],
            interface["statement"] + ", " + next_import,
            removed=interface["remove_comments"],
            comment_prefix=interface["comment_prefix"],
        )
        if (
            len(next_statement.split(interface["line_separator"])[-1]) + 1
            > interface["line_length"]
        ):
            # Too long: split the import itself word-by-word (it may contain an
            # "as" alias) across continuation lines.
            lines = [f"{interface['white_space']}{next_import.split(' ')[0]}"]
            for part in next_import.split(" ")[1:]:
                new_line = f"{lines[-1]} {part}"
                if len(new_line) + 1 > interface["line_length"]:
                    lines.append(f"{interface['white_space']}{part}")
                else:
                    lines[-1] = new_line
            next_import = interface["line_separator"].join(lines)
            # Close the accepted line with a comma (and comments), then
            # continue with the wrapped import on the next line.
            interface["statement"] = (
                isort.comments.add_to_line(
                    interface["comments"],
                    f"{interface['statement']},",
                    removed=interface["remove_comments"],
                    comment_prefix=interface["comment_prefix"],
                )
                + f"{interface['line_separator']}{next_import}"
            )
            # Comments are emitted once, on the first line only.
            interface["comments"] = []
        else:
            interface["statement"] += ", " + next_import
    return f"{interface['statement']}{',' if interface['include_trailing_comma'] else ''})"
|
||||
|
||||
|
||||
@_wrap_mode
def vertical(**interface: Any) -> str:
    """Wrap imports vertically: one import per line, each continuation line
    aligned under the opening parenthesis via ``white_space``.
    """
    if not interface["imports"]:
        return ""

    # Comments (if any) attach to the first import's line.
    head = isort.comments.add_to_line(
        interface["comments"],
        interface["imports"].pop(0) + ",",
        removed=interface["remove_comments"],
        comment_prefix=interface["comment_prefix"],
    )
    first_import = head + interface["line_separator"] + interface["white_space"]

    joiner = "," + interface["line_separator"] + interface["white_space"]
    remaining_imports = joiner.join(interface["imports"])
    trailing_comma = "," if interface["include_trailing_comma"] else ""
    return f"{interface['statement']}({first_import}{remaining_imports}{trailing_comma})"
|
||||
|
||||
|
||||
def _hanging_indent_end_line(line: str) -> str:
|
||||
if not line.endswith(" "):
|
||||
line += " "
|
||||
return line + "\\"
|
||||
|
||||
|
||||
@_wrap_mode
def hanging_indent(**interface: Any) -> str:
    """Wrap using backslash continuations, indenting continuation lines with
    ``indent``.  Lines are limited to ``line_length - 3`` to leave room for
    the trailing " \\" continuation marker.
    """
    if not interface["imports"]:
        return ""

    line_length_limit = interface["line_length"] - 3

    next_import = interface["imports"].pop(0)
    next_statement = interface["statement"] + next_import
    # Check for first import
    if len(next_statement) > line_length_limit:
        next_statement = (
            _hanging_indent_end_line(interface["statement"])
            + interface["line_separator"]
            + interface["indent"]
            + next_import
        )

    interface["statement"] = next_statement
    while interface["imports"]:
        next_import = interface["imports"].pop(0)
        next_statement = interface["statement"] + ", " + next_import
        # Only the last physical line is measured against the limit.
        if len(next_statement.split(interface["line_separator"])[-1]) > line_length_limit:
            next_statement = (
                _hanging_indent_end_line(interface["statement"] + ",")
                + f"{interface['line_separator']}{interface['indent']}{next_import}"
            )
        interface["statement"] = next_statement

    interface[
        "statement"
    ] = f"{interface['statement']}{',' if interface['include_trailing_comma'] else ''}"
    if interface["comments"]:
        statement_with_comments = isort.comments.add_to_line(
            interface["comments"],
            interface["statement"],
            removed=interface["remove_comments"],
            comment_prefix=interface["comment_prefix"],
        )
        # "+ 2" allows the comment prefix and its separating space beyond the
        # continuation-reserved limit.
        if len(statement_with_comments.split(interface["line_separator"])[-1]) <= (
            line_length_limit + 2
        ):
            return statement_with_comments
        # Comments do not fit on the final line: continue them onto their own
        # indented line.
        return (
            _hanging_indent_end_line(interface["statement"])
            + str(interface["line_separator"])
            + isort.comments.add_to_line(
                interface["comments"],
                interface["indent"],
                removed=interface["remove_comments"],
                comment_prefix=interface["comment_prefix"].lstrip(),
            )
        )
    return str(interface["statement"])
|
||||
|
||||
|
||||
@_wrap_mode
def vertical_hanging_indent(**interface: Any) -> str:
    """Wrap with every import on its own ``indent``-prefixed line and the
    closing parenthesis on a line of its own; comments stay on the "(" line.
    """
    comments_on_open_line = isort.comments.add_to_line(
        interface["comments"],
        "",
        removed=interface["remove_comments"],
        comment_prefix=interface["comment_prefix"],
    )
    separator = interface["line_separator"]
    joiner = "," + separator + interface["indent"]
    import_block = joiner.join(interface["imports"])
    trailing_comma = "," if interface["include_trailing_comma"] else ""
    return (
        interface["statement"]
        + "("
        + comments_on_open_line
        + separator
        + interface["indent"]
        + import_block
        + trailing_comma
        + separator
        + ")"
    )
|
||||
|
||||
|
||||
def _vertical_grid_common(need_trailing_char: bool, **interface: Any) -> str:
    """Shared implementation behind the vertical-grid wrap modes.

    Opens the parenthesized list (comments attach to the "(" line), then packs
    imports comma-separated, wrapping to a new ``indent``-prefixed line when a
    line would overflow.  ``need_trailing_char`` reserves one extra column on
    the final line for the ")" the caller appends.
    """
    if not interface["imports"]:
        return ""

    interface["statement"] += (
        isort.comments.add_to_line(
            interface["comments"],
            "(",
            removed=interface["remove_comments"],
            comment_prefix=interface["comment_prefix"],
        )
        + interface["line_separator"]
        + interface["indent"]
        + interface["imports"].pop(0)
    )
    while interface["imports"]:
        next_import = interface["imports"].pop(0)
        next_statement = f"{interface['statement']}, {next_import}"
        current_line_length = len(next_statement.split(interface["line_separator"])[-1])
        if interface["imports"] or interface["include_trailing_comma"]:
            # We need to account for a comma after this import.
            current_line_length += 1
        if not interface["imports"] and need_trailing_char:
            # We need to account for a closing ) we're going to add.
            current_line_length += 1
        if current_line_length > interface["line_length"]:
            # Overflow: start this import on a fresh indented line instead.
            next_statement = (
                f"{interface['statement']},{interface['line_separator']}"
                f"{interface['indent']}{next_import}"
            )
        interface["statement"] = next_statement
    if interface["include_trailing_comma"]:
        interface["statement"] += ","
    return str(interface["statement"])
|
||||
|
||||
|
||||
@_wrap_mode
def vertical_grid(**interface: Any) -> str:
    """Vertical-grid wrap closing the ")" on the same line as the last import."""
    body = _vertical_grid_common(need_trailing_char=True, **interface)
    return f"{body})"
|
||||
|
||||
|
||||
@_wrap_mode
def vertical_grid_grouped(**interface: Any) -> str:
    """Vertical-grid wrap placing the closing ")" on its own line."""
    body = _vertical_grid_common(need_trailing_char=False, **interface)
    closing = str(interface["line_separator"]) + ")"
    return body + closing
|
||||
|
||||
|
||||
@_wrap_mode
def vertical_grid_grouped_no_comma(**interface: Any) -> str:
    """Deprecated alias for ``vertical_grid_grouped``.

    Kept only so the ``WrapModes`` enum retains its historical slot; it must
    never actually be called.
    """
    raise NotImplementedError
|
||||
|
||||
|
||||
@_wrap_mode
def noqa(**interface: Any) -> str:
    """Emit the statement on a single line, appending a ``NOQA`` marker when it
    exceeds the configured line length (unless one is already present).
    """
    statement = f"{interface['statement']}{', '.join(interface['imports'])}"
    prefix = interface["comment_prefix"]
    comments = interface["comments"]
    comment_str = " ".join(comments)
    if comments:
        # Fits with comments attached, or already carries a NOQA marker:
        # emit as-is; otherwise inject "NOQA" before the comments.
        fits = (
            len(statement) + len(prefix) + 1 + len(comment_str) <= interface["line_length"]
        )
        if fits or "NOQA" in comments:
            return f"{statement}{prefix} {comment_str}"
        return f"{statement}{prefix} NOQA {comment_str}"

    if len(statement) <= interface["line_length"]:
        return statement
    return f"{statement}{prefix} NOQA"
|
||||
|
||||
|
||||
@_wrap_mode
def vertical_hanging_indent_bracket(**interface: Any) -> str:
    """Like ``vertical_hanging_indent`` but with the closing ")" indented."""
    if not interface["imports"]:
        return ""
    wrapped = vertical_hanging_indent(**interface)
    # Drop the trailing ")" and re-append it after the indent.
    without_bracket = wrapped[:-1]
    return without_bracket + interface["indent"] + ")"
|
||||
|
||||
|
||||
@_wrap_mode
def vertical_prefix_from_module_import(**interface: Any) -> str:
    """Wrap by repeating the full "from module import " prefix on each overflow
    line instead of using parentheses or backslash continuations.
    """
    if not interface["imports"]:
        return ""

    prefix_statement = interface["statement"]
    output_statement = prefix_statement + interface["imports"].pop(0)
    comments = interface["comments"]

    statement = output_statement
    statement_with_comments = ""
    for next_import in interface["imports"]:
        statement = statement + ", " + next_import
        statement_with_comments = isort.comments.add_to_line(
            comments,
            statement,
            removed=interface["remove_comments"],
            comment_prefix=interface["comment_prefix"],
        )
        if (
            len(statement_with_comments.split(interface["line_separator"])[-1]) + 1
            > interface["line_length"]
        ):
            # Overflow: finalize the previously accepted line (with comments)
            # and start a new line repeating the full import prefix.
            statement = (
                isort.comments.add_to_line(
                    comments,
                    output_statement,
                    removed=interface["remove_comments"],
                    comment_prefix=interface["comment_prefix"],
                )
                + f"{interface['line_separator']}{prefix_statement}{next_import}"
            )
            # Comments are only attached once, on the first emitted line.
            comments = []
        output_statement = statement

    # If comments were never flushed onto an earlier line, prefer the last
    # commented version of the statement.
    if comments and statement_with_comments:
        output_statement = statement_with_comments
    return str(output_statement)
|
||||
|
||||
|
||||
@_wrap_mode
def hanging_indent_with_parentheses(**interface: Any) -> str:
    """Wrap inside parentheses with hanging-indent continuation lines (no
    backslashes).  One column is reserved for the closing ")".
    """
    if not interface["imports"]:
        return ""

    line_length_limit = interface["line_length"] - 1

    interface["statement"] += "("
    next_import = interface["imports"].pop(0)
    next_statement = interface["statement"] + next_import
    # Check for first import
    if len(next_statement) > line_length_limit:
        next_statement = (
            isort.comments.add_to_line(
                interface["comments"],
                interface["statement"],
                removed=interface["remove_comments"],
                comment_prefix=interface["comment_prefix"],
            )
            + f"{interface['line_separator']}{interface['indent']}{next_import}"
        )
        # Comments are attached once, on the opening line.
        interface["comments"] = []
    interface["statement"] = next_statement
    while interface["imports"]:
        next_import = interface["imports"].pop(0)
        if (
            not interface["line_separator"] in interface["statement"]
            and "#" in interface["statement"]
        ):  # pragma: no cover # TODO: fix, this is because of test run inconsistency.
            # Single physical line already carrying a comment: insert the new
            # import before the comment rather than after it.
            line, comments = interface["statement"].split("#", 1)
            next_statement = (
                f"{line.rstrip()}, {next_import}{interface['comment_prefix']}{comments}"
            )
        else:
            next_statement = isort.comments.add_to_line(
                interface["comments"],
                interface["statement"] + ", " + next_import,
                removed=interface["remove_comments"],
                comment_prefix=interface["comment_prefix"],
            )
        current_line = next_statement.split(interface["line_separator"])[-1]
        if len(current_line) > line_length_limit:
            # Overflow: close the accepted line with a comma (plus comments)
            # and continue this import on a fresh indented line.
            next_statement = (
                isort.comments.add_to_line(
                    interface["comments"],
                    interface["statement"] + ",",
                    removed=interface["remove_comments"],
                    comment_prefix=interface["comment_prefix"],
                )
                + f"{interface['line_separator']}{interface['indent']}{next_import}"
            )
            interface["comments"] = []
        interface["statement"] = next_statement
    return f"{interface['statement']}{',' if interface['include_trailing_comma'] else ''})"
|
||||
|
||||
|
||||
@_wrap_mode
def backslash_grid(**interface: Any) -> str:
    """Grid wrap using backslash continuations, delegating to ``hanging_indent``
    with the indent derived from the grid alignment whitespace.
    """
    reduced_indent = interface["white_space"][:-1]
    interface["indent"] = reduced_indent
    return hanging_indent(**interface)
|
||||
|
||||
|
||||
# Build the public WrapModes enum from the wrap-mode functions registered above.
# Member order (and therefore each member's integer value, starting at 0) follows
# registration order, so function definition order is significant.
WrapModes = enum.Enum(  # type: ignore
    "WrapModes", {wrap_mode: index for index, wrap_mode in enumerate(_wrap_modes.keys())}
)
|
||||
Loading…
Add table
Add a link
Reference in a new issue