This commit is contained in:
Waylon Walker 2022-03-31 20:20:07 -05:00
commit 38355d2442
No known key found for this signature in database
GPG key ID: 66E2BF2B4190EFE4
9083 changed files with 1225834 additions and 0 deletions

View file

@ -0,0 +1,17 @@
import os.path

# Repository root prefix. Tests may override it via the MYPY_TEST_PREFIX
# environment variable (useful when the test data lives elsewhere).
provided_prefix = os.getenv('MYPY_TEST_PREFIX', None)
if provided_prefix:
    PREFIX = provided_prefix
else:
    # Default: two directories up from this file (mypy/test/config.py -> repo root).
    this_file_dir = os.path.dirname(os.path.realpath(__file__))
    PREFIX = os.path.dirname(os.path.dirname(this_file_dir))

# Location of test data files such as test case descriptions.
test_data_prefix = os.path.join(PREFIX, 'test-data', 'unit')
package_path = os.path.join(PREFIX, 'test-data', 'packages')

# Temp directory used for the temp files created when running test cases.
# This is *within* the tempfile.TemporaryDirectory that is chroot'ed per testcase.
# It is also hard-coded in numerous places, so don't change it.
test_temp_dir = 'tmp'

View file

@ -0,0 +1,705 @@
"""Utilities for processing .test files containing test case descriptions."""
import os.path
import os
import tempfile
import posixpath
import re
import shutil
from abc import abstractmethod
import sys
import pytest
from typing import List, Tuple, Set, Optional, Iterator, Any, Dict, NamedTuple, Union
from mypy.test.config import test_data_prefix, test_temp_dir, PREFIX
# Absolute, normalized repository root; substituted for <ROOT> in test data.
root_dir = os.path.normpath(PREFIX)
class UpdateFile(NamedTuple):
    """File modify/create operation: copy module contents from source_path."""
    module: str
    content: str
    target_path: str


class DeleteFile(NamedTuple):
    """File delete operation: delete module file."""
    module: str
    path: str


FileOperation = Union[UpdateFile, DeleteFile]
def parse_test_case(case: 'DataDrivenTestCase') -> None:
    """Parse and prepare a single case from suite with test case descriptions.

    This method is part of the setup phase, just before the test case is run.
    """
    test_items = parse_test_data(case.data, case.name)
    base_path = case.suite.base_path
    # Suites that need native path separators (e.g. cmdline tests) set native_sep.
    if case.suite.native_sep:
        join = os.path.join
    else:
        join = posixpath.join

    out_section_missing = case.suite.required_out_section
    normalize_output = True

    files: List[Tuple[str, str]] = []  # path and contents
    output_files: List[Tuple[str, str]] = []  # path and contents for output files
    output: List[str] = []  # Regular output errors
    output2: Dict[int, List[str]] = {}  # Output errors for incremental, runs 2+
    deleted_paths: Dict[int, Set[str]] = {}  # from run number of paths
    stale_modules: Dict[int, Set[str]] = {}  # from run number to module names
    rechecked_modules: Dict[int, Set[str]] = {}  # from run number module names
    triggered: List[str] = []  # Active triggers (one line per incremental step)
    targets: Dict[int, List[str]] = {}  # Fine-grained targets (per fine-grained update)

    # Process the parsed items. Each item has a header of form [id args],
    # optionally followed by lines of text.
    item = first_item = test_items[0]
    for item in test_items[1:]:
        if item.id == 'file' or item.id == 'outfile':
            # Record an extra file needed for the test case.
            assert item.arg is not None
            contents = expand_variables('\n'.join(item.data))
            file_entry = (join(base_path, item.arg), contents)
            if item.id == 'file':
                files.append(file_entry)
            else:
                output_files.append(file_entry)
        elif item.id in ('builtins', 'builtins_py2'):
            # Use an alternative stub file for the builtins module.
            assert item.arg is not None
            mpath = join(os.path.dirname(case.file), item.arg)
            fnam = 'builtins.pyi' if item.id == 'builtins' else '__builtin__.pyi'
            with open(mpath, encoding='utf8') as f:
                files.append((join(base_path, fnam), f.read()))
        elif item.id == 'typing':
            # Use an alternative stub file for the typing module.
            assert item.arg is not None
            src_path = join(os.path.dirname(case.file), item.arg)
            with open(src_path, encoding='utf8') as f:
                files.append((join(base_path, 'typing.pyi'), f.read()))
        elif re.match(r'stale[0-9]*$', item.id):
            # [stale] is shorthand for [stale1]; the section argument lists modules.
            passnum = 1 if item.id == 'stale' else int(item.id[len('stale'):])
            assert passnum > 0
            modules = (set() if item.arg is None else {t.strip() for t in item.arg.split(',')})
            stale_modules[passnum] = modules
        elif re.match(r'rechecked[0-9]*$', item.id):
            passnum = 1 if item.id == 'rechecked' else int(item.id[len('rechecked'):])
            assert passnum > 0
            modules = (set() if item.arg is None else {t.strip() for t in item.arg.split(',')})
            rechecked_modules[passnum] = modules
        elif re.match(r'targets[0-9]*$', item.id):
            passnum = 1 if item.id == 'targets' else int(item.id[len('targets'):])
            assert passnum > 0
            reprocessed = [] if item.arg is None else [t.strip() for t in item.arg.split(',')]
            targets[passnum] = reprocessed
        elif item.id == 'delete':
            # File/directory to delete during a multi-step test case
            assert item.arg is not None
            m = re.match(r'(.*)\.([0-9]+)$', item.arg)
            assert m, 'Invalid delete section: {}'.format(item.arg)
            num = int(m.group(2))
            assert num >= 2, "Can't delete during step {}".format(num)
            full = join(base_path, m.group(1))
            deleted_paths.setdefault(num, set()).add(full)
        elif re.match(r'out[0-9]*$', item.id):
            if item.arg is None:
                args = []
            else:
                args = item.arg.split(",")

            # [out] sections may carry flags such as version checks and
            # skip-path-normalization; only a matching version section is used.
            version_check = True
            for arg in args:
                if arg == 'skip-path-normalization':
                    normalize_output = False
                if arg.startswith("version"):
                    compare_op = arg[7:9]
                    if compare_op not in {">=", "=="}:
                        raise ValueError(
                            "{}, line {}: Only >= and == version checks are currently supported"
                            .format(
                                case.file, item.line
                            )
                        )
                    version_str = arg[9:]
                    try:
                        version = tuple(int(x) for x in version_str.split("."))
                    except ValueError:
                        raise ValueError(
                            '{}, line {}: "{}" is not a valid python version'.format(
                                case.file, item.line, version_str))
                    if compare_op == ">=":
                        version_check = sys.version_info >= version
                    elif compare_op == "==":
                        if not 1 < len(version) < 4:
                            raise ValueError(
                                '{}, line {}: Only minor or patch version checks '
                                'are currently supported with "==": "{}"'.format(
                                    case.file, item.line, version_str
                                )
                            )
                        version_check = sys.version_info[:len(version)] == version
            if version_check:
                tmp_output = [expand_variables(line) for line in item.data]
                if os.path.sep == '\\' and normalize_output:
                    tmp_output = [fix_win_path(line) for line in tmp_output]
                if item.id == 'out' or item.id == 'out1':
                    output = tmp_output
                else:
                    passnum = int(item.id[len('out'):])
                    assert passnum > 1
                    output2[passnum] = tmp_output
                out_section_missing = False
        elif item.id == 'triggered' and item.arg is None:
            triggered = item.data
        else:
            raise ValueError(
                'Invalid section header {} in {} at line {}'.format(
                    item.id, case.file, item.line))

    if out_section_missing:
        raise ValueError(
            '{}, line {}: Required output section not found'.format(
                case.file, first_item.line))

    for passnum in stale_modules.keys():
        if passnum not in rechecked_modules:
            # If the set of rechecked modules isn't specified, make it the same as the set
            # of modules with a stale public interface.
            rechecked_modules[passnum] = stale_modules[passnum]
        if (passnum in stale_modules
                and passnum in rechecked_modules
                and not stale_modules[passnum].issubset(rechecked_modules[passnum])):
            raise ValueError(
                ('Stale modules after pass {} must be a subset of rechecked '
                 'modules ({}:{})').format(passnum, case.file, first_item.line))

    # Expand '# E:'-style error comments (in the main section and in files)
    # into expected output lines.
    input = first_item.data
    expand_errors(input, output, 'main')
    for file_path, contents in files:
        expand_errors(contents.split('\n'), output, file_path)

    # Store results on the test case object for the suite's run_case().
    case.input = input
    case.output = output
    case.output2 = output2
    case.last_line = case.line + item.line + len(item.data) - 2
    case.files = files
    case.output_files = output_files
    case.expected_stale_modules = stale_modules
    case.expected_rechecked_modules = rechecked_modules
    case.deleted_paths = deleted_paths
    case.triggered = triggered or []
    case.normalize_output = normalize_output
    case.expected_fine_grained_targets = targets
class DataDrivenTestCase(pytest.Item):
    """Holds parsed data-driven test cases, and handles directory setup and teardown."""

    # Override parent member type
    parent: "DataSuiteCollector"

    input: List[str]
    output: List[str]  # Output for the first pass
    output2: Dict[int, List[str]]  # Output for runs 2+, indexed by run number

    # full path of test suite
    file = ''
    line = 0

    # (file path, file content) tuples
    files: List[Tuple[str, str]]
    expected_stale_modules: Dict[int, Set[str]]
    expected_rechecked_modules: Dict[int, Set[str]]
    expected_fine_grained_targets: Dict[int, List[str]]

    # Whether or not we should normalize the output to standardize things like
    # forward vs backward slashes in file paths for Windows vs Linux.
    normalize_output = True

    # Extra attributes used by some tests.
    last_line: int
    output_files: List[Tuple[str, str]]  # Path and contents for output files
    deleted_paths: Dict[int, Set[str]]  # Mapping run number -> paths
    triggered: List[str]  # Active triggers (one line per incremental step)

    def __init__(self,
                 parent: 'DataSuiteCollector',
                 suite: 'DataSuite',
                 file: str,
                 name: str,
                 writescache: bool,
                 only_when: str,
                 platform: Optional[str],
                 skip: bool,
                 xfail: bool,
                 data: str,
                 line: int) -> None:
        super().__init__(name, parent)
        self.suite = suite
        self.file = file
        self.writescache = writescache
        self.only_when = only_when
        # Cases tagged -posix/-windows are skipped on the other platform.
        if ((platform == 'windows' and sys.platform != 'win32')
                or (platform == 'posix' and sys.platform == 'win32')):
            skip = True
        self.skip = skip
        self.xfail = xfail
        self.data = data
        self.line = line
        # Set by setup() and reset by teardown().
        self.old_cwd: Optional[str] = None
        self.tmpdir: Optional[tempfile.TemporaryDirectory[str]] = None

    def runtest(self) -> None:
        if self.skip:
            pytest.skip()
        # TODO: add a better error message for when someone uses skip and xfail at the same time
        elif self.xfail:
            self.add_marker(pytest.mark.xfail)
        parent = self.getparent(DataSuiteCollector)
        assert parent is not None, 'Should not happen'
        suite = parent.obj()
        suite.setup()
        try:
            suite.run_case(self)
        except Exception:
            # As a debugging aid, support copying the contents of the tmp directory somewhere
            save_dir: Optional[str] = self.config.getoption("--save-failures-to", None)
            if save_dir:
                assert self.tmpdir is not None
                target_dir = os.path.join(save_dir, os.path.basename(self.tmpdir.name))
                print("Copying data from test {} to {}".format(self.name, target_dir))
                if not os.path.isabs(target_dir):
                    assert self.old_cwd
                    target_dir = os.path.join(self.old_cwd, target_dir)
                shutil.copytree(self.tmpdir.name, target_dir)
            raise

    def setup(self) -> None:
        """Parse the case and materialize its files inside a fresh temp directory."""
        parse_test_case(case=self)
        self.old_cwd = os.getcwd()
        self.tmpdir = tempfile.TemporaryDirectory(prefix='mypy-test-')
        os.chdir(self.tmpdir.name)
        os.mkdir(test_temp_dir)

        # Precalculate steps for find_steps()
        steps: Dict[int, List[FileOperation]] = {}

        for path, content in self.files:
            m = re.match(r'.*\.([0-9]+)$', path)
            if m:
                # Skip writing subsequent incremental steps - rather
                # store them as operations.
                num = int(m.group(1))
                assert num >= 2
                target_path = re.sub(r'\.[0-9]+$', '', path)
                module = module_from_path(target_path)
                operation = UpdateFile(module, content, target_path)
                steps.setdefault(num, []).append(operation)
            else:
                # Write the first incremental steps
                dir = os.path.dirname(path)
                os.makedirs(dir, exist_ok=True)
                with open(path, 'w', encoding='utf8') as f:
                    f.write(content)

        for num, paths in self.deleted_paths.items():
            assert num >= 2
            for path in paths:
                module = module_from_path(path)
                steps.setdefault(num, []).append(DeleteFile(module, path))
        max_step = max(steps) if steps else 2
        self.steps = [steps.get(num, []) for num in range(2, max_step + 1)]

    def teardown(self) -> None:
        assert self.old_cwd is not None and self.tmpdir is not None, \
            "test was not properly set up"
        os.chdir(self.old_cwd)
        try:
            self.tmpdir.cleanup()
        except OSError:
            # Cleanup failures (e.g. Windows file locking) shouldn't fail the test.
            pass
        self.old_cwd = None
        self.tmpdir = None

    def reportinfo(self) -> Tuple[str, int, str]:
        return self.file, self.line, self.name

    def repr_failure(self, excinfo: Any, style: Optional[Any] = None) -> str:
        if excinfo.errisinstance(SystemExit):
            # We assume that before doing exit() (which raises SystemExit) we've printed
            # enough context about what happened so that a stack trace is not useful.
            # In particular, uncaught exceptions during semantic analysis or type checking
            # call exit() and they already print out a stack trace.
            excrepr = excinfo.exconly()
        else:
            self.parent._prunetraceback(excinfo)
            excrepr = excinfo.getrepr(style='short')

        return "data: {}:{}:\n{}".format(self.file, self.line, excrepr)

    def find_steps(self) -> List[List[FileOperation]]:
        """Return a list of descriptions of file operations for each incremental step.

        The first list item corresponds to the first incremental step, the second for the
        second step, etc. Each operation can either be a file modification/creation (UpdateFile)
        or deletion (DeleteFile).

        Defaults to having two steps if there aren't any operations.
        """
        return self.steps
def module_from_path(path: str) -> str:
    """Map a file path under the temp directory to a dotted module name."""
    # Drop the '.py'/'.pyi' suffix before splitting into components.
    stem = re.sub(r'\.pyi?$', '', path)
    # We can have a mix of Unix-style and Windows-style separators.
    # Skip the leading component (the temp directory itself).
    components = re.split(r'[/\\]', stem)[1:]
    dotted = '.'.join(components)
    # A package's __init__ file denotes the package itself.
    return re.sub(r'\.__init__$', '', dotted)
class TestItem:
    """Parsed test case item.

    An item is of the form

      [id arg]
      .. data ..
    """

    id = ""
    arg: Optional[str] = ""

    # Text data, array of 8-bit strings
    data: List[str]

    file = ''
    line = 0  # Line number in file

    def __init__(self, id: str, arg: Optional[str], data: List[str],
                 line: int) -> None:
        self.id = id
        self.arg = arg
        self.data = data
        self.line = line
def parse_test_data(raw_data: str, name: str) -> List[TestItem]:
    """Parse a list of lines that represent a sequence of test items.

    The synthetic '[case name]' header is prepended so the case body itself
    becomes the first item; subsequent '[section ...]' headers start new items.
    """
    lines = ['', '[case ' + name + ']'] + raw_data.split('\n')
    ret: List[TestItem] = []
    data: List[str] = []
    id: Optional[str] = None
    arg: Optional[str] = None

    i = 0
    i0 = 0
    while i < len(lines):
        s = lines[i].strip()

        if lines[i].startswith('[') and s.endswith(']'):
            # Header line: flush the previous item, if any.
            if id:
                data = collapse_line_continuation(data)
                data = strip_list(data)
                # Note: data is already stripped above; don't strip it a second time.
                ret.append(TestItem(id, arg, data, i0 + 1))

            i0 = i
            id = s[1:-1]
            arg = None
            if ' ' in id:
                # Split '[id arg]' into id and arg.
                arg = id[id.index(' ') + 1:]
                id = id[:id.index(' ')]
            data = []
        elif lines[i].startswith('\\['):
            # '\[' escapes a literal '[' at line start.
            data.append(lines[i][1:])
        elif not lines[i].startswith('--'):
            data.append(lines[i])
        elif lines[i].startswith('----'):
            # '----' escapes a literal '--' comment marker.
            data.append(lines[i][2:])
        i += 1

    # Process the last item.
    if id:
        data = collapse_line_continuation(data)
        data = strip_list(data)
        ret.append(TestItem(id, arg, data, i0 + 1))

    return ret
def strip_list(l: List[str]) -> List[str]:
    """Return a stripped copy of l.

    Strip whitespace at the end of all lines, and strip all empty
    lines from the end of the array.
    """
    # Remove trailing whitespace from every line.
    stripped = [re.sub(r'\s+$', '', line) for line in l]
    # Drop empty lines from the end.
    while stripped and stripped[-1] == '':
        stripped.pop()
    return stripped
def collapse_line_continuation(l: List[str]) -> List[str]:
    """Join lines ending with a backslash onto the preceding line."""
    out: List[str] = []
    continuing = False
    for line in l:
        # Remove a single trailing backslash, if present.
        stripped = re.sub(r'\\$', '', line)
        if continuing:
            # Append to the previous line, dropping leading indentation.
            out[-1] += re.sub('^ +', '', stripped)
        else:
            out.append(stripped)
        continuing = line.endswith('\\')
    return out
def expand_variables(s: str) -> str:
    # Substitute the repository root for the <ROOT> placeholder used in test data.
    return s.replace('<ROOT>', root_dir)
def expand_errors(input: List[str], output: List[str], fnam: str) -> None:
    """Transform comments such as '# E: message' or
    '# E:3: message' in input.

    The result is lines like 'fnam:line: error: message', appended to output.
    """
    severities = {'E': 'error', 'N': 'note', 'W': 'warning'}
    for i, line in enumerate(input):
        # The first chunk before ' # ' is the code itself, not a comment.
        for candidate in line.split(' # ')[1:]:
            m = re.search(
                r'^([ENW]):((?P<col>\d+):)? (?P<message>.*)$',
                candidate.strip())
            if m is None:
                continue
            severity = severities[m.group(1)]
            # Restore the escaped '#' character.
            message = m.group('message').replace('\\#', '#')
            col = m.group('col')
            if col is None:
                output.append(
                    '{}:{}: {}: {}'.format(fnam, i + 1, severity, message))
            else:
                output.append('{}:{}:{}: {}: {}'.format(
                    fnam, i + 1, col, severity, message))
def fix_win_path(line: str) -> str:
    r"""Changes Windows paths to Linux paths in error messages.

    E.g. foo\bar.py -> foo/bar.py.
    """
    # Normalize the repository-root prefix first.
    line = line.replace(root_dir, root_dir.replace('\\', '/'))
    # Match 'path:line: message' or 'path: message' shapes.
    m = re.match(r'^([\S/]+):(\d+:)?(\s+.*)', line)
    if not m:
        return line
    else:
        filename, lineno, message = m.groups()
        return '{}:{}{}'.format(filename.replace('\\', '/'),
                                lineno or '', message)
def fix_cobertura_filename(line: str) -> str:
    r"""Changes filename paths to Linux paths in Cobertura output files.

    E.g. filename="pkg\subpkg\a.py" -> filename="pkg/subpkg/a.py".
    """
    m = re.search(r'<class .* filename="(?P<filename>.*?)"', line)
    if m is None:
        return line
    start, end = m.span('filename')
    fixed = m.group('filename').replace('\\', '/')
    return line[:start] + fixed + line[end:]
##
#
# pytest setup
#
##
# This function name is special to pytest. See
# https://docs.pytest.org/en/latest/reference.html#initialization-hooks
def pytest_addoption(parser: Any) -> None:
    # Register mypy-specific command-line options under the 'mypy' option group.
    group = parser.getgroup('mypy')
    group.addoption('--update-data', action='store_true', default=False,
                    help='Update test data to reflect actual output'
                         ' (supported only for certain tests)')
    group.addoption('--save-failures-to', default=None,
                    help='Copy the temp directories from failing tests to a target directory')
    group.addoption('--mypy-verbose', action='count',
                    help='Set the verbose flag when creating mypy Options')
    group.addoption('--mypyc-showc', action='store_true', default=False,
                    help='Display C code on mypyc test failures')
# This function name is special to pytest. See
# http://doc.pytest.org/en/latest/writing_plugins.html#collection-hooks
def pytest_pycollect_makeitem(collector: Any, name: str,
                              obj: object) -> 'Optional[Any]':
    """Called by pytest on each object in modules configured in conftest.py files.

    collector is pytest.Collector, returns Optional[pytest.Class]
    """
    if isinstance(obj, type):
        # Only classes derived from DataSuite contain test cases, not the DataSuite class itself
        if issubclass(obj, DataSuite) and obj is not DataSuite:
            # Non-None result means this obj is a test case.
            # The collect method of the returned DataSuiteCollector instance will be called later,
            # with self.obj being obj.
            return DataSuiteCollector.from_parent(  # type: ignore[no-untyped-call]
                parent=collector, name=name,
            )
    return None
def split_test_cases(parent: 'DataFileCollector', suite: 'DataSuite',
                     file: str) -> Iterator['DataDrivenTestCase']:
    """Iterate over raw test cases in file, at collection time, ignoring sub items.

    The collection phase is slow, so any heavy processing should be deferred to after
    uninteresting tests are filtered (when using -k PATTERN switch).
    """
    with open(file, encoding='utf-8') as f:
        data = f.read()
    # number of groups in the below regex
    NUM_GROUPS = 7
    # re.split keeps the captured groups, so each case contributes NUM_GROUPS
    # entries (name, flags..., body) to the resulting list.
    cases = re.split(r'^\[case ([a-zA-Z_0-9]+)'
                     r'(-writescache)?'
                     r'(-only_when_cache|-only_when_nocache)?'
                     r'(-posix|-windows)?'
                     r'(-skip)?'
                     r'(-xfail)?'
                     r'\][ \t]*$\n',
                     data,
                     flags=re.DOTALL | re.MULTILINE)
    line_no = cases[0].count('\n') + 1
    test_names = set()
    for i in range(1, len(cases), NUM_GROUPS):
        name, writescache, only_when, platform_flag, skip, xfail, data = cases[i:i + NUM_GROUPS]
        if name in test_names:
            raise RuntimeError('Found a duplicate test name "{}" in {} on line {}'.format(
                name, parent.name, line_no,
            ))
        # Strip the leading '-' from '-posix'/'-windows'.
        platform = platform_flag[1:] if platform_flag else None
        yield DataDrivenTestCase.from_parent(
            parent=parent,
            suite=suite,
            file=file,
            name=add_test_name_suffix(name, suite.test_name_suffix),
            writescache=bool(writescache),
            only_when=only_when,
            platform=platform,
            skip=bool(skip),
            xfail=bool(xfail),
            data=data,
            line=line_no,
        )
        line_no += data.count('\n') + 1

        # Record existing tests to prevent duplicates:
        test_names.update({name})
class DataSuiteCollector(pytest.Class):
    def collect(self) -> Iterator['DataFileCollector']:
        """Called by pytest on each of the object returned from pytest_pycollect_makeitem"""

        # obj is the object for which pytest_pycollect_makeitem returned self.
        suite: DataSuite = self.obj

        assert os.path.isdir(suite.data_prefix), \
            'Test data prefix ({}) not set correctly'.format(suite.data_prefix)

        # One sub-collector per .test data file listed by the suite.
        for data_file in suite.files:
            yield DataFileCollector.from_parent(parent=self, name=data_file)
class DataFileCollector(pytest.Collector):
    """Represents a single `.test` data driven test file.

    More context: https://github.com/python/mypy/issues/11662
    """
    parent: DataSuiteCollector

    @classmethod  # We have to fight with pytest here:
    def from_parent(  # type: ignore[override]
        cls,
        parent: DataSuiteCollector,
        *,
        name: str,
    ) -> 'DataFileCollector':
        return super().from_parent(parent, name=name)

    def collect(self) -> Iterator['DataDrivenTestCase']:
        # Delegate the actual splitting of the file into cases.
        yield from split_test_cases(
            parent=self,
            suite=self.parent.obj,
            file=os.path.join(self.parent.obj.data_prefix, self.name),
        )
def add_test_name_suffix(name: str, suffix: str) -> str:
    """Insert a suite-specific suffix into a test case name.

    A "magic suffix" like "-skip" must remain the trailing part of the
    name, since other code relies on endswith() checks.
    """
    m = re.search(r'-[-A-Za-z0-9]+$', name)
    if m is None:
        return name + suffix
    magic = m.group(0)
    return name[:m.start()] + suffix + magic
def is_incremental(testcase: DataDrivenTestCase) -> bool:
    """True if the case exercises incremental mode (by case name or file name)."""
    lowered = testcase.name.lower()
    return 'incremental' in lowered or 'incremental' in testcase.file
def has_stable_flags(testcase: DataDrivenTestCase) -> bool:
    """True if the case uses the same flags/config for every incremental step."""
    # Step-specific flags ('# flags2:' etc.) mean the flags change between runs.
    if any(re.match(r'# flags[2-9]:', line) for line in testcase.input):
        return False
    # Step-specific config files ('mypy.ini.2' etc.) also make flags unstable.
    return not any(
        os.path.basename(path).startswith('mypy.ini.')
        for path, _contents in testcase.files
    )
class DataSuite:
    # option fields - class variables
    files: List[str]

    base_path = test_temp_dir

    # Allow external users of the test code to override the data prefix
    data_prefix = test_data_prefix

    # Whether every case must include an [out] section.
    required_out_section = False
    # Whether to join test-data paths with the native OS separator.
    native_sep = False

    # Name suffix automatically added to each test case in the suite (can be
    # used to distinguish test cases in suites that share data files)
    test_name_suffix = ''

    def setup(self) -> None:
        """Setup fixtures (ad-hoc)"""
        pass

    @abstractmethod
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        raise NotImplementedError

View file

@ -0,0 +1,487 @@
import os
import re
import sys
import time
import shutil
import contextlib
from typing import List, Iterable, Dict, Tuple, Callable, Any, Iterator, Union
from mypy import defaults
import mypy.api as api
import pytest
# Exporting Suite as alias to TestCase for backwards compatibility
# TODO: avoid aliasing - import and subclass TestCase directly
from unittest import TestCase as Suite # noqa: F401 (re-exporting)
from mypy.main import process_options
from mypy.options import Options
from mypy.test.data import (
DataDrivenTestCase, fix_cobertura_filename, UpdateFile, DeleteFile
)
from mypy.test.config import test_temp_dir
import mypy.version
# Convenience alias so test modules can apply @skip directly.
skip = pytest.mark.skip

# AssertStringArraysEqual displays special line alignment helper messages if
# the first different line has at least this many characters.
MIN_LINE_LENGTH_FOR_ALIGNMENT = 5
def run_mypy(args: List[str]) -> None:
    """Run mypy via its API and fail the current pytest test if it reports errors."""
    __tracebackhide__ = True
    # We must enable site packages even though they could cause problems,
    # since stubs for typing_extensions live there.
    outval, errval, status = api.run(args + ['--show-traceback',
                                             '--no-silence-site-packages'])
    if status != 0:
        sys.stdout.write(outval)
        sys.stderr.write(errval)
        pytest.fail(msg="Sample check failed", pytrace=False)
def assert_string_arrays_equal(expected: List[str], actual: List[str],
                               msg: str) -> None:
    """Assert that two string arrays are equal.

    We consider "can't" and "cannot" equivalent, by replacing the
    former with the latter before comparing.

    Display any differences in a human-readable form.
    """
    __tracebackhide__ = True
    actual = clean_up(actual)
    actual = [line.replace("can't", "cannot") for line in actual]
    expected = [line.replace("can't", "cannot") for line in expected]

    if actual != expected:
        # Elide long identical prefixes/suffixes to keep the diff readable.
        num_skip_start = num_skipped_prefix_lines(expected, actual)
        num_skip_end = num_skipped_suffix_lines(expected, actual)

        sys.stderr.write('Expected:\n')

        # If omit some lines at the beginning, indicate it by displaying a line
        # with '...'.
        if num_skip_start > 0:
            sys.stderr.write(' ...\n')

        # Keep track of the first different line.
        first_diff = -1

        # Display only this many first characters of identical lines.
        width = 75

        for i in range(num_skip_start, len(expected) - num_skip_end):
            if i >= len(actual) or expected[i] != actual[i]:
                if first_diff < 0:
                    first_diff = i
                sys.stderr.write(' {:<45} (diff)'.format(expected[i]))
            else:
                e = expected[i]
                sys.stderr.write(' ' + e[:width])
                if len(e) > width:
                    sys.stderr.write('...')
            sys.stderr.write('\n')
        if num_skip_end > 0:
            sys.stderr.write(' ...\n')

        sys.stderr.write('Actual:\n')

        if num_skip_start > 0:
            sys.stderr.write(' ...\n')

        for j in range(num_skip_start, len(actual) - num_skip_end):
            if j >= len(expected) or expected[j] != actual[j]:
                sys.stderr.write(' {:<45} (diff)'.format(actual[j]))
            else:
                a = actual[j]
                sys.stderr.write(' ' + a[:width])
                if len(a) > width:
                    sys.stderr.write('...')
            sys.stderr.write('\n')
        if not actual:
            sys.stderr.write(' (empty)\n')
        if num_skip_end > 0:
            sys.stderr.write(' ...\n')

        sys.stderr.write('\n')

        if 0 <= first_diff < len(actual) and (
                len(expected[first_diff]) >= MIN_LINE_LENGTH_FOR_ALIGNMENT
                or len(actual[first_diff]) >= MIN_LINE_LENGTH_FOR_ALIGNMENT):
            # Display message that helps visualize the differences between two
            # long lines.
            show_align_message(expected[first_diff], actual[first_diff])

        raise AssertionError(msg)
def assert_module_equivalence(name: str,
                              expected: Iterable[str], actual: Iterable[str]) -> None:
    """Assert that two module-name collections are equal as sets (ignoring __main__)."""
    expected_normalized = sorted(expected)
    actual_normalized = sorted(set(actual).difference({"__main__"}))
    assert_string_arrays_equal(
        expected_normalized,
        actual_normalized,
        ('Actual modules ({}) do not match expected modules ({}) '
         'for "[{} ...]"').format(
            ', '.join(actual_normalized),
            ', '.join(expected_normalized),
            name))
def assert_target_equivalence(name: str,
                              expected: List[str], actual: List[str]) -> None:
    """Compare actual and expected targets (order sensitive)."""
    assert_string_arrays_equal(
        expected,
        actual,
        ('Actual targets ({}) do not match expected targets ({}) '
         'for "[{} ...]"').format(
            ', '.join(actual),
            ', '.join(expected),
            name))
def update_testcase_output(testcase: DataDrivenTestCase, output: List[str]) -> None:
    """Rewrite the .test file so the expected output matches the actual output.

    Used by the --update-data option.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    testcase_path = os.path.join(testcase.old_cwd, testcase.file)
    with open(testcase_path, encoding='utf8') as f:
        data_lines = f.read().splitlines()
    # The slice covering this particular test case in the data file.
    test = '\n'.join(data_lines[testcase.line:testcase.last_line])

    # Map each old expected line to the new actual line(s) that replace it.
    mapping: Dict[str, List[str]] = {}
    for old, new in zip(testcase.output, output):
        PREFIX = 'error:'
        ind = old.find(PREFIX)
        # When only the message part after 'error:' changed, match on that part
        # so the 'file:line:' prefix in the data file is left untouched.
        if ind != -1 and old[:ind] == new[:ind]:
            old, new = old[ind + len(PREFIX):], new[ind + len(PREFIX):]
        mapping.setdefault(old, []).append(new)

    for old in mapping:
        # Only rewrite when occurrences match exactly, to avoid corrupting the file.
        if test.count(old) == len(mapping[old]):
            betweens = test.split(old)

            # Interleave betweens and mapping[old]
            from itertools import chain
            interleaved = [betweens[0]] + \
                list(chain.from_iterable(zip(mapping[old], betweens[1:])))
            test = ''.join(interleaved)

    data_lines[testcase.line:testcase.last_line] = [test]
    data = '\n'.join(data_lines)
    with open(testcase_path, 'w', encoding='utf8') as f:
        print(data, file=f)
def show_align_message(s1: str, s2: str) -> None:
    """Align s1 and s2 so that the their first difference is highlighted.

    For example, if s1 is 'foobar' and s2 is 'fobar', display the
    following lines:

      E: foobar
      A: fobar
            ^

    If s1 and s2 are long, only display a fragment of the strings around the
    first difference. If s1 is very short, do nothing.
    """

    # Seeing what went wrong is trivial even without alignment if the expected
    # string is very short. In this case do nothing to simplify output.
    if len(s1) < 4:
        return

    maxw = 72  # Maximum number of characters shown

    sys.stderr.write('Alignment of first line difference:\n')

    # Chop off a common prefix (10 chars at a time) so the difference fits.
    trunc = False
    while s1[:30] == s2[:30]:
        s1 = s1[10:]
        s2 = s2[10:]
        trunc = True

    if trunc:
        s1 = '...' + s1
        s2 = '...' + s2

    max_len = max(len(s1), len(s2))
    extra = ''
    if max_len > maxw:
        extra = '...'

    # Write a chunk of both lines, aligned.
    sys.stderr.write(' E: {}{}\n'.format(s1[:maxw], extra))
    sys.stderr.write(' A: {}{}\n'.format(s2[:maxw], extra))
    # Write an indicator character under the different columns.
    sys.stderr.write(' ')
    for j in range(min(maxw, max(len(s1), len(s2)))):
        if s1[j:j + 1] != s2[j:j + 1]:
            sys.stderr.write('^')  # Difference
            break
        else:
            sys.stderr.write(' ')  # Equal
    sys.stderr.write('\n')
def clean_up(a: List[str]) -> List[str]:
    """Remove common directory prefix from all strings in a.

    This uses a naive string replace; it seems to work well enough. Also
    remove trailing carriage returns.
    """
    cwd = os.getcwd()
    driver_path = cwd + '/driver.py'
    cleaned: List[str] = []
    for original in a:
        text = original
        sep = os.sep
        # NOTE(review): with the prefix being just os.sep, every candidate below
        # is filtered out by the guard, so this loop is a no-op as written —
        # presumably a longer prefix (e.g. temp-dir + sep) was intended; verify.
        for candidate in (sep, sep.replace(os.sep, '/')):
            if candidate not in ('/', '//', '\\', '\\\\'):
                text = text.replace(candidate, '')
        # Ignore spaces at end of line.
        text = re.sub(' +$', '', text)
        # Remove pwd from driver.py's path.
        text = text.replace(driver_path, 'driver.py')
        # Drop a trailing carriage return, if any.
        cleaned.append(re.sub('\\r$', '', text))
    return cleaned
@contextlib.contextmanager
def local_sys_path_set() -> Iterator[None]:
    """Temporary insert current directory into sys.path.

    This can be used by test cases that do runtime imports, for example
    by the stubgen tests.
    """
    saved = sys.path[:]
    cwd_missing = '' not in sys.path and '.' not in sys.path
    if cwd_missing:
        sys.path.insert(0, '')
    try:
        yield
    finally:
        # Restore the exact previous search path.
        sys.path = saved
def num_skipped_prefix_lines(a1: List[str], a2: List[str]) -> int:
    """Count leading common lines that can be elided, keeping 4 for context."""
    common = 0
    limit = min(len(a1), len(a2))
    while common < limit and a1[common] == a2[common]:
        common += 1
    return max(0, common - 4)
def num_skipped_suffix_lines(a1: List[str], a2: List[str]) -> int:
    """Count trailing common lines that can be elided, keeping 4 for context."""
    common = 0
    limit = min(len(a1), len(a2))
    while common < limit and a1[-common - 1] == a2[-common - 1]:
        common += 1
    return max(0, common - 4)
def testfile_pyversion(path: str) -> Tuple[int, int]:
if path.endswith('python2.test'):
return defaults.PYTHON2_VERSION
elif path.endswith('python310.test'):
return 3, 10
else:
return defaults.PYTHON3_VERSION
def testcase_pyversion(path: str, testcase_name: str) -> Tuple[int, int]:
    """Infer the target Python version from the case name, else its file name."""
    return (defaults.PYTHON2_VERSION
            if testcase_name.endswith('python2')
            else testfile_pyversion(path))
def normalize_error_messages(messages: List[str]) -> List[str]:
    """Translate an array of error messages to use / as path separator."""
    return [message.replace(os.sep, '/') for message in messages]
def retry_on_error(func: Callable[[], Any], max_wait: float = 1.0) -> None:
    """Retry callback with exponential backoff when it raises OSError.

    If the function still generates an error after max_wait seconds, propagate
    the exception.

    This can be effective against random file system operation failures on
    Windows.
    """
    deadline = time.time() + max_wait
    delay = 0.01
    while True:
        try:
            func()
            return
        except OSError:
            # Double the delay, but never sleep beyond the deadline.
            delay = min(delay * 2, deadline - time.time())
            if delay <= 0.01:
                # Done enough waiting, the error seems persistent.
                raise
            time.sleep(delay)
def good_repr(obj: object) -> str:
    """Like repr(), but render multi-line strings as a '''-quoted block."""
    if not isinstance(obj, str) or obj.count('\n') <= 1:
        return repr(obj)
    pieces = ["'''\\"]
    for line in obj.split('\n'):
        # Prepending '"' forces repr to single-quote; the slice then drops
        # both the opening quote and that sentinel character.
        pieces.append(repr('"' + line)[2:-1])
    pieces[-1] += "'''"
    return '\n'.join(pieces)
def assert_equal(a: object, b: object, fmt: str = '{} != {}') -> None:
    """Assert a == b, formatting both values with good_repr() on failure."""
    __tracebackhide__ = True
    if a != b:
        message = fmt.format(good_repr(a), good_repr(b))
        raise AssertionError(message)
def typename(t: type) -> str:
    """Return a short, human-readable name for a type object."""
    text = str(t)
    if '.' in text:
        # Dotted path: keep only the last component, minus the trailing "'>".
        return text.split('.')[-1].rstrip("'>")
    # Builtin style "<class 'int'>": slice out the bare name.
    return text[8:-2]
def assert_type(typ: type, value: object) -> None:
    """Assert that type(value) is exactly typ (subclasses do not count)."""
    __tracebackhide__ = True
    if type(value) == typ:
        return
    raise AssertionError('Invalid type {}, expected {}'.format(
        typename(type(value)), typename(typ)))
def parse_options(program_text: str, testcase: DataDrivenTestCase,
                  incremental_step: int) -> Options:
    """Parse comments like '# flags: --foo' in a test case.

    For incremental steps past the first, a step-specific '# flagsN:' line
    (N = step number) overrides the base '# flags:' line if present.
    """
    options = Options()
    flags = re.search('# flags: (.*)$', program_text, flags=re.MULTILINE)
    if incremental_step > 1:
        flags2 = re.search('# flags{}: (.*)$'.format(incremental_step), program_text,
                           flags=re.MULTILINE)
        if flags2:
            flags = flags2
    if flags:
        flag_list = flags.group(1).split()
        flag_list.append('--no-site-packages')  # the tests shouldn't need an installed Python
        targets, options = process_options(flag_list, require_targets=False)
        if targets:
            # TODO: support specifying targets via the flags pragma
            raise RuntimeError('Specifying targets via the flags pragma is not supported.')
    else:
        flag_list = []
        options = Options()
    # TODO: Enable strict optional in test cases by default (requires *many* test case changes)
    options.strict_optional = False
    options.error_summary = False
    # Allow custom python version to override testcase_pyversion.
    if all(flag.split('=')[0] not in ['--python-version', '-2', '--py2'] for flag in flag_list):
        options.python_version = testcase_pyversion(testcase.file, testcase.name)
    # Propagate pytest's --mypy-verbose option into mypy's verbosity level.
    if testcase.config.getoption('--mypy-verbose'):
        options.verbosity = testcase.config.getoption('--mypy-verbose')
    return options
def split_lines(*streams: bytes) -> List[str]:
    """Returns a single list of string lines from the byte streams in args."""
    result: List[str] = []
    for stream in streams:
        result.extend(stream.decode('utf8').splitlines())
    return result
def write_and_fudge_mtime(content: str, target_path: str) -> None:
    """Write content to target_path, bumping the mtime past any old file's.

    In some systems, mtime has a resolution of 1 second which can
    cause annoying-to-debug issues when a file has the same size
    after a change. We manually set the mtime to circumvent this.
    Note that we increment the old file's mtime, which guarantees a
    different value, rather than incrementing the mtime after the
    copy, which could leave the mtime unchanged if the old file had
    a similarly fudged mtime.

    Missing parent directories of target_path are created.
    """
    new_time = None
    if os.path.isfile(target_path):
        new_time = os.stat(target_path).st_mtime + 1
    # Renamed from 'dir' to avoid shadowing the builtin.
    target_dir = os.path.dirname(target_path)
    os.makedirs(target_dir, exist_ok=True)
    with open(target_path, "w", encoding="utf-8") as target:
        target.write(content)
    # Explicit None check: a legitimate mtime of 0 must not be skipped.
    if new_time is not None:
        os.utime(target_path, times=(new_time, new_time))
def perform_file_operations(
        operations: List[Union[UpdateFile, DeleteFile]]) -> None:
    """Apply a sequence of file update/delete operations for a test step."""
    for operation in operations:
        if isinstance(operation, UpdateFile):
            # Modify/create file
            write_and_fudge_mtime(operation.content, operation.target_path)
        elif os.path.isdir(operation.path):
            # Delete directory. Sanity check to avoid unexpected deletions.
            assert operation.path.startswith('tmp')
            shutil.rmtree(operation.path)
        else:
            # Delete file. Use retries to work around potential flakiness
            # on Windows (AppVeyor).
            path = operation.path
            retry_on_error(lambda: os.remove(path))
def check_test_output_files(testcase: DataDrivenTestCase,
                            step: int,
                            strip_prefix: str = '') -> None:
    """Compare files written by a test run against the case's expected outputs.

    For each (path, expected content) pair recorded on the test case, read
    the produced file, normalize it, and assert it matches. Raises
    AssertionError if a file is missing or its content differs.
    """
    for path, expected_content in testcase.output_files:
        if path.startswith(strip_prefix):
            path = path[len(strip_prefix):]
        if not os.path.exists(path):
            raise AssertionError(
                'Expected file {} was not produced by test case{}'.format(
                    path, ' on step %d' % step if testcase.output2 else ''))
        with open(path, 'r', encoding='utf8') as output_file:
            actual_output_content = output_file.read().splitlines()
        normalized_output = normalize_file_output(actual_output_content,
                                                  os.path.abspath(test_temp_dir))
        # We always normalize things like timestamp, but only handle operating-system
        # specific things if requested.
        if testcase.normalize_output:
            if testcase.suite.native_sep and os.path.sep == '\\':
                normalized_output = [fix_cobertura_filename(line)
                                     for line in normalized_output]
            normalized_output = normalize_error_messages(normalized_output)
        assert_string_arrays_equal(expected_content.splitlines(), normalized_output,
                                   'Output file {} did not match its expected output{}'.format(
                                       path, ' on step %d' % step if testcase.output2 else ''))
def normalize_file_output(content: List[str], current_abs_path: str) -> List[str]:
    """Normalize file output for comparison.

    Replaces the current working path with $PWD, mypy version strings with
    $VERSION, and 10-digit timestamps with $TIMESTAMP.
    """
    version = mypy.version.__version__
    # We generate a new mypy.version when building mypy wheels that
    # lacks base_version, so handle that case.
    base_version = getattr(mypy.version, 'base_version', version)
    version_regex = re.compile(r'\b' + re.escape(version) + r'\b')
    base_version_regex = re.compile(r'\b' + re.escape(base_version) + r'\b')
    timestamp_regex = re.compile(r'\d{10}')
    result = []
    for line in content:
        line = line.replace(current_abs_path, '$PWD')
        line = version_regex.sub('$VERSION', line)
        line = base_version_regex.sub('$VERSION', line)
        line = timestamp_regex.sub('$TIMESTAMP', line)
        result.append(line)
    return result

View file

@ -0,0 +1,379 @@
import os
import pytest
import shutil
import tempfile
import unittest
from typing import List, Optional, Set, Tuple
from mypy.find_sources import InvalidSourceList, SourceFinder, create_source_list
from mypy.fscache import FileSystemCache
from mypy.options import Options
from mypy.modulefinder import BuildSource
class FakeFSCache(FileSystemCache):
    """In-memory FileSystemCache fake backed by a set of absolute file paths."""
    def __init__(self, files: Set[str]) -> None:
        self.files = {os.path.abspath(f) for f in files}
    def isfile(self, file: str) -> bool:
        return file in self.files
    def isdir(self, dir: str) -> bool:
        # A directory "exists" if any known file lives under it.
        prefix = dir if dir.endswith(os.sep) else dir + os.sep
        return any(f.startswith(prefix) for f in self.files)
    def listdir(self, dir: str) -> List[str]:
        # Collect the first path component of every file under dir.
        prefix = dir if dir.endswith(os.sep) else dir + os.sep
        entries = {f[len(prefix):].split(os.sep)[0]
                   for f in self.files if f.startswith(prefix)}
        return list(entries)
    def init_under_package_root(self, file: str) -> bool:
        return False
def normalise_path(path: str) -> str:
    """Drop any drive letter and convert separators to forward slashes."""
    _, tail = os.path.splitdrive(path)
    return tail.replace(os.sep, "/")
def normalise_build_source_list(sources: List[BuildSource]) -> List[Tuple[str, Optional[str]]]:
    """Project sources to sorted (module, normalised base_dir) pairs."""
    pairs = []
    for source in sources:
        base = normalise_path(source.base_dir) if source.base_dir is not None else None
        pairs.append((source.module, base))
    return sorted(pairs)
def crawl(finder: SourceFinder, f: str) -> Tuple[str, str]:
    """Crawl up from a file and return (module name, normalised base dir)."""
    module, base_dir = finder.crawl_up(f)
    return module, normalise_path(base_dir)
def find_sources_in_dir(finder: SourceFinder, f: str) -> List[Tuple[str, Optional[str]]]:
    """Find sources under directory f, normalised and sorted for comparison."""
    return normalise_build_source_list(finder.find_sources_in_dir(os.path.abspath(f)))
def find_sources(
    paths: List[str], options: Options, fscache: FileSystemCache
) -> List[Tuple[str, Optional[str]]]:
    """Build a source list from paths, normalised and sorted for comparison."""
    paths = [os.path.abspath(p) for p in paths]
    return normalise_build_source_list(create_source_list(paths, options, fscache))
class SourceFinderSuite(unittest.TestCase):
    """Tests for SourceFinder: module-name crawling and source discovery.

    Each case builds a FakeFSCache with a synthetic file set, so no real
    filesystem layout is needed beyond the temp working directory.
    """
    def setUp(self) -> None:
        # Run from a fresh temp dir so relative lookups can't touch the repo.
        self.tempdir = tempfile.mkdtemp()
        self.oldcwd = os.getcwd()
        os.chdir(self.tempdir)
    def tearDown(self) -> None:
        os.chdir(self.oldcwd)
        shutil.rmtree(self.tempdir)
    def test_crawl_no_namespace(self) -> None:
        """Without namespace packages, crawling stops at a missing __init__.py."""
        options = Options()
        options.namespace_packages = False
        finder = SourceFinder(FakeFSCache({"/setup.py"}), options)
        assert crawl(finder, "/setup.py") == ("setup", "/")
        finder = SourceFinder(FakeFSCache({"/a/setup.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("setup", "/a")
        finder = SourceFinder(FakeFSCache({"/a/b/setup.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("setup", "/a/b")
        finder = SourceFinder(FakeFSCache({"/a/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("a.setup", "/")
        finder = SourceFinder(
            FakeFSCache({"/a/invalid-name/setup.py", "/a/__init__.py"}),
            options,
        )
        assert crawl(finder, "/a/invalid-name/setup.py") == ("setup", "/a/invalid-name")
        finder = SourceFinder(FakeFSCache({"/a/b/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("setup", "/a/b")
        finder = SourceFinder(
            FakeFSCache({"/a/b/c/setup.py", "/a/__init__.py", "/a/b/c/__init__.py"}),
            options,
        )
        assert crawl(finder, "/a/b/c/setup.py") == ("c.setup", "/a/b")
    def test_crawl_namespace(self) -> None:
        """With namespace packages, crawling continues through __init__-less dirs."""
        options = Options()
        options.namespace_packages = True
        finder = SourceFinder(FakeFSCache({"/setup.py"}), options)
        assert crawl(finder, "/setup.py") == ("setup", "/")
        finder = SourceFinder(FakeFSCache({"/a/setup.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("setup", "/a")
        finder = SourceFinder(FakeFSCache({"/a/b/setup.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("setup", "/a/b")
        finder = SourceFinder(FakeFSCache({"/a/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("a.setup", "/")
        finder = SourceFinder(
            FakeFSCache({"/a/invalid-name/setup.py", "/a/__init__.py"}),
            options,
        )
        assert crawl(finder, "/a/invalid-name/setup.py") == ("setup", "/a/invalid-name")
        finder = SourceFinder(FakeFSCache({"/a/b/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("a.b.setup", "/")
        finder = SourceFinder(
            FakeFSCache({"/a/b/c/setup.py", "/a/__init__.py", "/a/b/c/__init__.py"}),
            options,
        )
        assert crawl(finder, "/a/b/c/setup.py") == ("a.b.c.setup", "/")
    def test_crawl_namespace_explicit_base(self) -> None:
        """explicit_package_bases: mypy_path entries act as package roots."""
        options = Options()
        options.namespace_packages = True
        options.explicit_package_bases = True
        finder = SourceFinder(FakeFSCache({"/setup.py"}), options)
        assert crawl(finder, "/setup.py") == ("setup", "/")
        finder = SourceFinder(FakeFSCache({"/a/setup.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("setup", "/a")
        finder = SourceFinder(FakeFSCache({"/a/b/setup.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("setup", "/a/b")
        finder = SourceFinder(FakeFSCache({"/a/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("a.setup", "/")
        finder = SourceFinder(
            FakeFSCache({"/a/invalid-name/setup.py", "/a/__init__.py"}),
            options,
        )
        assert crawl(finder, "/a/invalid-name/setup.py") == ("setup", "/a/invalid-name")
        finder = SourceFinder(FakeFSCache({"/a/b/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("a.b.setup", "/")
        finder = SourceFinder(
            FakeFSCache({"/a/b/c/setup.py", "/a/__init__.py", "/a/b/c/__init__.py"}),
            options,
        )
        assert crawl(finder, "/a/b/c/setup.py") == ("a.b.c.setup", "/")
        # set mypy path, so we actually have some explicit base dirs
        options.mypy_path = ["/a/b"]
        finder = SourceFinder(FakeFSCache({"/a/b/c/setup.py"}), options)
        assert crawl(finder, "/a/b/c/setup.py") == ("c.setup", "/a/b")
        finder = SourceFinder(
            FakeFSCache({"/a/b/c/setup.py", "/a/__init__.py", "/a/b/c/__init__.py"}),
            options,
        )
        assert crawl(finder, "/a/b/c/setup.py") == ("c.setup", "/a/b")
        # With two candidate bases, the more specific one wins.
        options.mypy_path = ["/a/b", "/a/b/c"]
        finder = SourceFinder(FakeFSCache({"/a/b/c/setup.py"}), options)
        assert crawl(finder, "/a/b/c/setup.py") == ("setup", "/a/b/c")
    def test_crawl_namespace_multi_dir(self) -> None:
        """Two mypy_path roots each anchor their own package tree."""
        options = Options()
        options.namespace_packages = True
        options.explicit_package_bases = True
        options.mypy_path = ["/a", "/b"]
        finder = SourceFinder(FakeFSCache({"/a/pkg/a.py", "/b/pkg/b.py"}), options)
        assert crawl(finder, "/a/pkg/a.py") == ("pkg.a", "/a")
        assert crawl(finder, "/b/pkg/b.py") == ("pkg.b", "/b")
    def test_find_sources_in_dir_no_namespace(self) -> None:
        """Directory discovery without namespace packages."""
        options = Options()
        options.namespace_packages = False
        files = {
            "/pkg/a1/b/c/d/e.py",
            "/pkg/a1/b/f.py",
            "/pkg/a2/__init__.py",
            "/pkg/a2/b/c/d/e.py",
            "/pkg/a2/b/f.py",
        }
        finder = SourceFinder(FakeFSCache(files), options)
        assert find_sources_in_dir(finder, "/") == [
            ("a2", "/pkg"),
            ("e", "/pkg/a1/b/c/d"),
            ("e", "/pkg/a2/b/c/d"),
            ("f", "/pkg/a1/b"),
            ("f", "/pkg/a2/b"),
        ]
    def test_find_sources_in_dir_namespace(self) -> None:
        """Directory discovery with namespace packages enabled."""
        options = Options()
        options.namespace_packages = True
        files = {
            "/pkg/a1/b/c/d/e.py",
            "/pkg/a1/b/f.py",
            "/pkg/a2/__init__.py",
            "/pkg/a2/b/c/d/e.py",
            "/pkg/a2/b/f.py",
        }
        finder = SourceFinder(FakeFSCache(files), options)
        assert find_sources_in_dir(finder, "/") == [
            ("a2", "/pkg"),
            ("a2.b.c.d.e", "/pkg"),
            ("a2.b.f", "/pkg"),
            ("e", "/pkg/a1/b/c/d"),
            ("f", "/pkg/a1/b"),
        ]
    def test_find_sources_in_dir_namespace_explicit_base(self) -> None:
        """Directory discovery with explicit package bases from mypy_path."""
        options = Options()
        options.namespace_packages = True
        options.explicit_package_bases = True
        options.mypy_path = ["/"]
        files = {
            "/pkg/a1/b/c/d/e.py",
            "/pkg/a1/b/f.py",
            "/pkg/a2/__init__.py",
            "/pkg/a2/b/c/d/e.py",
            "/pkg/a2/b/f.py",
        }
        finder = SourceFinder(FakeFSCache(files), options)
        assert find_sources_in_dir(finder, "/") == [
            ("pkg.a1.b.c.d.e", "/"),
            ("pkg.a1.b.f", "/"),
            ("pkg.a2", "/"),
            ("pkg.a2.b.c.d.e", "/"),
            ("pkg.a2.b.f", "/"),
        ]
        options.mypy_path = ["/pkg"]
        finder = SourceFinder(FakeFSCache(files), options)
        assert find_sources_in_dir(finder, "/") == [
            ("a1.b.c.d.e", "/pkg"),
            ("a1.b.f", "/pkg"),
            ("a2", "/pkg"),
            ("a2.b.c.d.e", "/pkg"),
            ("a2.b.f", "/pkg"),
        ]
    def test_find_sources_in_dir_namespace_multi_dir(self) -> None:
        """Directory discovery across multiple explicit package bases."""
        options = Options()
        options.namespace_packages = True
        options.explicit_package_bases = True
        options.mypy_path = ["/a", "/b"]
        finder = SourceFinder(FakeFSCache({"/a/pkg/a.py", "/b/pkg/b.py"}), options)
        assert find_sources_in_dir(finder, "/") == [("pkg.a", "/a"), ("pkg.b", "/b")]
    def test_find_sources_exclude(self) -> None:
        """--exclude patterns: defaults, file names, directories, ORing."""
        options = Options()
        options.namespace_packages = True
        # default
        for excluded_dir in ["site-packages", ".whatever", "node_modules", ".x/.z"]:
            fscache = FakeFSCache({"/dir/a.py", "/dir/venv/{}/b.py".format(excluded_dir)})
            assert find_sources(["/"], options, fscache) == [("a", "/dir")]
            with pytest.raises(InvalidSourceList):
                find_sources(["/dir/venv/"], options, fscache)
            # Explicitly naming an excluded dir/file still finds its sources.
            assert find_sources(["/dir/venv/{}".format(excluded_dir)], options, fscache) == [
                ("b", "/dir/venv/{}".format(excluded_dir))
            ]
            assert find_sources(["/dir/venv/{}/b.py".format(excluded_dir)], options, fscache) == [
                ("b", "/dir/venv/{}".format(excluded_dir))
            ]
        files = {
            "/pkg/a1/b/c/d/e.py",
            "/pkg/a1/b/f.py",
            "/pkg/a2/__init__.py",
            "/pkg/a2/b/c/d/e.py",
            "/pkg/a2/b/f.py",
        }
        # file name
        options.exclude = [r"/f\.py$"]
        fscache = FakeFSCache(files)
        assert find_sources(["/"], options, fscache) == [
            ("a2", "/pkg"),
            ("a2.b.c.d.e", "/pkg"),
            ("e", "/pkg/a1/b/c/d"),
        ]
        assert find_sources(["/pkg/a1/b/f.py"], options, fscache) == [('f', '/pkg/a1/b')]
        assert find_sources(["/pkg/a2/b/f.py"], options, fscache) == [('a2.b.f', '/pkg')]
        # directory name
        options.exclude = ["/a1/"]
        fscache = FakeFSCache(files)
        assert find_sources(["/"], options, fscache) == [
            ("a2", "/pkg"),
            ("a2.b.c.d.e", "/pkg"),
            ("a2.b.f", "/pkg"),
        ]
        with pytest.raises(InvalidSourceList):
            find_sources(["/pkg/a1"], options, fscache)
        with pytest.raises(InvalidSourceList):
            find_sources(["/pkg/a1/"], options, fscache)
        with pytest.raises(InvalidSourceList):
            find_sources(["/pkg/a1/b"], options, fscache)
        # A '$'-anchored pattern cannot match a directory prefix, so a1 is kept.
        options.exclude = ["/a1/$"]
        assert find_sources(["/pkg/a1"], options, fscache) == [
            ('e', '/pkg/a1/b/c/d'), ('f', '/pkg/a1/b')
        ]
        # paths
        options.exclude = ["/pkg/a1/"]
        fscache = FakeFSCache(files)
        assert find_sources(["/"], options, fscache) == [
            ("a2", "/pkg"),
            ("a2.b.c.d.e", "/pkg"),
            ("a2.b.f", "/pkg"),
        ]
        with pytest.raises(InvalidSourceList):
            find_sources(["/pkg/a1"], options, fscache)
        # OR two patterns together
        for orred in [["/(a1|a3)/"], ["a1", "a3"], ["a3", "a1"]]:
            options.exclude = orred
            fscache = FakeFSCache(files)
            assert find_sources(["/"], options, fscache) == [
                ("a2", "/pkg"),
                ("a2.b.c.d.e", "/pkg"),
                ("a2.b.f", "/pkg"),
            ]
        options.exclude = ["b/c/"]
        fscache = FakeFSCache(files)
        assert find_sources(["/"], options, fscache) == [
            ("a2", "/pkg"),
            ("a2.b.f", "/pkg"),
            ("f", "/pkg/a1/b"),
        ]
        # nothing should be ignored as a result of this
        # NOTE(review): there is no comma after "/xxx/pkg/a2/b/f.py", so the
        # last two entries concatenate into one string; harmless here since
        # neither would match anyway, but likely unintended -- verify.
        big_exclude1 = [
            "/pkg/a/", "/2", "/1", "/pk/", "/kg", "/g.py", "/bc", "/xxx/pkg/a2/b/f.py"
            "xxx/pkg/a2/b/f.py",
        ]
        big_exclude2 = ["|".join(big_exclude1)]
        for big_exclude in [big_exclude1, big_exclude2]:
            options.exclude = big_exclude
            fscache = FakeFSCache(files)
            assert len(find_sources(["/"], options, fscache)) == len(files)
        files = {
            "pkg/a1/b/c/d/e.py",
            "pkg/a1/b/f.py",
            "pkg/a2/__init__.py",
            "pkg/a2/b/c/d/e.py",
            "pkg/a2/b/f.py",
        }
        fscache = FakeFSCache(files)
        assert len(find_sources(["."], options, fscache)) == len(files)

View file

@ -0,0 +1,45 @@
from io import StringIO
import sys
import mypy.api
from mypy.test.helpers import Suite
class APISuite(Suite):
    """Tests that mypy.api.run captures all output instead of leaking it.

    setUp swaps in StringIO streams; tearDown asserts nothing escaped
    mypy.api's own capture onto the process-level stdout/stderr.
    """
    def setUp(self) -> None:
        # Save and replace the real streams so leaks are detectable.
        self.sys_stdout = sys.stdout
        self.sys_stderr = sys.stderr
        sys.stdout = self.stdout = StringIO()
        sys.stderr = self.stderr = StringIO()
    def tearDown(self) -> None:
        sys.stdout = self.sys_stdout
        sys.stderr = self.sys_stderr
        # mypy.api.run must not have written to the process-level streams.
        assert self.stdout.getvalue() == ''
        assert self.stderr.getvalue() == ''
    def test_capture_bad_opt(self) -> None:
        """stderr should be captured when a bad option is passed."""
        _, stderr, _ = mypy.api.run(['--some-bad-option'])
        assert isinstance(stderr, str)
        assert stderr != ''
    def test_capture_empty(self) -> None:
        """stderr should be captured when no arguments are passed."""
        _, stderr, _ = mypy.api.run([])
        assert isinstance(stderr, str)
        assert stderr != ''
    def test_capture_help(self) -> None:
        """stdout should be captured when --help is passed."""
        stdout, _, _ = mypy.api.run(['--help'])
        assert isinstance(stdout, str)
        assert stdout != ''
    def test_capture_version(self) -> None:
        """stdout should be captured when --version is passed."""
        stdout, _, _ = mypy.api.run(['--version'])
        assert isinstance(stdout, str)
        assert stdout != ''

View file

@ -0,0 +1,73 @@
"""Ensure the argparse parser and Options class are in sync.
In particular, verify that the argparse defaults are the same as the Options
defaults, and that argparse doesn't assign any new members to the Options
object it creates.
"""
import argparse
import sys
from mypy.test.helpers import Suite, assert_equal
from mypy.options import Options
from mypy.main import process_options, infer_python_executable
class ArgSuite(Suite):
    """Checks that argparse defaults and Options stay in sync."""
    def test_coherence(self) -> None:
        """Default Options must match the options argparse produces with no args."""
        options = Options()
        _, parsed_options = process_options([], require_targets=False)
        # FIX: test this too. Requires changing working dir to avoid finding 'setup.cfg'
        options.config_file = parsed_options.config_file
        assert_equal(options.snapshot(), parsed_options.snapshot())
    def test_executable_inference(self) -> None:
        """Test the --python-executable flag with --python-version"""
        sys_ver_str = '{ver.major}.{ver.minor}'.format(ver=sys.version_info)
        base = ['file.py']  # dummy file
        # test inference given one (infer the other)
        matching_version = base + ['--python-version={}'.format(sys_ver_str)]
        _, options = process_options(matching_version)
        assert options.python_version == sys.version_info[:2]
        assert options.python_executable == sys.executable
        matching_version = base + ['--python-executable={}'.format(sys.executable)]
        _, options = process_options(matching_version)
        assert options.python_version == sys.version_info[:2]
        assert options.python_executable == sys.executable
        # test inference given both
        matching_version = base + ['--python-version={}'.format(sys_ver_str),
                                   '--python-executable={}'.format(sys.executable)]
        _, options = process_options(matching_version)
        assert options.python_version == sys.version_info[:2]
        assert options.python_executable == sys.executable
        # test that --no-site-packages will disable executable inference
        matching_version = base + ['--python-version={}'.format(sys_ver_str),
                                   '--no-site-packages']
        _, options = process_options(matching_version)
        assert options.python_version == sys.version_info[:2]
        assert options.python_executable is None
        # Test setting python_version/executable from config file
        special_opts = argparse.Namespace()
        special_opts.python_executable = None
        special_opts.python_version = None
        special_opts.no_executable = None
        # first test inferring executable from version
        options = Options()
        options.python_executable = None
        options.python_version = sys.version_info[:2]
        infer_python_executable(options, special_opts)
        assert options.python_version == sys.version_info[:2]
        assert options.python_executable == sys.executable
        # then test inferring version from executable
        options = Options()
        options.python_executable = sys.executable
        infer_python_executable(options, special_opts)
        assert options.python_version == sys.version_info[:2]
        assert options.python_executable == sys.executable

View file

@ -0,0 +1,353 @@
"""Type checker test cases"""
import os
import re
import sys
from typing import Dict, List, Set, Tuple
from mypy import build
from mypy.build import Graph
from mypy.modulefinder import BuildSource, SearchPaths, FindModuleCache
from mypy.test.config import test_temp_dir, test_data_prefix
from mypy.test.data import (
DataDrivenTestCase, DataSuite, FileOperation, module_from_path
)
from mypy.test.helpers import (
assert_string_arrays_equal, normalize_error_messages, assert_module_equivalence,
update_testcase_output, parse_options,
assert_target_equivalence, check_test_output_files, perform_file_operations,
)
from mypy.errors import CompileError
from mypy.semanal_main import core_modules
# List of files that contain test case descriptions.
# NOTE(review): presumably each entry names a file under test-data/unit;
# verify when adding new files.
typecheck_files = [
    'check-basic.test',
    'check-union-or-syntax.test',
    'check-callable.test',
    'check-classes.test',
    'check-statements.test',
    'check-generics.test',
    'check-dynamic-typing.test',
    'check-inference.test',
    'check-inference-context.test',
    'check-kwargs.test',
    'check-overloading.test',
    'check-type-checks.test',
    'check-abstract.test',
    'check-multiple-inheritance.test',
    'check-super.test',
    'check-modules.test',
    'check-typevar-values.test',
    'check-unsupported.test',
    'check-unreachable-code.test',
    'check-unions.test',
    'check-isinstance.test',
    'check-lists.test',
    'check-namedtuple.test',
    'check-narrowing.test',
    'check-typeddict.test',
    'check-type-aliases.test',
    'check-ignore.test',
    'check-type-promotion.test',
    'check-semanal-error.test',
    'check-flags.test',
    'check-incremental.test',
    'check-serialize.test',
    'check-bound.test',
    'check-optional.test',
    'check-fastparse.test',
    'check-warnings.test',
    'check-async-await.test',
    'check-newtype.test',
    'check-class-namedtuple.test',
    'check-selftype.test',
    'check-python2.test',
    'check-columns.test',
    'check-functions.test',
    'check-tuples.test',
    'check-expressions.test',
    'check-generic-subtyping.test',
    'check-varargs.test',
    'check-newsyntax.test',
    'check-protocols.test',
    'check-underscores.test',
    'check-classvar.test',
    'check-enum.test',
    'check-incomplete-fixture.test',
    'check-custom-plugin.test',
    'check-default-plugin.test',
    'check-attr.test',
    'check-ctypes.test',
    'check-dataclasses.test',
    'check-final.test',
    'check-redefine.test',
    'check-literal.test',
    'check-newsemanal.test',
    'check-inline-config.test',
    'check-reports.test',
    'check-errorcodes.test',
    'check-annotated.test',
    'check-parameter-specification.test',
    'check-generic-alias.test',
    'check-typeguard.test',
    'check-functools.test',
    'check-singledispatch.test',
    'check-slots.test',
    'check-formatting.test',
]
# Tests that use Python 3.8-only AST features (like expression-scoped ignores):
# these files are only added when the running interpreter can parse them.
if sys.version_info >= (3, 8):
    typecheck_files.append('check-python38.test')
if sys.version_info >= (3, 9):
    typecheck_files.append('check-python39.test')
if sys.version_info >= (3, 10):
    typecheck_files.append('check-python310.test')
# Special tests for platforms with case-insensitive filesystems.
if sys.platform in ('darwin', 'win32'):
    typecheck_files.extend(['check-modules-case.test'])
class TypeCheckSuite(DataSuite):
    """Data-driven type checker test cases.

    Runs each .test case through build.build(); incremental/serialize cases
    are run once per step (cold cache first, then warm cache steps).
    """
    files = typecheck_files
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Run a test case, dispatching to one or several incremental steps."""
        incremental = ('incremental' in testcase.name.lower()
                       or 'incremental' in testcase.file
                       or 'serialize' in testcase.file)
        if incremental:
            # Incremental tests are run once with a cold cache, once with a warm cache.
            # Expect success on first run, errors from testcase.output (if any) on second run.
            num_steps = max([2] + list(testcase.output2.keys()))
            # Check that there are no file changes beyond the last run (they would be ignored).
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    m = re.search(r'\.([2-9])$', file)
                    if m and int(m.group(1)) > num_steps:
                        raise ValueError(
                            'Output file {} exists though test case only has {} runs'.format(
                                file, num_steps))
            steps = testcase.find_steps()
            for step in range(1, num_steps + 1):
                # Step 1 has no file operations; steps[0] corresponds to step 2.
                idx = step - 2
                ops = steps[idx] if idx < len(steps) and idx >= 0 else []
                self.run_case_once(testcase, ops, step)
        else:
            self.run_case_once(testcase)
    # NOTE(review): the [] default below is shared across calls, but it is
    # never mutated here, so it is safe; a tuple default would be more idiomatic.
    def run_case_once(self, testcase: DataDrivenTestCase,
                      operations: List[FileOperation] = [],
                      incremental_step: int = 0) -> None:
        """Run one (possibly incremental) step of a test case and check output.

        incremental_step == 0 means a plain non-incremental run.
        """
        original_program_text = '\n'.join(testcase.input)
        module_data = self.parse_module(original_program_text, incremental_step)
        # Unload already loaded plugins, they may be updated.
        for file, _ in testcase.files:
            module = module_from_path(file)
            if module.endswith('_plugin') and module in sys.modules:
                del sys.modules[module]
        if incremental_step == 0 or incremental_step == 1:
            # In run 1, copy program text to program file.
            for module_name, program_path, program_text in module_data:
                if module_name == '__main__':
                    with open(program_path, 'w', encoding='utf8') as f:
                        f.write(program_text)
                    break
        elif incremental_step > 1:
            # In runs 2+, copy *.[num] files to * files.
            perform_file_operations(operations)
        # Parse options after moving files (in case mypy.ini is being moved).
        options = parse_options(original_program_text, testcase, incremental_step)
        options.use_builtins_fixtures = True
        options.show_traceback = True
        # Enable some options automatically based on test file name.
        if 'optional' in testcase.file:
            options.strict_optional = True
        if 'columns' in testcase.file:
            options.show_column_numbers = True
        if 'errorcodes' in testcase.file:
            options.show_error_codes = True
        if incremental_step and options.incremental:
            # Don't overwrite # flags: --no-incremental in incremental test cases
            options.incremental = True
        else:
            options.incremental = False
            # Don't waste time writing cache unless we are specifically looking for it
            if not testcase.writescache:
                options.cache_dir = os.devnull
        sources = []
        for module_name, program_path, program_text in module_data:
            # Always set to none so we're forced to reread the module in incremental mode
            sources.append(BuildSource(program_path, module_name,
                                       None if incremental_step else program_text))
        # Make test plugins importable for the duration of the build.
        plugin_dir = os.path.join(test_data_prefix, 'plugins')
        sys.path.insert(0, plugin_dir)
        res = None
        try:
            res = build.build(sources=sources,
                              options=options,
                              alt_lib_path=test_temp_dir)
            a = res.errors
        except CompileError as e:
            a = e.messages
        finally:
            assert sys.path[0] == plugin_dir
            del sys.path[0]
        if testcase.normalize_output:
            a = normalize_error_messages(a)
        # Make sure error messages match
        if incremental_step == 0:
            # Not incremental
            msg = 'Unexpected type checker output ({}, line {})'
            output = testcase.output
        elif incremental_step == 1:
            msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
            output = testcase.output
        elif incremental_step > 1:
            msg = ('Unexpected type checker output in incremental, run {}'.format(
                incremental_step) + ' ({}, line {})')
            output = testcase.output2.get(incremental_step, [])
        else:
            raise AssertionError()
        if output != a and testcase.config.getoption('--update-data', False):
            update_testcase_output(testcase, a)
        assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))
        if res:
            if options.cache_dir != os.devnull:
                self.verify_cache(module_data, res.errors, res.manager, res.graph)
            name = 'targets'
            if incremental_step:
                name += str(incremental_step + 1)
            expected = testcase.expected_fine_grained_targets.get(incremental_step + 1)
            actual = res.manager.processed_targets
            # Skip the initial builtin cycle.
            actual = [t for t in actual
                      if not any(t.startswith(mod)
                                 for mod in core_modules + ['mypy_extensions'])]
            if expected is not None:
                assert_target_equivalence(name, expected, actual)
            if incremental_step > 1:
                suffix = '' if incremental_step == 2 else str(incremental_step - 1)
                expected_rechecked = testcase.expected_rechecked_modules.get(incremental_step - 1)
                if expected_rechecked is not None:
                    assert_module_equivalence(
                        'rechecked' + suffix,
                        expected_rechecked, res.manager.rechecked_modules)
                expected_stale = testcase.expected_stale_modules.get(incremental_step - 1)
                if expected_stale is not None:
                    assert_module_equivalence(
                        'stale' + suffix,
                        expected_stale, res.manager.stale_modules)
        if testcase.output_files:
            check_test_output_files(testcase, incremental_step, strip_prefix='tmp/')
    def verify_cache(self, module_data: List[Tuple[str, str, str]], a: List[str],
                     manager: build.BuildManager, graph: Graph) -> None:
        """Check that cache files exist exactly for the error-free modules."""
        # There should be valid cache metadata for each module except
        # for those that had an error in themselves or one of their
        # dependencies.
        error_paths = self.find_error_message_paths(a)
        busted_paths = {m.path for id, m in manager.modules.items()
                        if graph[id].transitive_error}
        modules = self.find_module_files(manager)
        modules.update({module_name: path for module_name, path, text in module_data})
        missing_paths = self.find_missing_cache_files(modules, manager)
        # We would like to assert error_paths.issubset(busted_paths)
        # but this runs into trouble because while some 'notes' are
        # really errors that cause an error to be marked, many are
        # just notes attached to other errors.
        assert error_paths or not busted_paths, "Some modules reported error despite no errors"
        if not missing_paths == busted_paths:
            raise AssertionError("cache data discrepancy %s != %s" %
                                 (missing_paths, busted_paths))
        # The cache dir must carry the standard "do not back up" markers.
        assert os.path.isfile(os.path.join(manager.options.cache_dir, ".gitignore"))
        cachedir_tag = os.path.join(manager.options.cache_dir, "CACHEDIR.TAG")
        assert os.path.isfile(cachedir_tag)
        with open(cachedir_tag) as f:
            assert f.read().startswith("Signature: 8a477f597d28d172789f06886806bc55")
    def find_error_message_paths(self, a: List[str]) -> Set[str]:
        """Extract the set of file paths mentioned in diagnostic lines."""
        hits = set()
        for line in a:
            m = re.match(r'([^\s:]+):(\d+:)?(\d+:)? (error|warning|note):', line)
            if m:
                p = m.group(1)
                hits.add(p)
        return hits
    def find_module_files(self, manager: build.BuildManager) -> Dict[str, str]:
        """Map each loaded module id to its source file path."""
        modules = {}
        for id, module in manager.modules.items():
            modules[id] = module.path
        return modules
    def find_missing_cache_files(self, modules: Dict[str, str],
                                 manager: build.BuildManager) -> Set[str]:
        """Return source paths of modules that lack valid cache metadata."""
        ignore_errors = True
        missing = {}
        for id, path in modules.items():
            meta = build.find_cache_meta(id, path, manager)
            if not build.validate_meta(meta, id, path, ignore_errors, manager):
                missing[id] = path
        return set(missing.values())
    def parse_module(self,
                     program_text: str,
                     incremental_step: int = 0) -> List[Tuple[str, str, str]]:
        """Return the module and program names for a test case.
        Normally, the unit tests will parse the default ('__main__')
        module and follow all the imports listed there. You can override
        this behavior and instruct the tests to check multiple modules
        by using a comment like this in the test case input:
        # cmd: mypy -m foo.bar foo.baz
        You can also use `# cmdN:` to have a different cmd for incremental
        step N (2, 3, ...).
        Return a list of tuples (module name, file name, program text).
        """
        m = re.search('# cmd: mypy -m ([a-zA-Z0-9_. ]+)$', program_text, flags=re.MULTILINE)
        if incremental_step > 1:
            alt_regex = '# cmd{}: mypy -m ([a-zA-Z0-9_. ]+)$'.format(incremental_step)
            alt_m = re.search(alt_regex, program_text, flags=re.MULTILINE)
            if alt_m is not None:
                # Optionally return a different command if in a later step
                # of incremental mode, otherwise default to reusing the
                # original cmd.
                m = alt_m
        if m:
            # The test case wants to use a non-default main
            # module. Look up the module and give it as the thing to
            # analyze.
            module_names = m.group(1)
            out = []
            search_paths = SearchPaths((test_temp_dir,), (), (), ())
            cache = FindModuleCache(search_paths, fscache=None, options=None)
            for module_name in module_names.split(' '):
                path = cache.find_module(module_name)
                assert isinstance(path, str), "Can't find ad hoc case file: %s" % module_name
                with open(path, encoding='utf8') as f:
                    program_text = f.read()
                out.append((module_name, path, program_text))
            return out
        else:
            return [('__main__', 'main', program_text)]

View file

@ -0,0 +1,136 @@
"""Test cases for the command line.
To begin we test that "mypy <directory>[/]" always recurses down the
whole tree.
"""
import os
import re
import subprocess
import sys
from typing import List
from typing import Optional
from mypy.test.config import test_temp_dir, PREFIX
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import (
assert_string_arrays_equal, normalize_error_messages, check_test_output_files
)
# Path to Python 3 interpreter
python3_path = sys.executable
# Files containing test case descriptions.
cmdline_files = [
'cmdline.test',
'cmdline.pyproject.test',
'reports.test',
'envvars.test',
]
class PythonCmdlineSuite(DataSuite):
    """Data-driven suite that runs mypy as an external command-line process."""
    files = cmdline_files
    native_sep = True
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        # Step 1 always runs; testcase.output2 maps later incremental step
        # numbers to their expected output, so each gets its own run.
        for step in [1] + sorted(testcase.output2):
            test_python_cmdline(testcase, step)
def test_python_cmdline(testcase: DataDrivenTestCase, step: int) -> None:
    """Run one step of a command-line test case.

    Writes the test input to a program file, runs ``python -m mypy``
    on it in a subprocess, and compares the captured stdout/stderr and
    return code against the expected output for this step.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    # The first input line holds '# cmd: ...'; an optional second line
    # may hold '# cwd: ...' to run mypy from a subdirectory.
    args = parse_args(testcase.input[0])
    custom_cwd = parse_cwd(testcase.input[1]) if len(testcase.input) > 1 else None
    args.append('--show-traceback')
    if '--error-summary' not in args:
        args.append('--no-error-summary')
    # Type check the program.
    fixed = [python3_path, '-m', 'mypy']
    env = os.environ.copy()
    # Prevent terminal-width-dependent output wrapping in the child.
    env.pop('COLUMNS', None)
    env['PYTHONPATH'] = PREFIX
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               cwd=os.path.join(
                                   test_temp_dir,
                                   custom_cwd or ""
                               ),
                               env=env)
    outb, errb = process.communicate()
    result = process.returncode
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    err = [s.rstrip('\n\r') for s in str(errb, 'utf8').splitlines()]
    if "PYCHARM_HOSTED" in os.environ:
        for pos, line in enumerate(err):
            if line.startswith('pydev debugger: '):
                # Delete the attaching debugger message itself, plus the extra newline added.
                del err[pos:pos + 2]
                break
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    if testcase.output_files:
        # Ignore stdout, but we insist on empty stderr and zero status.
        if err or result:
            raise AssertionError(
                'Expected zero status and empty stderr%s, got %d and\n%s' %
                (' on step %d' % step if testcase.output2 else '',
                 result, '\n'.join(err + out)))
        check_test_output_files(testcase, step)
    else:
        if testcase.normalize_output:
            out = normalize_error_messages(err + out)
        # mypy exits 1 when it reported errors; append the return code
        # to the output only when it differs from that expectation.
        obvious_result = 1 if out else 0
        if obvious_result != result:
            out.append('== Return code: {}'.format(result))
        expected_out = testcase.output if step == 1 else testcase.output2[step]
        # Strip "tmp/" out of the test so that # E: works...
        expected_out = [s.replace("tmp" + os.sep, "") for s in expected_out]
        assert_string_arrays_equal(expected_out, out,
                                   'Invalid output ({}, line {}){}'.format(
                                       testcase.file, testcase.line,
                                       ' on step %d' % step if testcase.output2 else ''))
def parse_args(line: str) -> List[str]:
    """Extract the mypy command-line arguments from a test's first line.

    The line must have the form

      # cmd: mypy <options>

    For example: '# cmd: mypy pkg/'.  Returns an empty list when the
    marker is missing, which makes mypy itself report the error.
    """
    match = re.match('# cmd: mypy (.*)$', line)
    return match.group(1).split() if match else []
def parse_cwd(line: str) -> Optional[str]:
    """Extract an optional working directory from a test's second line.

    The line must have the form

      # cwd: <directory>

    For example: '# cwd: main/subdir'.  Returns None when absent.
    """
    match = re.match('# cwd: (.*)$', line)
    if not match:
        return None
    return match.group(1)

View file

@ -0,0 +1,133 @@
"""End-to-end test cases for the daemon (dmypy).
These are special because they run multiple shell commands.
This also includes some unit tests.
"""
import os
import subprocess
import sys
import tempfile
import unittest
from typing import List, Tuple
from mypy.modulefinder import SearchPaths
from mypy.fscache import FileSystemCache
from mypy.dmypy_server import filter_out_missing_top_level_packages
from mypy.test.config import test_temp_dir, PREFIX
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal, normalize_error_messages
# Files containing test cases descriptions.
daemon_files = [
'daemon.test',
]
class DaemonSuite(DataSuite):
    """Data-driven suite for end-to-end dmypy (daemon) tests."""
    files = daemon_files
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        try:
            test_daemon(testcase)
        finally:
            # Kill the daemon if it's still running.
            run_cmd('dmypy kill')
def test_daemon(testcase: DataDrivenTestCase) -> None:
    """Execute each shell command of a daemon test and verify its output."""
    assert testcase.old_cwd is not None, "test was not properly set up"
    for idx, script_step in enumerate(parse_script(testcase.input)):
        command, expected_lines = script_step[0], script_step[1:]
        assert command.startswith('$')
        # Drop the '$' prompt and substitute the current interpreter.
        command = command[1:].strip().replace('{python}', sys.executable)
        status, output = run_cmd(command)
        actual_lines = normalize_error_messages(output.splitlines())
        if status:
            actual_lines.append('== Return code: %d' % status)
        assert_string_arrays_equal(expected_lines,
                                   actual_lines,
                                   "Command %d (%s) did not give expected output" %
                                   (idx + 1, command))
def parse_script(input: List[str]) -> List[List[str]]:
    """Split testcase.input into command steps.

    A step begins at a line starting with '$'.  That line (less the
    '$') is the shell command; the lines up to the next '$' line are
    its expected output.
    """
    steps: List[List[str]] = []
    current: List[str] = []
    for line in input:
        if line.startswith('$') and current:
            # Flush the step accumulated so far before starting a new one.
            assert current[0].startswith('$')
            steps.append(current)
            current = []
        current.append(line)
    if current:
        steps.append(current)
    return steps
def run_cmd(input: str) -> Tuple[int, str]:
    """Run a shell command and return (exit status, combined output).

    Bare 'dmypy ...' and 'mypy ...' commands are rewritten to run the
    corresponding module under the current Python interpreter.
    """
    if input.startswith('dmypy '):
        input = '{} -m mypy.{}'.format(sys.executable, input)
    if input.startswith('mypy '):
        input = '{} -m{}'.format(sys.executable, input)
    env = dict(os.environ, PYTHONPATH=PREFIX)
    try:
        output = subprocess.check_output(input,
                                         shell=True,
                                         stderr=subprocess.STDOUT,
                                         universal_newlines=True,
                                         cwd=test_temp_dir,
                                         env=env)
    except subprocess.CalledProcessError as err:
        return err.returncode, err.output
    return 0, output
class DaemonUtilitySuite(unittest.TestCase):
    """Unit tests for helpers"""
    def test_filter_out_missing_top_level_packages(self) -> None:
        # Build a throwaway tree with a mix of packages, modules, stubs,
        # and a non-Python file, then check which top-level names survive.
        with tempfile.TemporaryDirectory() as td:
            self.make_file(td, 'base/a/')
            self.make_file(td, 'base/b.py')
            self.make_file(td, 'base/c.pyi')
            self.make_file(td, 'base/missing.txt')
            self.make_file(td, 'typeshed/d.pyi')
            self.make_file(td, 'typeshed/@python2/e')
            self.make_file(td, 'pkg1/f-stubs')
            self.make_file(td, 'pkg2/g-python2-stubs')
            self.make_file(td, 'mpath/sub/long_name/')
            def makepath(p: str) -> str:
                return os.path.join(td, p)
            search = SearchPaths(python_path=(makepath('base'),),
                                 mypy_path=(makepath('mpath/sub'),),
                                 package_path=(makepath('pkg1'), makepath('pkg2')),
                                 typeshed_path=(makepath('typeshed'),))
            fscache = FileSystemCache()
            res = filter_out_missing_top_level_packages(
                {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'long_name', 'ff', 'missing'},
                search,
                fscache)
            # 'ff' has no file at all and 'missing' only matches a .txt
            # file, so both must be filtered out.
            assert res == {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'long_name'}
    def make_file(self, base: str, path: str) -> None:
        """Create a file (or, for a trailing '/', just a directory) under base."""
        fullpath = os.path.join(base, path)
        os.makedirs(os.path.dirname(fullpath), exist_ok=True)
        if not path.endswith('/'):
            with open(fullpath, 'w') as f:
                f.write('# test file')

View file

@ -0,0 +1,92 @@
"""Test cases for generating node-level dependencies (for fine-grained incremental checking)"""
import os
from collections import defaultdict
from typing import List, Tuple, Dict, Optional, Set
from typing_extensions import DefaultDict
from mypy import build, defaults
from mypy.modulefinder import BuildSource
from mypy.errors import CompileError
from mypy.nodes import MypyFile, Expression
from mypy.options import Options
from mypy.server.deps import get_dependencies
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal, parse_options
from mypy.types import Type
from mypy.typestate import TypeState
# Only dependencies in these modules are dumped
dumped_modules = ['__main__', 'pkg', 'pkg.mod']
class GetDependenciesSuite(DataSuite):
    """Data-driven suite that dumps fine-grained dependency triggers."""
    files = [
        'deps.test',
        'deps-types.test',
        'deps-generics.test',
        'deps-expressions.test',
        'deps-statements.test',
        'deps-classes.test',
    ]
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Build the test program and compare its dumped dependency map."""
        src = '\n'.join(testcase.input)
        # '# __dump_all__' requests dependencies from (almost) all modules,
        # not just the default dumped_modules.
        dump_all = '# __dump_all__' in src
        options = parse_options(src, testcase, incremental_step=1)
        if testcase.name.endswith('python2'):
            options.python_version = defaults.PYTHON2_VERSION
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.cache_dir = os.devnull
        options.export_types = True
        options.preserve_asts = True
        messages, files, type_map = self.build(src, options)
        a = messages
        if files is None or type_map is None:
            if not a:
                a = ['Unknown compile error (likely syntax error in test case or fixture)']
        else:
            deps: DefaultDict[str, Set[str]] = defaultdict(set)
            for module in files:
                # NOTE: 'and' binds tighter than 'or' here, i.e. this reads
                # (module in dumped_modules) or (dump_all and module not in <fixtures>).
                if module in dumped_modules or dump_all and module not in ('abc',
                                                                          'typing',
                                                                          'mypy_extensions',
                                                                          'typing_extensions',
                                                                          'enum'):
                    new_deps = get_dependencies(files[module], type_map, options.python_version,
                                                options)
                    for source in new_deps:
                        deps[source].update(new_deps[source])
            TypeState.add_all_protocol_deps(deps)
            for source, targets in sorted(deps.items()):
                if source.startswith(('<enum', '<typing', '<mypy')):
                    # Remove noise.
                    continue
                line = '%s -> %s' % (source, ', '.join(sorted(targets)))
                # Clean up output a bit
                line = line.replace('__main__', 'm')
                a.append(line)
        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid output ({}, line {})'.format(testcase.file,
                                                  testcase.line))
    def build(self,
              source: str,
              options: Options) -> Tuple[List[str],
                                         Optional[Dict[str, MypyFile]],
                                         Optional[Dict[Expression, Type]]]:
        """Build source; on CompileError return (messages, None, None)."""
        try:
            result = build.build(sources=[BuildSource('main', None, source)],
                                 options=options,
                                 alt_lib_path=test_temp_dir)
        except CompileError as e:
            # TODO: Should perhaps not return None here.
            return e.messages, None, None
        return result.errors, result.files, result.types

View file

@ -0,0 +1,66 @@
"""Test cases for AST diff (used for fine-grained incremental checking)"""
import os
from typing import List, Tuple, Dict, Optional
from mypy import build
from mypy.modulefinder import BuildSource
from mypy.defaults import PYTHON3_VERSION
from mypy.errors import CompileError
from mypy.nodes import MypyFile
from mypy.options import Options
from mypy.server.astdiff import snapshot_symbol_table, compare_symbol_table_snapshots
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal, parse_options
class ASTDiffSuite(DataSuite):
    """Data-driven suite that diffs symbol-table snapshots of two builds."""
    files = [
        'diff.test',
    ]
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Build the original and 'next' sources and dump the diff triggers."""
        first_src = '\n'.join(testcase.input)
        files_dict = dict(testcase.files)
        # The second program version is supplied via the tmp/next.py file.
        second_src = files_dict['tmp/next.py']
        options = parse_options(first_src, testcase, 1)
        messages1, files1 = self.build(first_src, options)
        messages2, files2 = self.build(second_src, options)
        a = []
        if messages1:
            a.extend(messages1)
        if messages2:
            a.append('== next ==')
            a.extend(messages2)
        assert files1 is not None and files2 is not None, ('cases where CompileError'
                                                           ' occurred should not be run')
        prefix = '__main__'
        snapshot1 = snapshot_symbol_table(prefix, files1['__main__'].names)
        snapshot2 = snapshot_symbol_table(prefix, files2['__main__'].names)
        diff = compare_symbol_table_snapshots(prefix, snapshot1, snapshot2)
        for trigger in sorted(diff):
            a.append(trigger)
        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid output ({}, line {})'.format(testcase.file,
                                                  testcase.line))
    def build(self, source: str,
              options: Options) -> Tuple[List[str], Optional[Dict[str, MypyFile]]]:
        """Build source; on CompileError return (messages, None)."""
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.cache_dir = os.devnull
        options.python_version = PYTHON3_VERSION
        try:
            result = build.build(sources=[BuildSource('main', None, source)],
                                 options=options,
                                 alt_lib_path=test_temp_dir)
        except CompileError as e:
            # TODO: Is it okay to return None?
            return e.messages, None
        return result.errors, result.files

View file

@ -0,0 +1,46 @@
"""Tests for mypy incremental error output."""
from typing import List
from mypy import build
from mypy.test.helpers import assert_string_arrays_equal
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.modulefinder import BuildSource
from mypy.errors import CompileError
from mypy.options import Options
class ErrorStreamSuite(DataSuite):
    """Data-driven suite checking that errors are flushed incrementally."""
    required_out_section = True
    base_path = '.'
    files = ['errorstream.test']
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_error_stream(testcase)
def test_error_stream(testcase: DataDrivenTestCase) -> None:
    """Run one error-streaming test case.

    Builds the test program while recording every batch of errors that
    mypy flushes, then compares the recorded log against the expected
    output of the test case.
    """
    options = Options()
    options.show_traceback = True
    flushed_log: List[str] = []
    def flush_errors(msgs: List[str], serious: bool) -> None:
        # Record each non-empty flush under a separator header.
        if msgs:
            flushed_log.append('==== Errors flushed ====')
            flushed_log.extend(msgs)
    program_text = '\n'.join(testcase.input)
    sources = [BuildSource('main', '__main__', program_text)]
    try:
        build.build(sources=sources,
                    options=options,
                    flush_errors=flush_errors)
    except CompileError as e:
        # All errors must already have been flushed by the callback.
        assert e.messages == []
    assert_string_arrays_equal(testcase.output, flushed_log,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))

View file

@ -0,0 +1,338 @@
"""Test cases for fine-grained incremental checking.
Each test cases runs a batch build followed by one or more fine-grained
incremental steps. We verify that each step produces the expected output.
See the comment at the top of test-data/unit/fine-grained.test for more
information.
N.B.: Unlike most of the other test suites, testfinegrained does not
rely on an alt_lib_path for finding source files. This means that they
can test interactions with the lib_path that is built implicitly based
on specified sources.
"""
import os
import re
from typing import List, Dict, Any, Tuple, Union, cast
from mypy import build
from mypy.modulefinder import BuildSource
from mypy.errors import CompileError
from mypy.options import Options
from mypy.test.config import test_temp_dir
from mypy.test.data import (
DataDrivenTestCase, DataSuite, UpdateFile, DeleteFile
)
from mypy.test.helpers import (
assert_string_arrays_equal, parse_options, assert_module_equivalence,
assert_target_equivalence, perform_file_operations,
)
from mypy.server.mergecheck import check_consistency
from mypy.dmypy_util import DEFAULT_STATUS_FILE
from mypy.dmypy_server import Server
from mypy.config_parser import parse_config_file
from mypy.find_sources import create_source_list
import pytest
# Set to True to perform (somewhat expensive) checks for duplicate AST nodes after merge
CHECK_CONSISTENCY = False
class FineGrainedSuite(DataSuite):
    """Data-driven suite for fine-grained incremental checking.

    Each case runs a batch build followed by one or more fine-grained
    steps driven by file update/delete operations parsed from the case.
    """
    files = [
        'fine-grained.test',
        'fine-grained-cycles.test',
        'fine-grained-blockers.test',
        'fine-grained-modules.test',
        'fine-grained-follow-imports.test',
        'fine-grained-suggest.test',
        'fine-grained-attr.test',
    ]
    # Whether to use the fine-grained cache in the testing. This is overridden
    # by a trivial subclass to produce a suite that uses the cache.
    use_cache = False
    def should_skip(self, testcase: DataDrivenTestCase) -> bool:
        """Return True if this case should be skipped in the current mode."""
        # Decide whether to skip the test. This could have been structured
        # as a filter() classmethod also, but we want the tests reported
        # as skipped, not just elided.
        if self.use_cache:
            if testcase.only_when == '-only_when_nocache':
                return True
            # TODO: In caching mode we currently don't well support
            # starting from cached states with errors in them.
            if testcase.output and testcase.output[0] != '==':
                return True
        else:
            if testcase.only_when == '-only_when_cache':
                return True
        return False
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Run the initial build plus all fine-grained steps of a case."""
        if self.should_skip(testcase):
            pytest.skip()
            return
        main_src = '\n'.join(testcase.input)
        main_path = os.path.join(test_temp_dir, 'main')
        with open(main_path, 'w', encoding='utf8') as f:
            f.write(main_src)
        options = self.get_options(main_src, testcase, build_cache=False)
        build_options = self.get_options(main_src, testcase, build_cache=True)
        server = Server(options, DEFAULT_STATUS_FILE)
        num_regular_incremental_steps = self.get_build_steps(main_src)
        step = 1
        sources = self.parse_sources(main_src, step, options)
        # Early steps may be regular (cache-producing) builds; later ones
        # go through the fine-grained server.
        if step <= num_regular_incremental_steps:
            messages = self.build(build_options, sources)
        else:
            messages = self.run_check(server, sources)
        a = []
        if messages:
            a.extend(normalize_messages(messages))
        assert testcase.tmpdir
        a.extend(self.maybe_suggest(step, server, main_src, testcase.tmpdir.name))
        if server.fine_grained_manager:
            if CHECK_CONSISTENCY:
                check_consistency(server.fine_grained_manager)
        steps = testcase.find_steps()
        all_triggered = []
        for operations in steps:
            step += 1
            output, triggered = self.perform_step(
                operations,
                server,
                options,
                build_options,
                testcase,
                main_src,
                step,
                num_regular_incremental_steps,
            )
            a.append('==')
            a.extend(output)
            all_triggered.extend(triggered)
        # Normalize paths in test output (for Windows).
        a = [line.replace('\\', '/') for line in a]
        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid output ({}, line {})'.format(
                testcase.file, testcase.line))
        if testcase.triggered:
            assert_string_arrays_equal(
                testcase.triggered,
                self.format_triggered(all_triggered),
                'Invalid active triggers ({}, line {})'.format(testcase.file,
                                                               testcase.line))
    def get_options(self,
                    source: str,
                    testcase: DataDrivenTestCase,
                    build_cache: bool,) -> Options:
        """Build Options for either a regular build (build_cache=True)
        or a fine-grained incremental run (build_cache=False)."""
        # This handles things like '# flags: --foo'.
        options = parse_options(source, testcase, incremental_step=1)
        options.incremental = True
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.error_summary = False
        options.fine_grained_incremental = not build_cache
        options.use_fine_grained_cache = self.use_cache and not build_cache
        options.cache_fine_grained = self.use_cache
        options.local_partial_types = True
        if re.search('flags:.*--follow-imports', source) is None:
            # Override the default for follow_imports
            options.follow_imports = 'error'
        # A mypy.ini/pyproject.toml listed among the case files is applied.
        for name, _ in testcase.files:
            if 'mypy.ini' in name or 'pyproject.toml' in name:
                parse_config_file(options, lambda: None, name)
                break
        return options
    def run_check(self, server: Server, sources: List[BuildSource]) -> List[str]:
        """Run one fine-grained check via the server; return output lines."""
        response = server.check(sources, is_tty=False, terminal_width=-1)
        out = cast(str, response['out'] or response['err'])
        return out.splitlines()
    def build(self,
              options: Options,
              sources: List[BuildSource]) -> List[str]:
        """Run a regular (non-fine-grained) build; return its messages."""
        try:
            result = build.build(sources=sources,
                                 options=options)
        except CompileError as e:
            return e.messages
        return result.errors
    def format_triggered(self, triggered: List[List[str]]) -> List[str]:
        """Format triggered targets per step, dropping '__>'-suffixed noise."""
        result = []
        for n, triggers in enumerate(triggered):
            filtered = [trigger for trigger in triggers
                        if not trigger.endswith('__>')]
            filtered = sorted(filtered)
            # Step numbers start at 2: index 0 corresponds to the first
            # fine-grained step after the initial build.
            result.append(('%d: %s' % (n + 2, ', '.join(filtered))).strip())
        return result
    def get_build_steps(self, program_text: str) -> int:
        """Get the number of regular incremental steps to run, from the test source"""
        if not self.use_cache:
            return 0
        m = re.search('# num_build_steps: ([0-9]+)$', program_text, flags=re.MULTILINE)
        if m is not None:
            return int(m.group(1))
        return 1
    def perform_step(self,
                     operations: List[Union[UpdateFile, DeleteFile]],
                     server: Server,
                     options: Options,
                     build_options: Options,
                     testcase: DataDrivenTestCase,
                     main_src: str,
                     step: int,
                     num_regular_incremental_steps: int) -> Tuple[List[str], List[List[str]]]:
        """Perform one fine-grained incremental build step (after some file updates/deletions).
        Return (mypy output, triggered targets).
        """
        perform_file_operations(operations)
        sources = self.parse_sources(main_src, step, options)
        if step <= num_regular_incremental_steps:
            new_messages = self.build(build_options, sources)
        else:
            new_messages = self.run_check(server, sources)
        updated: List[str] = []
        changed: List[str] = []
        targets: List[str] = []
        triggered = []
        if server.fine_grained_manager:
            if CHECK_CONSISTENCY:
                check_consistency(server.fine_grained_manager)
            triggered.append(server.fine_grained_manager.triggered)
            updated = server.fine_grained_manager.updated_modules
            changed = [mod for mod, file in server.fine_grained_manager.changed_modules]
            targets = server.fine_grained_manager.processed_targets
        # Stale/rechecked expectations are keyed by the previous step number.
        expected_stale = testcase.expected_stale_modules.get(step - 1)
        if expected_stale is not None:
            assert_module_equivalence(
                'stale' + str(step - 1),
                expected_stale, changed)
        expected_rechecked = testcase.expected_rechecked_modules.get(step - 1)
        if expected_rechecked is not None:
            assert_module_equivalence(
                'rechecked' + str(step - 1),
                expected_rechecked, updated)
        expected = testcase.expected_fine_grained_targets.get(step)
        if expected:
            assert_target_equivalence(
                'targets' + str(step),
                expected, targets)
        new_messages = normalize_messages(new_messages)
        a = new_messages
        assert testcase.tmpdir
        a.extend(self.maybe_suggest(step, server, main_src, testcase.tmpdir.name))
        return a, triggered
    def parse_sources(self, program_text: str,
                      incremental_step: int,
                      options: Options) -> List[BuildSource]:
        """Return target BuildSources for a test case.
        Normally, the unit tests will check all files included in the test
        case. This differs from how testcheck works by default, as dmypy
        doesn't currently support following imports.
        You can override this behavior and instruct the tests to check
        multiple modules by using a comment like this in the test case
        input:
        # cmd: main a.py
        You can also use `# cmdN:` to have a different cmd for incremental
        step N (2, 3, ...).
        """
        m = re.search('# cmd: mypy ([a-zA-Z0-9_./ ]+)$', program_text, flags=re.MULTILINE)
        regex = '# cmd{}: mypy ([a-zA-Z0-9_./ ]+)$'.format(incremental_step)
        alt_m = re.search(regex, program_text, flags=re.MULTILINE)
        if alt_m is not None:
            # Optionally return a different command if in a later step
            # of incremental mode, otherwise default to reusing the
            # original cmd.
            m = alt_m
        if m:
            # The test case wants to use a non-default set of files.
            paths = [os.path.join(test_temp_dir, path) for path in m.group(1).strip().split()]
            return create_source_list(paths, options)
        else:
            base = BuildSource(os.path.join(test_temp_dir, 'main'), '__main__', None)
            # Use expand_dir instead of create_source_list to avoid complaints
            # when there aren't any .py files in an increment
            return [base] + create_source_list([test_temp_dir], options,
                                               allow_empty_dir=True)
    def maybe_suggest(self, step: int, server: Server, src: str, tmp_dir: str) -> List[str]:
        """Run any '# suggestN:' commands for this step and collect output."""
        output: List[str] = []
        targets = self.get_suggest(src, step)
        for flags, target in targets:
            json = '--json' in flags
            callsites = '--callsites' in flags
            no_any = '--no-any' in flags
            no_errors = '--no-errors' in flags
            try_text = '--try-text' in flags
            m = re.match('--flex-any=([0-9.]+)', flags)
            flex_any = float(m.group(1)) if m else None
            m = re.match(r'--use-fixme=(\w+)', flags)
            use_fixme = m.group(1) if m else None
            m = re.match('--max-guesses=([0-9]+)', flags)
            max_guesses = int(m.group(1)) if m else None
            res = cast(Dict[str, Any],
                       server.cmd_suggest(
                           target.strip(), json=json, no_any=no_any, no_errors=no_errors,
                           try_text=try_text, flex_any=flex_any, use_fixme=use_fixme,
                           callsites=callsites, max_guesses=max_guesses))
            val = res['error'] if 'error' in res else res['out'] + res['err']
            if json:
                # JSON contains already escaped \ on Windows, so requires a bit of care.
                val = val.replace('\\\\', '\\')
                val = val.replace(os.path.realpath(tmp_dir) + os.path.sep, '')
            output.extend(val.strip().split('\n'))
        return normalize_messages(output)
    def get_suggest(self, program_text: str,
                    incremental_step: int) -> List[Tuple[str, str]]:
        """Find '# suggestN:' (flags, target) pairs for the given step."""
        step_bit = '1?' if incremental_step == 1 else str(incremental_step)
        regex = '# suggest{}: (--[a-zA-Z0-9_\\-./=?^ ]+ )*([a-zA-Z0-9_.:/?^ ]+)$'.format(step_bit)
        m = re.findall(regex, program_text, flags=re.MULTILINE)
        return m
def normalize_messages(messages: List[str]) -> List[str]:
    """Strip a leading 'tmp<sep>' path prefix from each message line."""
    prefix_pattern = '^tmp' + re.escape(os.sep)
    return [re.sub(prefix_pattern, '', line) for line in messages]

View file

@ -0,0 +1,15 @@
"""Tests for fine-grained incremental checking using the cache.
All of the real code for this lives in testfinegrained.py.
"""
# We can't "import FineGrainedSuite from ..." because that will cause pytest
# to collect the non-caching tests when running this file.
import mypy.test.testfinegrained
class FineGrainedCacheSuite(mypy.test.testfinegrained.FineGrainedSuite):
    """FineGrainedSuite variant that exercises the fine-grained cache."""
    use_cache = True
    # Distinguishes reported test names from the non-cached suite's.
    test_name_suffix = '_cached'
    files = (
        mypy.test.testfinegrained.FineGrainedSuite.files + ['fine-grained-cache-incremental.test'])

View file

@ -0,0 +1,51 @@
from unittest import TestCase, main
from mypy.util import trim_source_line, split_words
class FancyErrorFormattingTestCases(TestCase):
    """Unit tests for error-message source trimming and word splitting."""
    def test_trim_source(self) -> None:
        # trim_source_line returns (trimmed line, column offset adjustment).
        assert trim_source_line('0123456789abcdef',
                                max_len=16, col=5, min_width=2) == ('0123456789abcdef', 0)
        # Locations near start.
        assert trim_source_line('0123456789abcdef',
                                max_len=7, col=0, min_width=2) == ('0123456...', 0)
        assert trim_source_line('0123456789abcdef',
                                max_len=7, col=4, min_width=2) == ('0123456...', 0)
        # Middle locations.
        assert trim_source_line('0123456789abcdef',
                                max_len=7, col=5, min_width=2) == ('...1234567...', -2)
        assert trim_source_line('0123456789abcdef',
                                max_len=7, col=6, min_width=2) == ('...2345678...', -1)
        assert trim_source_line('0123456789abcdef',
                                max_len=7, col=8, min_width=2) == ('...456789a...', 1)
        # Locations near the end.
        assert trim_source_line('0123456789abcdef',
                                max_len=7, col=11, min_width=2) == ('...789abcd...', 4)
        assert trim_source_line('0123456789abcdef',
                                max_len=7, col=13, min_width=2) == ('...9abcdef', 6)
        assert trim_source_line('0123456789abcdef',
                                max_len=7, col=15, min_width=2) == ('...9abcdef', 6)
    def test_split_words(self) -> None:
        # Quoted type names like "Some[Long, Types]" stay one word.
        assert split_words('Simple message') == ['Simple', 'message']
        assert split_words('Message with "Some[Long, Types]"'
                           ' in it') == ['Message', 'with',
                                         '"Some[Long, Types]"', 'in', 'it']
        assert split_words('Message with "Some[Long, Types]"'
                           ' and [error-code]') == ['Message', 'with', '"Some[Long, Types]"',
                                                    'and', '[error-code]']
        assert split_words('"Type[Stands, First]" then words') == ['"Type[Stands, First]"',
                                                                  'then', 'words']
        assert split_words('First words "Then[Stands, Type]"') == ['First', 'words',
                                                                   '"Then[Stands, Type]"']
        assert split_words('"Type[Only, Here]"') == ['"Type[Only, Here]"']
        assert split_words('OneWord') == ['OneWord']
        assert split_words(' ') == ['', '']
# Allow running this test module directly (unittest.main entry point).
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,100 @@
"""Unit tests for file system cache."""
import os
import shutil
import sys
import tempfile
import unittest
from typing import Optional
from mypy.fscache import FileSystemCache
class TestFileSystemCache(unittest.TestCase):
    """Unit tests for FileSystemCache.isfile_case (case-sensitive checks)."""
    def setUp(self) -> None:
        # Work inside a fresh temp directory so relative paths are isolated.
        self.tempdir = tempfile.mkdtemp()
        self.oldcwd = os.getcwd()
        os.chdir(self.tempdir)
        self.fscache = FileSystemCache()
    def tearDown(self) -> None:
        os.chdir(self.oldcwd)
        shutil.rmtree(self.tempdir)
    def test_isfile_case_1(self) -> None:
        self.make_file('bar.py')
        self.make_file('pkg/sub_package/__init__.py')
        self.make_file('pkg/sub_package/foo.py')
        # Run twice to test both cached and non-cached code paths.
        for i in range(2):
            assert self.isfile_case('bar.py')
            assert self.isfile_case('pkg/sub_package/__init__.py')
            assert self.isfile_case('pkg/sub_package/foo.py')
            assert not self.isfile_case('non_existent.py')
            assert not self.isfile_case('pkg/non_existent.py')
            assert not self.isfile_case('pkg/')
            assert not self.isfile_case('bar.py/')
        for i in range(2):
            # Wrong-case variants must be rejected even on case-insensitive
            # filesystems.
            assert not self.isfile_case('Bar.py')
            assert not self.isfile_case('pkg/sub_package/__init__.PY')
            assert not self.isfile_case('pkg/Sub_Package/foo.py')
            assert not self.isfile_case('Pkg/sub_package/foo.py')
    def test_isfile_case_2(self) -> None:
        self.make_file('bar.py')
        self.make_file('pkg/sub_package/__init__.py')
        self.make_file('pkg/sub_package/foo.py')
        # Run twice to test both cached and non-cached code paths.
        # This reverses the order of checks from test_isfile_case_1.
        for i in range(2):
            assert not self.isfile_case('Bar.py')
            assert not self.isfile_case('pkg/sub_package/__init__.PY')
            assert not self.isfile_case('pkg/Sub_Package/foo.py')
            assert not self.isfile_case('Pkg/sub_package/foo.py')
        for i in range(2):
            assert self.isfile_case('bar.py')
            assert self.isfile_case('pkg/sub_package/__init__.py')
            assert self.isfile_case('pkg/sub_package/foo.py')
            assert not self.isfile_case('non_existent.py')
            assert not self.isfile_case('pkg/non_existent.py')
    def test_isfile_case_3(self) -> None:
        self.make_file('bar.py')
        self.make_file('pkg/sub_package/__init__.py')
        self.make_file('pkg/sub_package/foo.py')
        # Run twice to test both cached and non-cached code paths.
        for i in range(2):
            assert self.isfile_case('bar.py')
            assert not self.isfile_case('non_existent.py')
            assert not self.isfile_case('pkg/non_existent.py')
            assert not self.isfile_case('Bar.py')
            assert not self.isfile_case('pkg/sub_package/__init__.PY')
            assert not self.isfile_case('pkg/Sub_Package/foo.py')
            assert not self.isfile_case('Pkg/sub_package/foo.py')
            assert self.isfile_case('pkg/sub_package/__init__.py')
            assert self.isfile_case('pkg/sub_package/foo.py')
    def test_isfile_case_other_directory(self) -> None:
        self.make_file('bar.py')
        with tempfile.TemporaryDirectory() as other:
            self.make_file('other_dir.py', base=other)
            self.make_file('pkg/other_dir.py', base=other)
            assert self.isfile_case(os.path.join(other, 'other_dir.py'))
            assert not self.isfile_case(os.path.join(other, 'Other_Dir.py'))
            assert not self.isfile_case(os.path.join(other, 'bar.py'))
            if sys.platform in ('win32', 'darwin'):
                # We only check case for directories under our prefix, and since
                # this path is not under the prefix, case difference is fine.
                assert self.isfile_case(os.path.join(other, 'PKG/other_dir.py'))
    def make_file(self, path: str, base: Optional[str] = None) -> None:
        """Create a file (or, for a trailing '/', a directory) under base."""
        if base is None:
            base = self.tempdir
        fullpath = os.path.join(base, path)
        os.makedirs(os.path.dirname(fullpath), exist_ok=True)
        if not path.endswith('/'):
            with open(fullpath, 'w') as f:
                f.write('# test file')
    def isfile_case(self, path: str) -> bool:
        # The second argument is the prefix below which case is enforced.
        return self.fscache.isfile_case(os.path.join(self.tempdir, path), self.tempdir)

View file

@ -0,0 +1,82 @@
"""Test cases for graph processing code in build.py."""
import sys
from typing import AbstractSet, Dict, Set, List
from mypy.test.helpers import assert_equal, Suite
from mypy.build import BuildManager, State, BuildSourceSet
from mypy.modulefinder import SearchPaths
from mypy.build import topsort, strongly_connected_components, sorted_components, order_ascc
from mypy.version import __version__
from mypy.options import Options
from mypy.report import Reports
from mypy.plugin import Plugin
from mypy.errors import Errors
from mypy.fscache import FileSystemCache
class GraphSuite(Suite):
    """Unit tests for module-graph ordering helpers in build.py."""
    def test_topsort(self) -> None:
        # Dependencies point from each SCC to the SCCs it depends on;
        # topsort yields batches in dependency-first order.
        a = frozenset({'A'})
        b = frozenset({'B'})
        c = frozenset({'C'})
        d = frozenset({'D'})
        data: Dict[AbstractSet[str], Set[AbstractSet[str]]] = {a: {b, c}, b: {d}, c: {d}}
        res = list(topsort(data))
        assert_equal(res, [{d}, {b, c}, {a}])
    def test_scc(self) -> None:
        # B and C import each other, so they form one strongly connected
        # component; A and D stay singletons.
        vertices = {"A", "B", "C", "D"}
        edges: Dict[str, List[str]] = {"A": ["B", "C"], "B": ["C"], "C": ["B", "D"], "D": []}
        sccs = set(frozenset(x) for x in strongly_connected_components(vertices, edges))
        assert_equal(sccs,
                     {frozenset({'A'}),
                      frozenset({'B', 'C'}),
                      frozenset({'D'})})
    def _make_manager(self) -> BuildManager:
        """Construct a minimal BuildManager for graph-order tests."""
        errors = Errors()
        options = Options()
        fscache = FileSystemCache()
        search_paths = SearchPaths((), (), (), ())
        manager = BuildManager(
            data_dir='',
            search_paths=search_paths,
            ignore_prefix='',
            source_set=BuildSourceSet([]),
            reports=Reports('', {}),
            options=options,
            version_id=__version__,
            plugin=Plugin(options),
            plugins_snapshot={},
            errors=errors,
            flush_errors=lambda msgs, serious: None,
            fscache=fscache,
            stdout=sys.stdout,
            stderr=sys.stderr,
        )
        return manager
    def test_sorted_components(self) -> None:
        manager = self._make_manager()
        graph = {'a': State('a', None, 'import b, c', manager),
                 'd': State('d', None, 'pass', manager),
                 'b': State('b', None, 'import c', manager),
                 'c': State('c', None, 'import b, d', manager),
                 }
        res = sorted_components(graph)
        assert_equal(res, [frozenset({'d'}), frozenset({'c', 'b'}), frozenset({'a'})])
    def test_order_ascc(self) -> None:
        manager = self._make_manager()
        # The function-level 'import a' in d makes the whole graph one SCC.
        graph = {'a': State('a', None, 'import b, c', manager),
                 'd': State('d', None, 'def f(): import a', manager),
                 'b': State('b', None, 'import c', manager),
                 'c': State('c', None, 'import b, d', manager),
                 }
        res = sorted_components(graph)
        assert_equal(res, [frozenset({'a', 'd', 'c', 'b'})])
        ascc = res[0]
        scc = order_ascc(graph, ascc)
        assert_equal(scc, ['d', 'c', 'b', 'a'])

View file

@ -0,0 +1,458 @@
"""Test cases for type inference helper functions."""
from typing import List, Optional, Tuple, Union, Dict, Set
from mypy.test.helpers import Suite, assert_equal
from mypy.argmap import map_actuals_to_formals
from mypy.checker import group_comparison_operands, DisjointDict
from mypy.literals import Key
from mypy.nodes import ArgKind, ARG_POS, ARG_OPT, ARG_STAR, ARG_STAR2, ARG_NAMED, NameExpr
from mypy.types import AnyType, TupleType, Type, TypeOfAny
from mypy.test.typefixture import TypeFixture
class MapActualsToFormalsSuite(Suite):
    """Test cases for argmap.map_actuals_to_formals.

    The expected value passed to assert_map/assert_vararg_map is a list with
    one entry per formal parameter, holding the indexes of the actual
    arguments mapped onto that formal.
    """
    def test_basic(self) -> None:
        self.assert_map([], [], [])
    def test_positional_only(self) -> None:
        self.assert_map([ARG_POS],
                        [ARG_POS],
                        [[0]])
        self.assert_map([ARG_POS, ARG_POS],
                        [ARG_POS, ARG_POS],
                        [[0], [1]])
    def test_optional(self) -> None:
        self.assert_map([],
                        [ARG_OPT],
                        [[]])
        self.assert_map([ARG_POS],
                        [ARG_OPT],
                        [[0]])
        self.assert_map([ARG_POS],
                        [ARG_OPT, ARG_OPT],
                        [[0], []])
    def test_callee_star(self) -> None:
        self.assert_map([],
                        [ARG_STAR],
                        [[]])
        self.assert_map([ARG_POS],
                        [ARG_STAR],
                        [[0]])
        self.assert_map([ARG_POS, ARG_POS],
                        [ARG_STAR],
                        [[0, 1]])
    def test_caller_star(self) -> None:
        self.assert_map([ARG_STAR],
                        [ARG_STAR],
                        [[0]])
        self.assert_map([ARG_POS, ARG_STAR],
                        [ARG_STAR],
                        [[0, 1]])
        self.assert_map([ARG_STAR],
                        [ARG_POS, ARG_STAR],
                        [[0], [0]])
        self.assert_map([ARG_STAR],
                        [ARG_OPT, ARG_STAR],
                        [[0], [0]])
    def test_too_many_caller_args(self) -> None:
        self.assert_map([ARG_POS],
                        [],
                        [])
        self.assert_map([ARG_STAR],
                        [],
                        [])
        self.assert_map([ARG_STAR],
                        [ARG_POS],
                        [[0]])
    def test_tuple_star(self) -> None:
        any_type = AnyType(TypeOfAny.special_form)
        self.assert_vararg_map(
            [ARG_STAR],
            [ARG_POS],
            [[0]],
            self.tuple(any_type))
        self.assert_vararg_map(
            [ARG_STAR],
            [ARG_POS, ARG_POS],
            [[0], [0]],
            self.tuple(any_type, any_type))
        self.assert_vararg_map(
            [ARG_STAR],
            [ARG_POS, ARG_OPT, ARG_OPT],
            [[0], [0], []],
            self.tuple(any_type, any_type))
    def tuple(self, *args: Type) -> TupleType:
        # NOTE: intentionally shadows builtins.tuple within this class; tiny
        # helper for building mypy TupleType fixtures.
        return TupleType(list(args), TypeFixture().std_tuple)
    def test_named_args(self) -> None:
        self.assert_map(
            ['x'],
            [(ARG_POS, 'x')],
            [[0]])
        self.assert_map(
            ['y', 'x'],
            [(ARG_POS, 'x'), (ARG_POS, 'y')],
            [[1], [0]])
    def test_some_named_args(self) -> None:
        self.assert_map(
            ['y'],
            [(ARG_OPT, 'x'), (ARG_OPT, 'y'), (ARG_OPT, 'z')],
            [[], [0], []])
    def test_missing_named_arg(self) -> None:
        self.assert_map(
            ['y'],
            [(ARG_OPT, 'x')],
            [[]])
    def test_duplicate_named_arg(self) -> None:
        self.assert_map(
            ['x', 'x'],
            [(ARG_OPT, 'x')],
            [[0, 1]])
    def test_varargs_and_bare_asterisk(self) -> None:
        self.assert_map(
            [ARG_STAR],
            [ARG_STAR, (ARG_NAMED, 'x')],
            [[0], []])
        self.assert_map(
            [ARG_STAR, 'x'],
            [ARG_STAR, (ARG_NAMED, 'x')],
            [[0], [1]])
    def test_keyword_varargs(self) -> None:
        self.assert_map(
            ['x'],
            [ARG_STAR2],
            [[0]])
        self.assert_map(
            ['x', ARG_STAR2],
            [ARG_STAR2],
            [[0, 1]])
        self.assert_map(
            ['x', ARG_STAR2],
            [(ARG_POS, 'x'), ARG_STAR2],
            [[0], [1]])
        self.assert_map(
            [ARG_POS, ARG_STAR2],
            [(ARG_POS, 'x'), ARG_STAR2],
            [[0], [1]])
    def test_both_kinds_of_varargs(self) -> None:
        self.assert_map(
            [ARG_STAR, ARG_STAR2],
            [(ARG_POS, 'x'), (ARG_POS, 'y')],
            [[0, 1], [0, 1]])
    def test_special_cases(self) -> None:
        self.assert_map([ARG_STAR],
                        [ARG_STAR, ARG_STAR2],
                        [[0], []])
        self.assert_map([ARG_STAR, ARG_STAR2],
                        [ARG_STAR, ARG_STAR2],
                        [[0], [1]])
        self.assert_map([ARG_STAR2],
                        [(ARG_POS, 'x'), ARG_STAR2],
                        [[0], [0]])
        self.assert_map([ARG_STAR2],
                        [ARG_STAR2],
                        [[0]])
    def assert_map(self,
                   caller_kinds_: List[Union[ArgKind, str]],
                   callee_kinds_: List[Union[ArgKind, Tuple[ArgKind, str]]],
                   expected: List[List[int]],
                   ) -> None:
        """Run map_actuals_to_formals on shorthand inputs and compare to expected.

        Strings stand for named actuals; (kind, name) pairs for named formals.
        """
        caller_kinds, caller_names = expand_caller_kinds(caller_kinds_)
        callee_kinds, callee_names = expand_callee_kinds(callee_kinds_)
        result = map_actuals_to_formals(
            caller_kinds,
            caller_names,
            callee_kinds,
            callee_names,
            lambda i: AnyType(TypeOfAny.special_form))
        assert_equal(result, expected)
    def assert_vararg_map(self,
                          caller_kinds: List[ArgKind],
                          callee_kinds: List[ArgKind],
                          expected: List[List[int]],
                          vararg_type: Type,
                          ) -> None:
        """Like assert_map, but supplies vararg_type as the type of every actual."""
        result = map_actuals_to_formals(
            caller_kinds,
            [],
            callee_kinds,
            [],
            lambda i: vararg_type)
        assert_equal(result, expected)
def expand_caller_kinds(kinds_or_names: List[Union[ArgKind, str]]
                        ) -> Tuple[List[ArgKind], List[Optional[str]]]:
    """Split a shorthand actual-argument list into parallel kind/name lists.

    A plain string stands for a named argument (kind ARG_NAMED); anything
    else is an explicit ArgKind with no name.
    """
    kinds: List[ArgKind] = []
    names: List[Optional[str]] = []
    for item in kinds_or_names:
        if isinstance(item, str):
            kind, name = ARG_NAMED, item
        else:
            kind, name = item, None
        kinds.append(kind)
        names.append(name)
    return kinds, names
def expand_callee_kinds(kinds_and_names: List[Union[ArgKind, Tuple[ArgKind, str]]]
                        ) -> Tuple[List[ArgKind], List[Optional[str]]]:
    """Split a shorthand formal-parameter list into parallel kind/name lists.

    A (kind, name) pair stands for a named formal; a bare kind has no name.
    """
    kinds: List[ArgKind] = []
    names: List[Optional[str]] = []
    for entry in kinds_and_names:
        if isinstance(entry, tuple):
            kind, name = entry
        else:
            kind, name = entry, None
        kinds.append(kind)
        names.append(name)
    return kinds, names
class OperandDisjointDictSuite(Suite):
    """Test cases for checker.DisjointDict, which is used for type inference with operands."""
    def new(self) -> DisjointDict[int, str]:
        """Create a fresh int -> str DisjointDict for each test."""
        return DisjointDict()
    def test_independent_maps(self) -> None:
        # Key sets never overlap, so no groups are merged.
        d = self.new()
        d.add_mapping({0, 1}, {"group1"})
        d.add_mapping({2, 3, 4}, {"group2"})
        d.add_mapping({5, 6, 7}, {"group3"})
        self.assertEqual(d.items(), [
            ({0, 1}, {"group1"}),
            ({2, 3, 4}, {"group2"}),
            ({5, 6, 7}, {"group3"}),
        ])
    def test_partial_merging(self) -> None:
        # Overlapping keys (1, 0, 4) chain some groups together but not all.
        d = self.new()
        d.add_mapping({0, 1}, {"group1"})
        d.add_mapping({1, 2}, {"group2"})
        d.add_mapping({3, 4}, {"group3"})
        d.add_mapping({5, 0}, {"group4"})
        d.add_mapping({5, 6}, {"group5"})
        d.add_mapping({4, 7}, {"group6"})
        self.assertEqual(d.items(), [
            ({0, 1, 2, 5, 6}, {"group1", "group2", "group4", "group5"}),
            ({3, 4, 7}, {"group3", "group6"}),
        ])
    def test_full_merging(self) -> None:
        # The final {0, 10} mapping bridges the two remaining clusters.
        d = self.new()
        d.add_mapping({0, 1, 2}, {"a"})
        d.add_mapping({3, 4, 2}, {"b"})
        d.add_mapping({10, 11, 12}, {"c"})
        d.add_mapping({13, 14, 15}, {"d"})
        d.add_mapping({14, 10, 16}, {"e"})
        d.add_mapping({0, 10}, {"f"})
        self.assertEqual(d.items(), [
            ({0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16}, {"a", "b", "c", "d", "e", "f"}),
        ])
    def test_merge_with_multiple_overlaps(self) -> None:
        # A single mapping may overlap several existing groups at once.
        d = self.new()
        d.add_mapping({0, 1, 2}, {"a"})
        d.add_mapping({3, 4, 5}, {"b"})
        d.add_mapping({1, 2, 4, 5}, {"c"})
        d.add_mapping({6, 1, 2, 4, 5}, {"d"})
        d.add_mapping({6, 1, 2, 4, 5}, {"e"})
        self.assertEqual(d.items(), [
            ({0, 1, 2, 3, 4, 5, 6}, {"a", "b", "c", "d", "e"}),
        ])
class OperandComparisonGroupingSuite(Suite):
    """Test cases for checker.group_comparison_operands."""
    def literal_keymap(self, assignable_operands: Dict[int, NameExpr]) -> Dict[int, Key]:
        """Build a fake operand-index -> literal-key map from the given exprs.

        Only the listed operand indexes count as assignable; the key is a
        synthetic ('FakeExpr', name) tuple since the grouping code only needs
        keys to compare equal for equal operands.
        """
        output: Dict[int, Key] = {}
        for index, expr in assignable_operands.items():
            output[index] = ('FakeExpr', expr.name)
        return output
    def test_basic_cases(self) -> None:
        # Note: the grouping function doesn't actually inspect the input exprs, so we
        # just default to using NameExprs for simplicity.
        x0 = NameExpr('x0')
        x1 = NameExpr('x1')
        x2 = NameExpr('x2')
        x3 = NameExpr('x3')
        x4 = NameExpr('x4')
        basic_input = [('==', x0, x1), ('==', x1, x2), ('<', x2, x3), ('==', x3, x4)]
        none_assignable = self.literal_keymap({})
        all_assignable = self.literal_keymap({0: x0, 1: x1, 2: x2, 3: x3, 4: x4})
        for assignable in [none_assignable, all_assignable]:
            self.assertEqual(
                group_comparison_operands(basic_input, assignable, set()),
                [('==', [0, 1]), ('==', [1, 2]), ('<', [2, 3]), ('==', [3, 4])],
            )
            self.assertEqual(
                group_comparison_operands(basic_input, assignable, {'=='}),
                [('==', [0, 1, 2]), ('<', [2, 3]), ('==', [3, 4])],
            )
            self.assertEqual(
                group_comparison_operands(basic_input, assignable, {'<'}),
                [('==', [0, 1]), ('==', [1, 2]), ('<', [2, 3]), ('==', [3, 4])],
            )
            self.assertEqual(
                group_comparison_operands(basic_input, assignable, {'==', '<'}),
                [('==', [0, 1, 2]), ('<', [2, 3]), ('==', [3, 4])],
            )
    def test_multiple_groups(self) -> None:
        x0 = NameExpr('x0')
        x1 = NameExpr('x1')
        x2 = NameExpr('x2')
        x3 = NameExpr('x3')
        x4 = NameExpr('x4')
        x5 = NameExpr('x5')
        self.assertEqual(
            group_comparison_operands(
                [('==', x0, x1), ('==', x1, x2), ('is', x2, x3), ('is', x3, x4)],
                self.literal_keymap({}),
                {'==', 'is'},
            ),
            [('==', [0, 1, 2]), ('is', [2, 3, 4])],
        )
        self.assertEqual(
            group_comparison_operands(
                [('==', x0, x1), ('==', x1, x2), ('==', x2, x3), ('==', x3, x4)],
                self.literal_keymap({}),
                {'==', 'is'},
            ),
            [('==', [0, 1, 2, 3, 4])],
        )
        self.assertEqual(
            group_comparison_operands(
                [('is', x0, x1), ('==', x1, x2), ('==', x2, x3), ('==', x3, x4)],
                self.literal_keymap({}),
                {'==', 'is'},
            ),
            [('is', [0, 1]), ('==', [1, 2, 3, 4])],
        )
        self.assertEqual(
            group_comparison_operands(
                [('is', x0, x1), ('is', x1, x2), ('<', x2, x3), ('==', x3, x4), ('==', x4, x5)],
                self.literal_keymap({}),
                {'==', 'is'},
            ),
            [('is', [0, 1, 2]), ('<', [2, 3]), ('==', [3, 4, 5])],
        )
    def test_multiple_groups_coalescing(self) -> None:
        x0 = NameExpr('x0')
        x1 = NameExpr('x1')
        x2 = NameExpr('x2')
        x3 = NameExpr('x3')
        x4 = NameExpr('x4')
        nothing_combined = [('==', [0, 1, 2]), ('<', [2, 3]), ('==', [3, 4, 5])]
        everything_combined = [('==', [0, 1, 2, 3, 4, 5]), ('<', [2, 3])]
        # Note: We do 'x4 == x0' at the very end!
        two_groups = [
            ('==', x0, x1), ('==', x1, x2), ('<', x2, x3), ('==', x3, x4), ('==', x4, x0),
        ]
        self.assertEqual(
            group_comparison_operands(
                two_groups,
                self.literal_keymap({0: x0, 1: x1, 2: x2, 3: x3, 4: x4, 5: x0}),
                {'=='},
            ),
            everything_combined,
            "All vars are assignable, everything is combined"
        )
        self.assertEqual(
            group_comparison_operands(
                two_groups,
                self.literal_keymap({1: x1, 2: x2, 3: x3, 4: x4}),
                {'=='},
            ),
            nothing_combined,
            "x0 is unassignable, so no combining"
        )
        self.assertEqual(
            group_comparison_operands(
                two_groups,
                self.literal_keymap({0: x0, 1: x1, 3: x3, 5: x0}),
                {'=='},
            ),
            everything_combined,
            "Some vars are unassignable but x0 is, so we combine"
        )
        self.assertEqual(
            group_comparison_operands(
                two_groups,
                self.literal_keymap({0: x0, 5: x0}),
                {'=='},
            ),
            everything_combined,
            "All vars are unassignable but x0 is, so we combine"
        )
    def test_multiple_groups_different_operators(self) -> None:
        x0 = NameExpr('x0')
        x1 = NameExpr('x1')
        x2 = NameExpr('x2')
        x3 = NameExpr('x3')
        groups = [('==', x0, x1), ('==', x1, x2), ('is', x2, x3), ('is', x3, x0)]
        keymap = self.literal_keymap({0: x0, 1: x1, 2: x2, 3: x3, 4: x0})
        self.assertEqual(
            group_comparison_operands(groups, keymap, {'==', 'is'}),
            [('==', [0, 1, 2]), ('is', [2, 3, 4])],
            "Different operators can never be combined"
        )
    def test_single_pair(self) -> None:
        # A lone comparison must come through unchanged for every combination
        # of assignability and grouping operators.
        x0 = NameExpr('x0')
        x1 = NameExpr('x1')
        single_comparison = [('==', x0, x1)]
        expected_output = [('==', [0, 1])]
        assignable_combinations: List[Dict[int, NameExpr]] = [
            {}, {0: x0}, {1: x1}, {0: x0, 1: x1},
        ]
        to_group_by: List[Set[str]] = [set(), {"=="}, {"is"}]
        for combo in assignable_combinations:
            for operators in to_group_by:
                keymap = self.literal_keymap(combo)
                self.assertEqual(
                    group_comparison_operands(single_comparison, keymap, operators),
                    expected_output,
                )
    def test_empty_pair_list(self) -> None:
        # This case should never occur in practice -- ComparisonExprs
        # always contain at least one comparison. But in case it does...
        self.assertEqual(group_comparison_operands([], {}, set()), [])
        self.assertEqual(group_comparison_operands([], {}, {'=='}), [])

View file

@ -0,0 +1,74 @@
from unittest import TestCase, main
from multiprocessing import Process, Queue
from mypy.ipc import IPCClient, IPCServer
import pytest
import sys
import time
CONNECTION_NAME = 'dmypy-test-ipc'
def server(msg: str, q: 'Queue[str]') -> None:
    """Server half of the IPC tests.

    Publishes the actual connection name on *q*, then repeatedly accepts a
    connection and writes *msg*, stopping once a client replies with a
    non-empty payload.
    """
    ipc_server = IPCServer(CONNECTION_NAME)
    q.put(ipc_server.connection_name)
    data = b''
    while not data:
        with ipc_server:
            ipc_server.write(msg.encode())
            data = ipc_server.read()
    ipc_server.cleanup()
class IPCTests(TestCase):
    """End-to-end tests of IPCClient/IPCServer over a real child process."""
    def test_transaction_large(self) -> None:
        """A message larger than one read chunk must arrive intact."""
        queue: Queue[str] = Queue()
        msg = 't' * 200000 # longer than the max read size of 100_000
        p = Process(target=server, args=(msg, queue), daemon=True)
        p.start()
        connection_name = queue.get()
        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b'test')
        queue.close()
        queue.join_thread()
        p.join()
    def test_connect_twice(self) -> None:
        """Two sequential client connections to the same server must both work."""
        queue: Queue[str] = Queue()
        msg = 'this is a test message'
        p = Process(target=server, args=(msg, queue), daemon=True)
        p.start()
        connection_name = queue.get()
        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b'')  # don't let the server hang up yet, we want to connect again.
        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b'test')
        queue.close()
        queue.join_thread()
        p.join()
        assert p.exitcode == 0
    # Run test_connect_twice a lot, in the hopes of finding issues.
    # This is really slow, so it is skipped, but can be enabled if
    # needed to debug IPC issues.
    @pytest.mark.skip
    def test_connect_alot(self) -> None:
        t0 = time.time()
        for i in range(1000):
            try:
                print(i, 'start')
                self.test_connect_twice()
            finally:
                t1 = time.time()
                print(i, t1 - t0)
                sys.stdout.flush()
                t0 = t1
# Allow running the IPC tests directly as a script (python testipc.py).
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,243 @@
"""Test cases for AST merge (used for fine-grained incremental checking)"""
import os
import shutil
from typing import List, Tuple, Dict, Optional
from mypy import build
from mypy.build import BuildResult
from mypy.modulefinder import BuildSource
from mypy.defaults import PYTHON3_VERSION
from mypy.errors import CompileError
from mypy.nodes import (
Node, MypyFile, SymbolTable, SymbolTableNode, TypeInfo, Expression, Var, TypeVarExpr,
UNBOUND_IMPORTED
)
from mypy.server.subexpr import get_subexpressions
from mypy.server.update import FineGrainedBuildManager
from mypy.strconv import StrConv
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal, normalize_error_messages, parse_options
from mypy.types import TypeStrVisitor, Type
from mypy.util import short_type, IdMapper
# Which data structures to dump in a test case?
# These values are opaque tags compared by identity/equality in dump();
# the test-case *name suffix* (e.g. '_typeinfo') selects which one is used.
SYMTABLE = 'SYMTABLE'
# NOTE(review): the leading space in this value looks accidental, but the
# string is only ever used as an opaque tag, so it is harmless -- confirm.
TYPEINFO = ' TYPEINFO'
TYPES = 'TYPES'
AST = 'AST'
# Modules whose contents are never included in dumps: fine-grained
# incremental mode does not support changes to these builtins/stubs.
NOT_DUMPED_MODULES = (
    'builtins',
    'typing',
    'abc',
    'contextlib',
    'sys',
    'mypy_extensions',
    'typing_extensions',
    'enum',
)
class ASTMergeSuite(DataSuite):
    """Data-driven suite that checks AST merging across a fine-grained update.

    Each case builds 'main' (which imports 'target'), replaces target.py with
    target.py.next, reprocesses, and compares dumps taken before/after the
    update against the expected output.
    """
    files = ['merge.test']
    def setup(self) -> None:
        """Set up shared AST/type stringifiers that assign stable object ids."""
        super().setup()
        self.str_conv = StrConv(show_ids=True)
        assert self.str_conv.id_mapper is not None
        self.id_mapper: IdMapper = self.str_conv.id_mapper
        self.type_str_conv = TypeStrVisitor(self.id_mapper)
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Run one merge test case: build, update target, dump, and compare."""
        name = testcase.name
        # We use the test case name to decide which data structures to dump.
        # Dumping everything would result in very verbose test cases.
        if name.endswith('_symtable'):
            kind = SYMTABLE
        elif name.endswith('_typeinfo'):
            kind = TYPEINFO
        elif name.endswith('_types'):
            kind = TYPES
        else:
            kind = AST
        main_src = '\n'.join(testcase.input)
        result = self.build(main_src, testcase)
        assert result is not None, 'cases where CompileError occurred should not be run'
        result.manager.fscache.flush()
        fine_grained_manager = FineGrainedBuildManager(result)
        a = []
        if result.errors:
            a.extend(result.errors)
        # Swap in the updated version of the target module before reprocessing.
        target_path = os.path.join(test_temp_dir, 'target.py')
        shutil.copy(os.path.join(test_temp_dir, 'target.py.next'), target_path)
        a.extend(self.dump(fine_grained_manager, kind))
        old_subexpr = get_subexpressions(result.manager.modules['target'])
        a.append('==>')
        new_file, new_types = self.build_increment(fine_grained_manager, 'target', target_path)
        a.extend(self.dump(fine_grained_manager, kind))
        for expr in old_subexpr:
            if isinstance(expr, TypeVarExpr):
                # These are merged so we can't perform the check.
                continue
            # Verify that old AST nodes are removed from the expression type map.
            assert expr not in new_types
        if testcase.normalize_output:
            a = normalize_error_messages(a)
        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid output ({}, line {})'.format(testcase.file,
                                                  testcase.line))
    def build(self, source: str, testcase: DataDrivenTestCase) -> Optional[BuildResult]:
        """Do the initial fine-grained-capable build of 'main'; None on CompileError."""
        options = parse_options(source, testcase, incremental_step=1)
        options.incremental = True
        options.fine_grained_incremental = True
        options.use_builtins_fixtures = True
        options.export_types = True
        options.show_traceback = True
        options.python_version = PYTHON3_VERSION
        main_path = os.path.join(test_temp_dir, 'main')
        with open(main_path, 'w', encoding='utf8') as f:
            f.write(source)
        try:
            result = build.build(sources=[BuildSource(main_path, None, None)],
                                 options=options,
                                 alt_lib_path=test_temp_dir)
        except CompileError:
            # TODO: Is it okay to return None?
            return None
        return result
    def build_increment(self, manager: FineGrainedBuildManager,
                        module_id: str, path: str) -> Tuple[MypyFile,
                                                            Dict[Expression, Type]]:
        """Reprocess one changed module; return its new tree and type map."""
        manager.flush_cache()
        manager.update([(module_id, path)], [])
        module = manager.manager.modules[module_id]
        type_map = manager.graph[module_id].type_map()
        return module, type_map
    def dump(self,
             manager: FineGrainedBuildManager,
             kind: str) -> List[str]:
        """Dispatch to the dump routine selected by *kind*."""
        modules = manager.manager.modules
        if kind == AST:
            return self.dump_asts(modules)
        elif kind == TYPEINFO:
            return self.dump_typeinfos(modules)
        elif kind == SYMTABLE:
            return self.dump_symbol_tables(modules)
        elif kind == TYPES:
            return self.dump_types(manager)
        assert False, 'Invalid kind %s' % kind
    def dump_asts(self, modules: Dict[str, MypyFile]) -> List[str]:
        """Render every dumped module's AST as lines of text."""
        a = []
        for m in sorted(modules):
            if m in NOT_DUMPED_MODULES:
                # We don't support incremental checking of changes to builtins, etc.
                continue
            s = modules[m].accept(self.str_conv)
            a.extend(s.splitlines())
        return a
    def dump_symbol_tables(self, modules: Dict[str, MypyFile]) -> List[str]:
        """Render the symbol tables of all dumped modules."""
        a = []
        for id in sorted(modules):
            if not is_dumped_module(id):
                # We don't support incremental checking of changes to builtins, etc.
                continue
            a.extend(self.dump_symbol_table(id, modules[id].names))
        return a
    def dump_symbol_table(self, module_id: str, symtable: SymbolTable) -> List[str]:
        """Render one module's symbol table, skipping dunder names."""
        a = ['{}:'.format(module_id)]
        for name in sorted(symtable):
            if name.startswith('__'):
                continue
            a.append('  {}: {}'.format(name, self.format_symbol_table_node(symtable[name])))
        return a
    def format_symbol_table_node(self, node: SymbolTableNode) -> str:
        """Format a symbol table entry as 'Kind<id>' plus the variable type, if any."""
        if node.node is None:
            if node.kind == UNBOUND_IMPORTED:
                return 'UNBOUND_IMPORTED'
            return 'None'
        if isinstance(node.node, Node):
            s = '{}<{}>'.format(str(type(node.node).__name__),
                                self.id_mapper.id(node.node))
        else:
            s = '? ({})'.format(type(node.node))
        if (isinstance(node.node, Var) and node.node.type and
                not node.node.fullname.startswith('typing.')):
            typestr = self.format_type(node.node.type)
            s += '({})'.format(typestr)
        return s
    def dump_typeinfos(self, modules: Dict[str, MypyFile]) -> List[str]:
        """Render all TypeInfos (classes) of all dumped modules."""
        a = []
        for id in sorted(modules):
            if not is_dumped_module(id):
                continue
            a.extend(self.dump_typeinfos_recursive(modules[id].names))
        return a
    def dump_typeinfos_recursive(self, names: SymbolTable) -> List[str]:
        """Recursively render TypeInfos, including nested classes."""
        a = []
        for name, node in sorted(names.items(), key=lambda x: x[0]):
            if isinstance(node.node, TypeInfo):
                a.extend(self.dump_typeinfo(node.node))
                a.extend(self.dump_typeinfos_recursive(node.node.names))
        return a
    def dump_typeinfo(self, info: TypeInfo) -> List[str]:
        """Render one TypeInfo (skipping the noisy enum.Enum base)."""
        if info.fullname == 'enum.Enum':
            # Avoid noise
            return []
        s = info.dump(str_conv=self.str_conv,
                      type_str_conv=self.type_str_conv)
        return s.splitlines()
    def dump_types(self, manager: FineGrainedBuildManager) -> List[str]:
        """Render per-module expression type maps."""
        a = []
        # To make the results repeatable, we try to generate unique and
        # deterministic sort keys.
        for module_id in sorted(manager.manager.modules):
            if not is_dumped_module(module_id):
                continue
            all_types = manager.manager.all_types
            # Compute a module type map from the global type map
            tree = manager.graph[module_id].tree
            assert tree is not None
            type_map = {node: all_types[node]
                        for node in get_subexpressions(tree)
                        if node in all_types}
            if type_map:
                a.append('## {}'.format(module_id))
                for expr in sorted(type_map, key=lambda n: (n.line, short_type(n),
                                                            str(n) + str(type_map[n]))):
                    typ = type_map[expr]
                    a.append('{}:{}: {}'.format(short_type(expr),
                                                expr.line,
                                                self.format_type(typ)))
        return a
    def format_type(self, typ: Type) -> str:
        """Stringify a type with stable object ids."""
        return typ.accept(self.type_str_conv)
def is_dumped_module(id: str) -> bool:
    """Return True if the module's contents should appear in test dumps.

    Excludes the fixture modules in NOT_DUMPED_MODULES and (except for
    __main__) any module whose name starts with an underscore.
    """
    if id in NOT_DUMPED_MODULES:
        return False
    return id == '__main__' or not id.startswith('_')

View file

@ -0,0 +1,293 @@
import os
from mypy.options import Options
from mypy.modulefinder import (
FindModuleCache,
SearchPaths,
ModuleNotFoundReason,
expand_site_packages
)
from mypy.test.helpers import Suite, assert_equal
from mypy.test.config import package_path
data_path = os.path.relpath(os.path.join(package_path, "modulefinder"))
class ModuleFinderSuite(Suite):
    """Tests for FindModuleCache lookups against the modulefinder fixture tree.

    Two caches are exercised: fmc_ns (namespace_packages=True) and
    fmc_nons (namespace_packages=False).
    """
    def setUp(self) -> None:
        self.search_paths = SearchPaths(
            python_path=(),
            mypy_path=(
                os.path.join(data_path, "nsx-pkg1"),
                os.path.join(data_path, "nsx-pkg2"),
                os.path.join(data_path, "nsx-pkg3"),
                os.path.join(data_path, "nsy-pkg1"),
                os.path.join(data_path, "nsy-pkg2"),
                os.path.join(data_path, "pkg1"),
                os.path.join(data_path, "pkg2"),
            ),
            package_path=(),
            typeshed_path=(),
        )
        options = Options()
        options.namespace_packages = True
        self.fmc_ns = FindModuleCache(self.search_paths, fscache=None, options=options)
        options = Options()
        options.namespace_packages = False
        self.fmc_nons = FindModuleCache(self.search_paths, fscache=None, options=options)
    def test__no_namespace_packages__nsx(self) -> None:
        """
        If namespace_packages is False, we shouldn't find nsx
        """
        found_module = self.fmc_nons.find_module("nsx")
        assert_equal(ModuleNotFoundReason.NOT_FOUND, found_module)
    def test__no_namespace_packages__nsx_a(self) -> None:
        """
        If namespace_packages is False, we shouldn't find nsx.a.
        """
        found_module = self.fmc_nons.find_module("nsx.a")
        assert_equal(ModuleNotFoundReason.NOT_FOUND, found_module)
    def test__no_namespace_packages__find_a_in_pkg1(self) -> None:
        """
        Find pkg1/a.py for "a" with namespace_packages False.
        """
        found_module = self.fmc_nons.find_module("a")
        expected = os.path.join(data_path, "pkg1", "a.py")
        assert_equal(expected, found_module)
    def test__no_namespace_packages__find_b_in_pkg2(self) -> None:
        """
        Find pkg2/b/__init__.py for "b" with namespace_packages False.
        """
        # Bug fix: this test previously queried self.fmc_ns (the namespace-mode
        # cache), so the non-namespace code path was never exercised here.
        found_module = self.fmc_nons.find_module("b")
        expected = os.path.join(data_path, "pkg2", "b", "__init__.py")
        assert_equal(expected, found_module)
    def test__find_nsx_as_namespace_pkg_in_pkg1(self) -> None:
        """
        There's no __init__.py in any of the nsx dirs, return
        the path to the first one found in mypypath.
        """
        found_module = self.fmc_ns.find_module("nsx")
        expected = os.path.join(data_path, "nsx-pkg1", "nsx")
        assert_equal(expected, found_module)
    def test__find_nsx_a_init_in_pkg1(self) -> None:
        """
        Find nsx-pkg1/nsx/a/__init__.py for "nsx.a" in namespace mode.
        """
        found_module = self.fmc_ns.find_module("nsx.a")
        expected = os.path.join(data_path, "nsx-pkg1", "nsx", "a", "__init__.py")
        assert_equal(expected, found_module)
    def test__find_nsx_b_init_in_pkg2(self) -> None:
        """
        Find nsx-pkg2/nsx/b/__init__.py for "nsx.b" in namespace mode.
        """
        found_module = self.fmc_ns.find_module("nsx.b")
        expected = os.path.join(data_path, "nsx-pkg2", "nsx", "b", "__init__.py")
        assert_equal(expected, found_module)
    def test__find_nsx_c_c_in_pkg3(self) -> None:
        """
        Find nsx-pkg3/nsx/c/c.py for "nsx.c.c" in namespace mode.
        """
        found_module = self.fmc_ns.find_module("nsx.c.c")
        expected = os.path.join(data_path, "nsx-pkg3", "nsx", "c", "c.py")
        assert_equal(expected, found_module)
    def test__find_nsy_a__init_pyi(self) -> None:
        """
        Prefer nsy-pkg1/a/__init__.pyi file over __init__.py.
        """
        found_module = self.fmc_ns.find_module("nsy.a")
        expected = os.path.join(data_path, "nsy-pkg1", "nsy", "a", "__init__.pyi")
        assert_equal(expected, found_module)
    def test__find_nsy_b__init_py(self) -> None:
        """
        There is a nsy-pkg2/nsy/b.pyi, but also a nsy-pkg2/nsy/b/__init__.py.
        We expect to find the latter when looking up "nsy.b" as
        a package is preferred over a module.
        """
        found_module = self.fmc_ns.find_module("nsy.b")
        expected = os.path.join(data_path, "nsy-pkg2", "nsy", "b", "__init__.py")
        assert_equal(expected, found_module)
    def test__find_nsy_c_pyi(self) -> None:
        """
        There is a nsy-pkg2/nsy/c.pyi and nsy-pkg2/nsy/c.py
        We expect to find the former when looking up "nsy.c" as
        .pyi is preferred over .py.
        """
        found_module = self.fmc_ns.find_module("nsy.c")
        expected = os.path.join(data_path, "nsy-pkg2", "nsy", "c.pyi")
        assert_equal(expected, found_module)
    def test__find_a_in_pkg1(self) -> None:
        """Find pkg1/a.py for "a" in namespace mode."""
        found_module = self.fmc_ns.find_module("a")
        expected = os.path.join(data_path, "pkg1", "a.py")
        assert_equal(expected, found_module)
    def test__find_b_init_in_pkg2(self) -> None:
        """Find pkg2/b/__init__.py for "b" in namespace mode."""
        found_module = self.fmc_ns.find_module("b")
        expected = os.path.join(data_path, "pkg2", "b", "__init__.py")
        assert_equal(expected, found_module)
    def test__find_d_nowhere(self) -> None:
        """A module that exists in no search path is reported as NOT_FOUND."""
        found_module = self.fmc_ns.find_module("d")
        assert_equal(ModuleNotFoundReason.NOT_FOUND, found_module)
class ModuleFinderSitePackagesSuite(Suite):
    """Tests for FindModuleCache against a simulated site-packages directory.

    Checks py.typed/stub handling and .pth expansion in both namespace and
    non-namespace modes via table-driven cases of (module, expected result).
    """
    def setUp(self) -> None:
        self.package_dir = os.path.relpath(os.path.join(
            package_path,
            "modulefinder-site-packages",
        ))
        egg_dirs, site_packages = expand_site_packages([self.package_dir])
        self.search_paths = SearchPaths(
            python_path=(),
            mypy_path=(os.path.join(data_path, "pkg1"),),
            package_path=tuple(egg_dirs + site_packages),
            typeshed_path=(),
        )
        options = Options()
        options.namespace_packages = True
        self.fmc_ns = FindModuleCache(self.search_paths, fscache=None, options=options)
        options = Options()
        options.namespace_packages = False
        self.fmc_nons = FindModuleCache(self.search_paths, fscache=None, options=options)
    def path(self, *parts: str) -> str:
        """Join *parts* under the simulated site-packages directory."""
        return os.path.join(self.package_dir, *parts)
    def test__packages_with_ns(self) -> None:
        """Table-driven lookups with namespace_packages=True."""
        cases = [
            # Namespace package with py.typed
            ("ns_pkg_typed", self.path("ns_pkg_typed")),
            ("ns_pkg_typed.a", self.path("ns_pkg_typed", "a.py")),
            ("ns_pkg_typed.b", self.path("ns_pkg_typed", "b")),
            ("ns_pkg_typed.b.c", self.path("ns_pkg_typed", "b", "c.py")),
            ("ns_pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
            # Namespace package without py.typed
            ("ns_pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            # Namespace package with a separate stub package (ns_pkg_w_stubs-stubs)
            ("ns_pkg_w_stubs", self.path("ns_pkg_w_stubs")),
            ("ns_pkg_w_stubs.typed", self.path("ns_pkg_w_stubs-stubs", "typed", "__init__.pyi")),
            ("ns_pkg_w_stubs.typed_inline",
             self.path("ns_pkg_w_stubs", "typed_inline", "__init__.py")),
            ("ns_pkg_w_stubs.untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            # Regular package with py.typed
            ("pkg_typed", self.path("pkg_typed", "__init__.py")),
            ("pkg_typed.a", self.path("pkg_typed", "a.py")),
            ("pkg_typed.b", self.path("pkg_typed", "b", "__init__.py")),
            ("pkg_typed.b.c", self.path("pkg_typed", "b", "c.py")),
            ("pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
            # Regular package without py.typed
            ("pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            # Top-level Python file in site-packages
            ("standalone", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("standalone.standalone_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            # Packages found by following .pth files
            ("baz_pkg", self.path("baz", "baz_pkg", "__init__.py")),
            ("ns_baz_pkg.a", self.path("baz", "ns_baz_pkg", "a.py")),
            ("neighbor_pkg", self.path("..", "modulefinder-src", "neighbor_pkg", "__init__.py")),
            ("ns_neighbor_pkg.a", self.path("..", "modulefinder-src", "ns_neighbor_pkg", "a.py")),
            # Something that doesn't exist
            ("does_not_exist", ModuleNotFoundReason.NOT_FOUND),
            # A regular package with an installed set of stubs
            ("foo.bar", self.path("foo-stubs", "bar.pyi")),
            # A regular, non-site-packages module
            ("a", os.path.join(data_path, "pkg1", "a.py")),
        ]
        for module, expected in cases:
            template = "Find(" + module + ") got {}; expected {}"
            actual = self.fmc_ns.find_module(module)
            assert_equal(actual, expected, template)
    def test__packages_without_ns(self) -> None:
        """Table-driven lookups with namespace_packages=False."""
        cases = [
            # Namespace package with py.typed
            ("ns_pkg_typed", ModuleNotFoundReason.NOT_FOUND),
            ("ns_pkg_typed.a", ModuleNotFoundReason.NOT_FOUND),
            ("ns_pkg_typed.b", ModuleNotFoundReason.NOT_FOUND),
            ("ns_pkg_typed.b.c", ModuleNotFoundReason.NOT_FOUND),
            ("ns_pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
            # Namespace package without py.typed
            ("ns_pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            # Namespace package with a separate stub package (ns_pkg_w_stubs-stubs)
            ("ns_pkg_w_stubs", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_w_stubs.typed", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("ns_pkg_w_stubs.typed_inline",
             self.path("ns_pkg_w_stubs", "typed_inline", "__init__.py")),
            ("ns_pkg_w_stubs.untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            # Regular package with py.typed
            ("pkg_typed", self.path("pkg_typed", "__init__.py")),
            ("pkg_typed.a", self.path("pkg_typed", "a.py")),
            ("pkg_typed.b", self.path("pkg_typed", "b", "__init__.py")),
            ("pkg_typed.b.c", self.path("pkg_typed", "b", "c.py")),
            ("pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
            # Regular package without py.typed
            ("pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            # Top-level Python file in site-packages
            ("standalone", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            ("standalone.standalone_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
            # Packages found by following .pth files
            ("baz_pkg", self.path("baz", "baz_pkg", "__init__.py")),
            ("ns_baz_pkg.a", ModuleNotFoundReason.NOT_FOUND),
            ("neighbor_pkg", self.path("..", "modulefinder-src", "neighbor_pkg", "__init__.py")),
            ("ns_neighbor_pkg.a", ModuleNotFoundReason.NOT_FOUND),
            # Something that doesn't exist
            ("does_not_exist", ModuleNotFoundReason.NOT_FOUND),
            # A regular package with an installed set of stubs
            ("foo.bar", self.path("foo-stubs", "bar.pyi")),
            # A regular, non-site-packages module
            ("a", os.path.join(data_path, "pkg1", "a.py")),
        ]
        for module, expected in cases:
            template = "Find(" + module + ") got {}; expected {}"
            actual = self.fmc_nons.find_module(module)
            assert_equal(actual, expected, template)

View file

@ -0,0 +1,12 @@
"""A basic check to make sure that we are using a mypyc-compiled version when expected."""
import mypy
from unittest import TestCase
import os
class MypycTest(TestCase):
    """Sanity check: when TEST_MYPYC=1 is set, mypy itself must be the compiled build."""
    def test_using_mypyc(self) -> None:
        compiled_requested = os.getenv('TEST_MYPYC', None) == '1'
        if compiled_requested:
            # A compiled module's __file__ points at the extension, not a .py file.
            assert not mypy.__file__.endswith('.py'), "Expected to find a mypyc-compiled version"

View file

@ -0,0 +1,87 @@
"""Tests for the mypy parser."""
import sys
from pytest import skip
from mypy import defaults
from mypy.test.helpers import assert_string_arrays_equal, parse_options
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.parse import parse
from mypy.errors import CompileError
from mypy.options import Options
class ParserSuite(DataSuite):
    """Data-driven suite that parses test inputs and compares tree dumps."""
    required_out_section = True
    base_path = '.'
    files = ['parse.test',
             'parse-python2.test']
    if sys.version_info >= (3, 10):
        # match-statement syntax can only be parsed on a 3.10+ interpreter.
        files.append('parse-python310.test')
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_parser(testcase)
def test_parser(testcase: DataDrivenTestCase) -> None:
    """Run a single parser test case.

    The test case description supplies both the source to parse and the
    expected tree dump (or expected error messages).
    """
    options = Options()
    # The test-file suffix selects the grammar variant to use.
    fname = testcase.file
    if fname.endswith('python2.test'):
        options.python_version = defaults.PYTHON2_VERSION
    elif fname.endswith('python310.test'):
        options.python_version = (3, 10)
    else:
        options.python_version = defaults.PYTHON3_VERSION
    source = '\n'.join(testcase.input)
    try:
        tree = parse(bytes(source, 'ascii'),
                     fnam='main',
                     module='__main__',
                     errors=None,
                     options=options)
        actual = str(tree).split('\n')
    except CompileError as e:
        # On a parse failure, compare the error messages instead.
        actual = e.messages
    assert_string_arrays_equal(testcase.output, actual,
                               'Invalid parser output ({}, line {})'.format(
                                   testcase.file, testcase.line))
# The file name shown in test case output. This is displayed in error
# messages, and must match the file name in the test case descriptions.
# (Passed to parse() below by test_parse_error.)
INPUT_FILE_NAME = 'file'
class ParseErrorSuite(DataSuite):
    """Data-driven suite for inputs that must fail to parse."""
    required_out_section = True
    base_path = '.'
    files = ['parse-errors.test']
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_parse_error(testcase)
def test_parse_error(testcase: DataDrivenTestCase) -> None:
    """Check that parsing the test case input fails with the expected messages."""
    try:
        options = parse_options('\n'.join(testcase.input), testcase, 0)
        # Skip cases that target a different interpreter version than the
        # one running the test suite.
        if options.python_version != sys.version_info[:2]:
            skip()
        # Compile temporary file. The test file contains non-ASCII characters.
        parse(bytes('\n'.join(testcase.input), 'utf-8'), INPUT_FILE_NAME, '__main__', None,
              options)
        # Parsing unexpectedly succeeded; AssertionError is not a
        # CompileError, so it propagates out of the except clause below.
        raise AssertionError('No errors reported')
    except CompileError as e:
        if e.module_with_blocker is not None:
            assert e.module_with_blocker == '__main__'
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        assert_string_arrays_equal(
            testcase.output, e.messages,
            'Invalid compiler output ({}, line {})'.format(testcase.file,
                                                           testcase.line))

View file

@ -0,0 +1,206 @@
from contextlib import contextmanager
import os
import pytest
import re
import subprocess
from subprocess import PIPE
import sys
import tempfile
from typing import Tuple, List, Generator
import mypy.api
from mypy.test.config import package_path
from mypy.util import try_find_python2_interpreter
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.config import test_temp_dir
from mypy.test.helpers import assert_string_arrays_equal, perform_file_operations
# NOTE: options.use_builtins_fixtures should not be set in these
# tests, otherwise mypy will ignore installed third-party packages.
class PEP561Suite(DataSuite):
    """Data-driven suite that type checks code depending on PEP 561 packages."""
    files = [
        'pep561.test',
    ]
    base_path = '.'
    def run_case(self, test_case: DataDrivenTestCase) -> None:
        test_pep561(test_case)
@contextmanager
def virtualenv(
        python_executable: str = sys.executable
) -> Generator[Tuple[str, str], None, None]:
    """Context manager that creates a virtualenv in a temporary directory

    returns the path to the created Python executable"""
    # Sadly, we need virtualenv, as the Python 3 venv module does not support creating a venv
    # for Python 2, and Python 2 does not have its own venv.
    with tempfile.TemporaryDirectory() as venv_dir:
        result = subprocess.run(
            [sys.executable,
             '-m',
             'virtualenv',
             '-p{}'.format(python_executable),
             venv_dir],
            cwd=os.getcwd(), stdout=PIPE, stderr=PIPE)
        if result.returncode != 0:
            err = result.stdout.decode('utf-8') + result.stderr.decode('utf-8')
            raise Exception("Failed to create venv. Do you have virtualenv installed?\n" + err)
        # The interpreter lands in a platform-dependent subdirectory.
        bin_dir = 'Scripts' if sys.platform == 'win32' else 'bin'
        yield venv_dir, os.path.abspath(os.path.join(venv_dir, bin_dir, 'python'))
def install_package(pkg: str,
                    python_executable: str = sys.executable,
                    use_pip: bool = True,
                    editable: bool = False) -> None:
    """Install a package from test-data/packages/pkg/

    Installs into the interpreter given by python_executable, either via
    pip or via a direct ``setup.py`` invocation, optionally in editable
    (develop) mode.  Raises Exception with combined stdout/stderr on a
    non-zero exit status.
    """
    working_dir = os.path.join(package_path, pkg)
    # NOTE: 'dir' shadows the builtin; kept as-is (only used as a path here).
    with tempfile.TemporaryDirectory() as dir:
        if use_pip:
            install_cmd = [python_executable, '-m', 'pip', 'install']
            if editable:
                install_cmd.append('-e')
            install_cmd.append('.')
        else:
            install_cmd = [python_executable, 'setup.py']
            if editable:
                install_cmd.append('develop')
            else:
                install_cmd.append('install')
        # Note that newer versions of pip (21.3+) don't
        # follow this env variable, but this is for compatibility
        env = {'PIP_BUILD': dir}
        # Inherit environment for Windows
        env.update(os.environ)
        proc = subprocess.run(install_cmd,
                              cwd=working_dir,
                              stdout=PIPE,
                              stderr=PIPE,
                              env=env)
        if proc.returncode != 0:
            raise Exception(proc.stdout.decode('utf-8') + proc.stderr.decode('utf-8'))
def test_pep561(testcase: DataDrivenTestCase) -> None:
    """Test running mypy on files that depend on PEP 561 packages.

    Creates a fresh virtualenv, installs the packages named in the first
    input line, runs mypy (possibly over several incremental steps), and
    compares the normalized output against the expected output.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    if 'python2' in testcase.name.lower():
        python = try_find_python2_interpreter()
        if python is None:
            pytest.skip()
    else:
        python = sys.executable
    assert python is not None, "Should be impossible"
    # First input line: '# pkgs: ...'; second: optional '# flags: ...'.
    pkgs, pip_args = parse_pkgs(testcase.input[0])
    mypy_args = parse_mypy_args(testcase.input[1])
    use_pip = True
    editable = False
    for arg in pip_args:
        if arg == 'no-pip':
            use_pip = False
        elif arg == 'editable':
            editable = True
    assert pkgs != [], "No packages to install for PEP 561 test?"
    with virtualenv(python) as venv:
        venv_dir, python_executable = venv
        for pkg in pkgs:
            install_package(pkg, python_executable, use_pip, editable)
        cmd_line = list(mypy_args)
        # Without -p/--package we type check a generated program file.
        has_program = not ('-p' in cmd_line or '--package' in cmd_line)
        if has_program:
            program = testcase.name + '.py'
            with open(program, 'w', encoding='utf-8') as f:
                for s in testcase.input:
                    f.write('{}\n'.format(s))
            cmd_line.append(program)
        cmd_line.extend(['--no-error-summary'])
        if python_executable != sys.executable:
            cmd_line.append('--python-executable={}'.format(python_executable))
        steps = testcase.find_steps()
        if steps != [[]]:
            # Prepend an empty step so step 1 runs before any file operations.
            steps = [[]] + steps  # type: ignore[operator,assignment]
        for i, operations in enumerate(steps):
            perform_file_operations(operations)
            output = []
            # Type check the module
            out, err, returncode = mypy.api.run(cmd_line)
            # split lines, remove newlines, and remove directory of test case
            for line in (out + err).splitlines():
                if line.startswith(test_temp_dir + os.sep):
                    output.append(line[len(test_temp_dir + os.sep):].rstrip("\r\n"))
                else:
                    # Normalize paths so that the output is the same on Windows and Linux/macOS.
                    line = line.replace(test_temp_dir + os.sep, test_temp_dir + '/')
                    output.append(line.rstrip("\r\n"))
            iter_count = '' if i == 0 else ' on iteration {}'.format(i + 1)
            expected = testcase.output if i == 0 else testcase.output2.get(i + 1, [])
            assert_string_arrays_equal(expected, output,
                                       'Invalid output ({}, line {}){}'.format(
                                           testcase.file, testcase.line, iter_count))
        if has_program:
            os.remove(program)
def parse_pkgs(comment: str) -> Tuple[List[str], List[str]]:
    """Parse a '# pkgs: pkg1, pkg2; arg1; arg2' header comment.

    Returns ([package names], [extra install args]); both lists are empty
    when the line is not a pkgs header.
    """
    if not comment.startswith('# pkgs:'):
        return ([], [])
    # After the prefix: comma-separated packages, then ';'-separated args.
    pkgs_str, *rest = comment[7:].split(';')
    packages = [pkg.strip() for pkg in pkgs_str.split(',')]
    extra_args = [arg.strip() for arg in rest]
    return (packages, extra_args)
def parse_mypy_args(line: str) -> List[str]:
    """Extract mypy command-line flags from a '# flags: ...' comment line."""
    match = re.match('# flags: (.*)$', line)
    if match is None:
        return []  # No args; mypy will spit out an error.
    return match.group(1).split()
def test_mypy_path_is_respected() -> None:
    """Check that mypy_path from mypy.ini is used to locate packages.

    Creates a throwaway package under a ``packages`` directory, points
    ``mypy_path`` at it via a generated mypy.ini, and expects a clean run
    when checking the package by name inside a fresh virtualenv.
    """
    # FIX: this test was disabled with a bare `assert False`, which would
    # show up as a *failure* (not a skip) if the function is ever collected,
    # and gave no reason.  Make the disablement explicit and self-describing.
    pytest.skip("test is currently disabled")
    packages = 'packages'
    pkg_name = 'a'
    with tempfile.TemporaryDirectory() as temp_dir:
        old_dir = os.getcwd()
        os.chdir(temp_dir)
        try:
            # Create the pkg for files to go into
            full_pkg_name = os.path.join(temp_dir, packages, pkg_name)
            os.makedirs(full_pkg_name)
            # Create the empty __init__ file to declare a package
            pkg_init_name = os.path.join(temp_dir, packages, pkg_name, '__init__.py')
            open(pkg_init_name, 'w', encoding='utf8').close()
            mypy_config_path = os.path.join(temp_dir, 'mypy.ini')
            with open(mypy_config_path, 'w') as mypy_file:
                mypy_file.write('[mypy]\n')
                mypy_file.write('mypy_path = ./{}\n'.format(packages))
            with virtualenv() as venv:
                venv_dir, python_executable = venv
                cmd_line_args = []
                if python_executable != sys.executable:
                    cmd_line_args.append('--python-executable={}'.format(python_executable))
                cmd_line_args.extend(['--config-file', mypy_config_path,
                                      '--package', pkg_name])
                out, err, returncode = mypy.api.run(cmd_line_args)
                assert returncode == 0
        finally:
            os.chdir(old_dir)

View file

@ -0,0 +1,116 @@
"""Test cases for running mypy programs using a Python interpreter.
Each test case type checks a program then runs it using Python. The
output (stdout) of the program is compared to expected output. Type checking
uses full builtins and other stubs.
Note: Currently Python interpreter paths are hard coded.
Note: These test cases are *not* included in the main test suite, as including
this suite would slow down the main suite too much.
"""
import os
import os.path
import re
import subprocess
from subprocess import PIPE
import sys
from tempfile import TemporaryDirectory
import pytest
from typing import List
from mypy.defaults import PYTHON3_VERSION
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal, split_lines
from mypy.util import try_find_python2_interpreter
from mypy import api
# Path to Python 3 interpreter
python3_path = sys.executable
# Matches the generic program name that expected outputs refer to; it is
# substituted with the per-test-case file name by adapt_output().
program_re = re.compile(r'\b_program.py\b')
class PythonEvaluationSuite(DataSuite):
    """Data-driven suite: type check a program, then run it and compare stdout."""
    files = ['pythoneval.test',
             'python2eval.test',
             'pythoneval-asyncio.test']
    # Class attribute: one cache directory shared by every case in the suite.
    cache_dir = TemporaryDirectory()
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_python_evaluation(testcase, os.path.join(self.cache_dir.name, '.mypy_cache'))
def test_python_evaluation(testcase: DataDrivenTestCase, cache_dir: str) -> None:
    """Runs Mypy in a subprocess.

    If this passes without errors, executes the script again with a given Python
    version.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # We must enable site packages to get access to installed stubs.
    # TODO: Enable strict optional for these tests
    mypy_cmdline = [
        '--show-traceback',
        '--no-strict-optional',
        '--no-silence-site-packages',
        '--no-error-summary',
    ]
    py2 = testcase.name.lower().endswith('python2')
    if py2:
        mypy_cmdline.append('--py2')
        interpreter = try_find_python2_interpreter()
        if interpreter is None:
            # Skip, can't find a Python 2 interpreter.
            pytest.skip()
            # placate the type checker
            return
    else:
        interpreter = python3_path
        mypy_cmdline.append('--python-version={}'.format('.'.join(map(str, PYTHON3_VERSION))))
    # A '# flags: ...' comment anywhere in the input adds extra mypy flags.
    m = re.search('# flags: (.*)$', '\n'.join(testcase.input), re.MULTILINE)
    if m:
        mypy_cmdline.extend(m.group(1).split())
    # Write the program to a file.
    program = '_' + testcase.name + '.py'
    program_path = os.path.join(test_temp_dir, program)
    mypy_cmdline.append(program_path)
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    mypy_cmdline.append('--cache-dir={}'.format(cache_dir))
    output = []
    # Type check the program.
    out, err, returncode = api.run(mypy_cmdline)
    # split lines, remove newlines, and remove directory of test case
    for line in (out + err).splitlines():
        if line.startswith(test_temp_dir + os.sep):
            output.append(line[len(test_temp_dir + os.sep):].rstrip("\r\n"))
        else:
            # Normalize paths so that the output is the same on Windows and Linux/macOS.
            line = line.replace(test_temp_dir + os.sep, test_temp_dir + '/')
            output.append(line.rstrip("\r\n"))
    if returncode == 0:
        # Execute the program.
        proc = subprocess.run([interpreter, '-Wignore', program],
                              cwd=test_temp_dir, stdout=PIPE, stderr=PIPE)
        output.extend(split_lines(proc.stdout, proc.stderr))
    # Remove temp file.
    os.remove(program_path)
    # Strip typeshed paths down to the bare file name for stable output.
    for i, line in enumerate(output):
        if os.path.sep + 'typeshed' + os.path.sep in line:
            output[i] = line.split(os.path.sep)[-1]
    assert_string_arrays_equal(adapt_output(testcase), output,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def adapt_output(testcase: DataDrivenTestCase) -> List[str]:
    """Translates the generic _program.py into the actual filename."""
    actual_name = '_' + testcase.name + '.py'
    return [program_re.sub(actual_name, line) for line in testcase.output]

View file

@ -0,0 +1,40 @@
"""Test cases for reports generated by mypy."""
import textwrap
from mypy.test.helpers import Suite, assert_equal
from mypy.report import CoberturaPackage, get_line_rate
class CoberturaReportSuite(Suite):
    """Unit tests for the Cobertura XML coverage report backend."""
    def test_get_line_rate(self) -> None:
        # covered == total == 0 is treated as fully covered.
        assert_equal('1.0', get_line_rate(0, 0))
        assert_equal('0.3333', get_line_rate(1, 3))
    def test_as_xml(self) -> None:
        import lxml.etree as etree  # type: ignore
        cobertura_package = CoberturaPackage('foobar')
        cobertura_package.covered_lines = 21
        cobertura_package.total_lines = 42
        child_package = CoberturaPackage('raz')
        child_package.covered_lines = 10
        child_package.total_lines = 10
        child_package.classes['class'] = etree.Element('class')
        cobertura_package.packages['raz'] = child_package
        # Nested packages should serialize recursively with their own rates.
        expected_output = textwrap.dedent('''\
            <package complexity="1.0" name="foobar" branch-rate="0" line-rate="0.5000">
                <classes/>
                <packages>
                    <package complexity="1.0" name="raz" branch-rate="0" line-rate="1.0000">
                        <classes>
                            <class/>
                        </classes>
                    </package>
                </packages>
            </package>
        ''').encode('ascii')
        assert_equal(expected_output,
                     etree.tostring(cobertura_package.as_xml(), pretty_print=True))

View file

@ -0,0 +1,216 @@
"""Semantic analyzer test cases"""
import os.path
import sys
from typing import Dict, List
from mypy import build
from mypy.modulefinder import BuildSource
from mypy.defaults import PYTHON3_VERSION
from mypy.test.helpers import (
assert_string_arrays_equal, normalize_error_messages, testfile_pyversion, parse_options
)
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.config import test_temp_dir
from mypy.errors import CompileError
from mypy.nodes import TypeInfo
from mypy.options import Options
# Semantic analyzer test cases: dump parse tree
# Semantic analysis test case description files.
semanal_files = [
    'semanal-basic.test',
    'semanal-expressions.test',
    'semanal-classes.test',
    'semanal-types.test',
    'semanal-typealiases.test',
    'semanal-modules.test',
    'semanal-statements.test',
    'semanal-abstractclasses.test',
    'semanal-namedtuple.test',
    'semanal-typeddict.test',
    'semenal-literal.test',  # NOTE(review): spelling differs from the others; presumably matches the data file on disk — confirm
    'semanal-classvar.test',
    'semanal-python2.test',
    'semanal-lambda.test',
]
if sys.version_info >= (3, 10):
    # match-statement cases need a 3.10+ interpreter to parse.
    semanal_files.append('semanal-python310.test')
def get_semanal_options(program_text: str, testcase: DataDrivenTestCase) -> Options:
    """Build the Options object shared by the semantic-analysis test suites."""
    opts = parse_options(program_text, testcase, 1)
    # Stop the build pipeline after semantic analysis and use the small
    # builtins fixtures rather than full stubs.
    opts.semantic_analysis_only = True
    opts.use_builtins_fixtures = True
    opts.show_traceback = True
    opts.python_version = PYTHON3_VERSION
    return opts
class SemAnalSuite(DataSuite):
    """Data-driven suite that dumps the tree after semantic analysis."""
    files = semanal_files
    native_sep = True
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_semanal(testcase)
def test_semanal(testcase: DataDrivenTestCase) -> None:
    """Perform a semantic analysis test case.

    The testcase argument contains a description of the test case
    (inputs and output).
    """
    try:
        src = '\n'.join(testcase.input)
        options = get_semanal_options(src, testcase)
        options.python_version = testfile_pyversion(testcase.file)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.pyi',
                                     'typing.pyi',
                                     'mypy_extensions.pyi',
                                     'typing_extensions.pyi',
                                     'abc.pyi',
                                     'collections.pyi',
                                     'sys.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(testcase.file,
                                                                testcase.line))
# Semantic analyzer error test cases
class SemAnalErrorSuite(DataSuite):
    """Test cases where semantic analysis is expected to report errors."""
    files = ['semanal-errors.test']
    if sys.version_info >= (3, 10):
        # BUG FIX: this previously appended to the module-level
        # ``semanal_files`` list (which SemAnalSuite.files aliases), so the
        # 3.10 error cases ran in the tree-dump suite and never in this one.
        # Append to this class's own ``files`` instead.
        files.append('semanal-errors-python310.test')
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_semanal_error(testcase)
def test_semanal_error(testcase: DataDrivenTestCase) -> None:
    """Perform a test case.

    The build must produce errors, either as a non-empty error list or as
    a CompileError; the messages are compared to the expected output.
    """
    try:
        src = '\n'.join(testcase.input)
        res = build.build(sources=[BuildSource('main', None, src)],
                          options=get_semanal_options(src, testcase),
                          alt_lib_path=test_temp_dir)
        a = res.errors
        assert a, 'No errors reported in {}, line {}'.format(testcase.file, testcase.line)
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid compiler output ({}, line {})'.format(testcase.file, testcase.line))
# SymbolNode table export test cases
# SymbolNode table export test cases
class SemAnalSymtableSuite(DataSuite):
    """Data-driven suite that dumps module symbol tables after analysis."""
    required_out_section = True
    files = ['semanal-symtable.test']
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a test case."""
        try:
            # Build test case input.
            src = '\n'.join(testcase.input)
            result = build.build(sources=[BuildSource('main', None, src)],
                                 options=get_semanal_options(src, testcase),
                                 alt_lib_path=test_temp_dir)
            # The output is the symbol table converted into a string.
            a = result.errors
            if a:
                raise CompileError(a)
            for f in sorted(result.files.keys()):
                # Skip fixture modules; only user modules are of interest.
                if f not in ('builtins', 'typing', 'abc'):
                    a.append('{}:'.format(f))
                    for s in str(result.files[f].names).split('\n'):
                        a.append(' ' + s)
        except CompileError as e:
            a = e.messages
        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid semantic analyzer output ({}, line {})'.format(
                testcase.file, testcase.line))
# Type info export test cases
class SemAnalTypeInfoSuite(DataSuite):
    """Data-driven suite that dumps the TypeInfos produced by analysis."""
    required_out_section = True
    files = ['semanal-typeinfo.test']
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a test case."""
        try:
            # Build test case input.
            src = '\n'.join(testcase.input)
            result = build.build(sources=[BuildSource('main', None, src)],
                                 options=get_semanal_options(src, testcase),
                                 alt_lib_path=test_temp_dir)
            a = result.errors
            if a:
                raise CompileError(a)
            # Collect all TypeInfos in top-level modules.
            typeinfos = TypeInfoMap()
            for f in result.files.values():
                for n in f.names.values():
                    if isinstance(n.node, TypeInfo):
                        assert n.fullname is not None
                        typeinfos[n.fullname] = n.node
            # The output is the symbol table converted into a string.
            a = str(typeinfos).split('\n')
        except CompileError as e:
            a = e.messages
        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid semantic analyzer output ({}, line {})'.format(
                testcase.file, testcase.line))
class TypeInfoMap(Dict[str, TypeInfo]):
    """Map from type fullname to TypeInfo, with a filtered pretty-printer."""
    def __str__(self) -> str:
        a: List[str] = ["TypeInfoMap("]
        for x, y in sorted(self.items()):
            # Hide fixture types; only report user-defined TypeInfos.
            if isinstance(x, str) and (not x.startswith('builtins.') and
                                       not x.startswith('typing.') and
                                       not x.startswith('abc.')):
                # Re-indent multi-line TypeInfo dumps under their key.
                ti = ('\n' + ' ').join(str(y).split('\n'))
                a.append(' {} : {}'.format(x, ti))
        # Close the listing on the last emitted line.
        a[-1] += ')'
        return '\n'.join(a)

View file

@ -0,0 +1,131 @@
"""Test cases for the constraint solver used in type inference."""
from typing import List, Union, Tuple, Optional
from mypy.test.helpers import Suite, assert_equal
from mypy.constraints import SUPERTYPE_OF, SUBTYPE_OF, Constraint
from mypy.solve import solve_constraints
from mypy.test.typefixture import TypeFixture
from mypy.types import Type, TypeVarType, TypeVarId
class SolveSuite(Suite):
    """Unit tests for the type-variable constraint solver.

    Each case feeds supertype-of / subtype-of constraints to
    solve_constraints and compares the inferred types (or None for
    unsatisfiable variables) against expectations.
    """
    def setUp(self) -> None:
        self.fx = TypeFixture()
    def test_empty_input(self) -> None:
        self.assert_solve([], [], [])
    def test_simple_supertype_constraints(self) -> None:
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.a)],
                          [(self.fx.a, self.fx.o)])
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.a),
                           self.supc(self.fx.t, self.fx.b)],
                          [(self.fx.a, self.fx.o)])
    def test_simple_subtype_constraints(self) -> None:
        self.assert_solve([self.fx.t.id],
                          [self.subc(self.fx.t, self.fx.a)],
                          [self.fx.a])
        self.assert_solve([self.fx.t.id],
                          [self.subc(self.fx.t, self.fx.a),
                           self.subc(self.fx.t, self.fx.b)],
                          [self.fx.b])
    def test_both_kinds_of_constraints(self) -> None:
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.b),
                           self.subc(self.fx.t, self.fx.a)],
                          [(self.fx.b, self.fx.a)])
    def test_unsatisfiable_constraints(self) -> None:
        # The constraints are impossible to satisfy.
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.a),
                           self.subc(self.fx.t, self.fx.b)],
                          [None])
    def test_exactly_specified_result(self) -> None:
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.b),
                           self.subc(self.fx.t, self.fx.b)],
                          [(self.fx.b, self.fx.b)])
    def test_multiple_variables(self) -> None:
        self.assert_solve([self.fx.t.id, self.fx.s.id],
                          [self.supc(self.fx.t, self.fx.b),
                           self.supc(self.fx.s, self.fx.c),
                           self.subc(self.fx.t, self.fx.a)],
                          [(self.fx.b, self.fx.a), (self.fx.c, self.fx.o)])
    def test_no_constraints_for_var(self) -> None:
        # Unconstrained variables solve to the uninhabited type.
        self.assert_solve([self.fx.t.id],
                          [],
                          [self.fx.uninhabited])
        self.assert_solve([self.fx.t.id, self.fx.s.id],
                          [],
                          [self.fx.uninhabited, self.fx.uninhabited])
        self.assert_solve([self.fx.t.id, self.fx.s.id],
                          [self.supc(self.fx.s, self.fx.a)],
                          [self.fx.uninhabited, (self.fx.a, self.fx.o)])
    def test_simple_constraints_with_dynamic_type(self) -> None:
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.anyt)],
                          [(self.fx.anyt, self.fx.anyt)])
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.anyt),
                           self.supc(self.fx.t, self.fx.anyt)],
                          [(self.fx.anyt, self.fx.anyt)])
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.anyt),
                           self.supc(self.fx.t, self.fx.a)],
                          [(self.fx.anyt, self.fx.anyt)])
        self.assert_solve([self.fx.t.id],
                          [self.subc(self.fx.t, self.fx.anyt)],
                          [(self.fx.anyt, self.fx.anyt)])
        self.assert_solve([self.fx.t.id],
                          [self.subc(self.fx.t, self.fx.anyt),
                           self.subc(self.fx.t, self.fx.anyt)],
                          [(self.fx.anyt, self.fx.anyt)])
        # self.assert_solve([self.fx.t.id],
        #                   [self.subc(self.fx.t, self.fx.anyt),
        #                    self.subc(self.fx.t, self.fx.a)],
        #                   [(self.fx.anyt, self.fx.anyt)])
        # TODO: figure out what this should be after changes to meet(any, X)
    def test_both_normal_and_any_types_in_results(self) -> None:
        # If one of the bounds is any, we promote the other bound to
        # any as well, since otherwise the type range does not make sense.
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.a),
                           self.subc(self.fx.t, self.fx.anyt)],
                          [(self.fx.anyt, self.fx.anyt)])
        self.assert_solve([self.fx.t.id],
                          [self.supc(self.fx.t, self.fx.anyt),
                           self.subc(self.fx.t, self.fx.a)],
                          [(self.fx.anyt, self.fx.anyt)])
    def assert_solve(self,
                     vars: List[TypeVarId],
                     constraints: List[Constraint],
                     results: List[Union[None, Type, Tuple[Type, Type]]],
                     ) -> None:
        # A (lower, upper) tuple in `results` means only the lower bound is
        # expected as the solution.
        res: List[Optional[Type]] = []
        for r in results:
            if isinstance(r, tuple):
                res.append(r[0])
            else:
                res.append(r)
        actual = solve_constraints(vars, constraints)
        assert_equal(str(actual), str(res))
    def supc(self, type_var: TypeVarType, bound: Type) -> Constraint:
        """Build a 'type_var is a supertype of bound' constraint."""
        return Constraint(type_var.id, SUPERTYPE_OF, bound)
    def subc(self, type_var: TypeVarType, bound: Type) -> Constraint:
        """Build a 'type_var is a subtype of bound' constraint."""
        return Constraint(type_var.id, SUBTYPE_OF, bound)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,18 @@
import unittest
from mypy.stubinfo import is_legacy_bundled_package
class TestStubInfo(unittest.TestCase):
    """Unit tests for the legacy bundled-stubs package table."""
    def test_is_legacy_bundled_packages(self) -> None:
        # Second argument is the major Python version the stubs target.
        assert not is_legacy_bundled_package('foobar_asdf', 2)
        assert not is_legacy_bundled_package('foobar_asdf', 3)
        assert is_legacy_bundled_package('certifi', 2)
        assert is_legacy_bundled_package('certifi', 3)
        assert is_legacy_bundled_package('scribe', 2)
        assert not is_legacy_bundled_package('scribe', 3)
        assert not is_legacy_bundled_package('dataclasses', 2)
        assert is_legacy_bundled_package('dataclasses', 3)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,206 @@
from mypy.test.helpers import Suite, skip
from mypy.nodes import CONTRAVARIANT, INVARIANT, COVARIANT
from mypy.subtypes import is_subtype
from mypy.test.typefixture import TypeFixture, InterfaceTypeFixture
from mypy.types import Type
class SubtypingSuite(Suite):
    """Unit tests for is_subtype over instance, generic and callable types.

    Three fixtures are used so generic cases can be checked under
    invariant, covariant and contravariant type-variable variance.
    """
    def setUp(self) -> None:
        self.fx = TypeFixture(INVARIANT)
        self.fx_contra = TypeFixture(CONTRAVARIANT)
        self.fx_co = TypeFixture(COVARIANT)
    def test_trivial_cases(self) -> None:
        for simple in self.fx_co.a, self.fx_co.o, self.fx_co.b:
            self.assert_subtype(simple, simple)
    def test_instance_subtyping(self) -> None:
        self.assert_strict_subtype(self.fx.a, self.fx.o)
        self.assert_strict_subtype(self.fx.b, self.fx.o)
        self.assert_strict_subtype(self.fx.b, self.fx.a)
        self.assert_not_subtype(self.fx.a, self.fx.d)
        self.assert_not_subtype(self.fx.b, self.fx.c)
    def test_simple_generic_instance_subtyping_invariant(self) -> None:
        self.assert_subtype(self.fx.ga, self.fx.ga)
        self.assert_subtype(self.fx.hab, self.fx.hab)
        self.assert_not_subtype(self.fx.ga, self.fx.g2a)
        self.assert_not_subtype(self.fx.ga, self.fx.gb)
        self.assert_not_subtype(self.fx.gb, self.fx.ga)
    def test_simple_generic_instance_subtyping_covariant(self) -> None:
        self.assert_subtype(self.fx_co.ga, self.fx_co.ga)
        self.assert_subtype(self.fx_co.hab, self.fx_co.hab)
        self.assert_not_subtype(self.fx_co.ga, self.fx_co.g2a)
        self.assert_not_subtype(self.fx_co.ga, self.fx_co.gb)
        self.assert_subtype(self.fx_co.gb, self.fx_co.ga)
    def test_simple_generic_instance_subtyping_contravariant(self) -> None:
        self.assert_subtype(self.fx_contra.ga, self.fx_contra.ga)
        self.assert_subtype(self.fx_contra.hab, self.fx_contra.hab)
        self.assert_not_subtype(self.fx_contra.ga, self.fx_contra.g2a)
        self.assert_subtype(self.fx_contra.ga, self.fx_contra.gb)
        self.assert_not_subtype(self.fx_contra.gb, self.fx_contra.ga)
    def test_generic_subtyping_with_inheritance_invariant(self) -> None:
        self.assert_subtype(self.fx.gsab, self.fx.gb)
        self.assert_not_subtype(self.fx.gsab, self.fx.ga)
        self.assert_not_subtype(self.fx.gsaa, self.fx.gb)
    def test_generic_subtyping_with_inheritance_covariant(self) -> None:
        self.assert_subtype(self.fx_co.gsab, self.fx_co.gb)
        self.assert_subtype(self.fx_co.gsab, self.fx_co.ga)
        self.assert_not_subtype(self.fx_co.gsaa, self.fx_co.gb)
    def test_generic_subtyping_with_inheritance_contravariant(self) -> None:
        self.assert_subtype(self.fx_contra.gsab, self.fx_contra.gb)
        self.assert_not_subtype(self.fx_contra.gsab, self.fx_contra.ga)
        self.assert_subtype(self.fx_contra.gsaa, self.fx_contra.gb)
    def test_interface_subtyping(self) -> None:
        self.assert_subtype(self.fx.e, self.fx.f)
        self.assert_equivalent(self.fx.f, self.fx.f)
        self.assert_not_subtype(self.fx.a, self.fx.f)
    @skip
    def test_generic_interface_subtyping(self) -> None:
        # TODO make this work
        fx2 = InterfaceTypeFixture()
        self.assert_subtype(fx2.m1, fx2.gfa)
        self.assert_not_subtype(fx2.m1, fx2.gfb)
        self.assert_equivalent(fx2.gfa, fx2.gfa)
    def test_basic_callable_subtyping(self) -> None:
        self.assert_strict_subtype(self.fx.callable(self.fx.o, self.fx.d),
                                   self.fx.callable(self.fx.a, self.fx.d))
        self.assert_strict_subtype(self.fx.callable(self.fx.d, self.fx.b),
                                   self.fx.callable(self.fx.d, self.fx.a))
        self.assert_strict_subtype(self.fx.callable(self.fx.a, self.fx.nonet),
                                   self.fx.callable(self.fx.a, self.fx.a))
        self.assert_unrelated(
            self.fx.callable(self.fx.a, self.fx.a, self.fx.a),
            self.fx.callable(self.fx.a, self.fx.a))
    def test_default_arg_callable_subtyping(self) -> None:
        self.assert_strict_subtype(
            self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
            self.fx.callable(self.fx.a, self.fx.d, self.fx.a))
        self.assert_strict_subtype(
            self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
            self.fx.callable(self.fx.a, self.fx.a))
        self.assert_strict_subtype(
            self.fx.callable_default(0, self.fx.a, self.fx.d, self.fx.a),
            self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a))
        self.assert_unrelated(
            self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
            self.fx.callable(self.fx.d, self.fx.d, self.fx.a))
        self.assert_unrelated(
            self.fx.callable_default(0, self.fx.a, self.fx.d, self.fx.a),
            self.fx.callable_default(1, self.fx.a, self.fx.a, self.fx.a))
        self.assert_unrelated(
            self.fx.callable_default(1, self.fx.a, self.fx.a),
            self.fx.callable(self.fx.a, self.fx.a, self.fx.a))
    def test_var_arg_callable_subtyping_1(self) -> None:
        self.assert_strict_subtype(
            self.fx.callable_var_arg(0, self.fx.a, self.fx.a),
            self.fx.callable_var_arg(0, self.fx.b, self.fx.a))
    def test_var_arg_callable_subtyping_2(self) -> None:
        self.assert_strict_subtype(
            self.fx.callable_var_arg(0, self.fx.a, self.fx.a),
            self.fx.callable(self.fx.b, self.fx.a))
    def test_var_arg_callable_subtyping_3(self) -> None:
        self.assert_strict_subtype(
            self.fx.callable_var_arg(0, self.fx.a, self.fx.a),
            self.fx.callable(self.fx.a))
    def test_var_arg_callable_subtyping_4(self) -> None:
        self.assert_strict_subtype(
            self.fx.callable_var_arg(1, self.fx.a, self.fx.d, self.fx.a),
            self.fx.callable(self.fx.b, self.fx.a))
    def test_var_arg_callable_subtyping_5(self) -> None:
        self.assert_strict_subtype(
            self.fx.callable_var_arg(0, self.fx.a, self.fx.d, self.fx.a),
            self.fx.callable(self.fx.b, self.fx.a))
    def test_var_arg_callable_subtyping_6(self) -> None:
        self.assert_strict_subtype(
            self.fx.callable_var_arg(0, self.fx.a, self.fx.f, self.fx.d),
            self.fx.callable_var_arg(0, self.fx.b, self.fx.e, self.fx.d))
    def test_var_arg_callable_subtyping_7(self) -> None:
        self.assert_not_subtype(
            self.fx.callable_var_arg(0, self.fx.b, self.fx.d),
            self.fx.callable(self.fx.a, self.fx.d))
    def test_var_arg_callable_subtyping_8(self) -> None:
        self.assert_not_subtype(
            self.fx.callable_var_arg(0, self.fx.b, self.fx.d),
            self.fx.callable_var_arg(0, self.fx.a, self.fx.a, self.fx.d))
        self.assert_subtype(
            self.fx.callable_var_arg(0, self.fx.a, self.fx.d),
            self.fx.callable_var_arg(0, self.fx.b, self.fx.b, self.fx.d))
    def test_var_arg_callable_subtyping_9(self) -> None:
        self.assert_not_subtype(
            self.fx.callable_var_arg(0, self.fx.b, self.fx.b, self.fx.d),
            self.fx.callable_var_arg(0, self.fx.a, self.fx.d))
        self.assert_subtype(
            self.fx.callable_var_arg(0, self.fx.a, self.fx.a, self.fx.d),
            self.fx.callable_var_arg(0, self.fx.b, self.fx.d))
    def test_type_callable_subtyping(self) -> None:
        self.assert_subtype(
            self.fx.callable_type(self.fx.d, self.fx.a), self.fx.type_type)
        self.assert_strict_subtype(
            self.fx.callable_type(self.fx.d, self.fx.b),
            self.fx.callable(self.fx.d, self.fx.a))
        self.assert_strict_subtype(self.fx.callable_type(self.fx.a, self.fx.b),
                                   self.fx.callable(self.fx.a, self.fx.b))
    # IDEA: Maybe add these test cases (they are tested pretty well in type
    #       checker tests already):
    #  * more interface subtyping test cases
    #  * more generic interface subtyping test cases
    #  * type variables
    #  * tuple types
    #  * None type
    #  * any type
    #  * generic function types
    def assert_subtype(self, s: Type, t: Type) -> None:
        assert is_subtype(s, t), '{} not subtype of {}'.format(s, t)
    def assert_not_subtype(self, s: Type, t: Type) -> None:
        assert not is_subtype(s, t), '{} subtype of {}'.format(s, t)
    def assert_strict_subtype(self, s: Type, t: Type) -> None:
        # Strict: s <: t holds but t <: s does not.
        self.assert_subtype(s, t)
        self.assert_not_subtype(t, s)
    def assert_equivalent(self, s: Type, t: Type) -> None:
        # Equivalent: subtyping holds in both directions.
        self.assert_subtype(s, t)
        self.assert_subtype(t, s)
    def assert_unrelated(self, s: Type, t: Type) -> None:
        # Unrelated: subtyping holds in neither direction.
        self.assert_not_subtype(s, t)
        self.assert_not_subtype(t, s)

View file

@ -0,0 +1,73 @@
"""Identity AST transform test cases"""
import os.path
from mypy import build
from mypy.modulefinder import BuildSource
from mypy.test.helpers import (
assert_string_arrays_equal, normalize_error_messages, parse_options
)
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.config import test_temp_dir
from mypy.test.visitors import TypeAssertTransformVisitor
from mypy.errors import CompileError
class TransformSuite(DataSuite):
    """Data-driven suite for identity AST transform tests.

    The semantic analysis test case files are reused as input.
    """
    required_out_section = True
    files = [
        'semanal-basic.test',
        'semanal-expressions.test',
        'semanal-classes.test',
        'semanal-types.test',
        'semanal-modules.test',
        'semanal-statements.test',
        'semanal-abstractclasses.test',
        'semanal-python2.test',
    ]
    native_sep = True

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_transform(testcase)
def test_transform(testcase: DataDrivenTestCase) -> None:
    """Perform an identity transform test case."""

    def should_dump(path: str) -> bool:
        # Omit the builtins module and fixture-like files whose names use a
        # special marker (leading or trailing underscore).
        # TODO the test is not reliable
        if path.endswith((os.sep + 'builtins.pyi', 'typing.pyi', 'abc.pyi')):
            return False
        base = os.path.basename(path)
        stem = os.path.splitext(base)[0]
        return not base.startswith('_') and not stem.endswith('_')

    try:
        src = '\n'.join(testcase.input)
        options = parse_options(src, testcase, 1)
        options.use_builtins_fixtures = True
        options.semantic_analysis_only = True
        options.show_traceback = True
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            raise CompileError(a)
        # Append string representations of the analyzed source files to the
        # actual output.
        for fnam in sorted(result.files):
            tree = result.files[fnam]
            if should_dump(tree.path):
                visitor = TypeAssertTransformVisitor()
                visitor.test_only = True
                tree = visitor.mypyfile(tree)
                a += str(tree).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(testcase.file,
                                                                testcase.line))

View file

@ -0,0 +1,72 @@
"""Test cases for the type checker: exporting inferred types"""
import re
from mypy import build
from mypy.modulefinder import BuildSource
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal
from mypy.test.visitors import SkippedNodeSearcher, ignore_node
from mypy.util import short_type
from mypy.nodes import NameExpr
from mypy.errors import CompileError
from mypy.options import Options
class TypeExportSuite(DataSuite):
    """Test cases that compare exported (inferred) expression types.

    Each case type checks a program and dumps the types recorded for
    selected AST nodes, which are compared against the expected output.
    """
    required_out_section = True
    files = ['typexport-basic.test']

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Type check the test program and emit 'Node(line) : type' lines."""
        try:
            line = testcase.input[0]
            mask = ''
            # A leading '## <regexp>' line restricts output to matching nodes.
            if line.startswith('##'):
                mask = '(' + line[2:].strip() + ')$'
            src = '\n'.join(testcase.input)
            options = Options()
            options.strict_optional = False  # TODO: Enable strict optional checking
            options.use_builtins_fixtures = True
            options.show_traceback = True
            options.export_types = True
            options.preserve_asts = True
            result = build.build(sources=[BuildSource('main', None, src)],
                                 options=options,
                                 alt_lib_path=test_temp_dir)
            a = result.errors
            # Renamed from 'map' to avoid shadowing the builtin.
            type_map = result.types
            nodes = type_map.keys()
            # Ignore NameExpr nodes of variables with explicit (trivial) types
            # to simplify output.
            searcher = SkippedNodeSearcher()
            # 'mypy_file' rather than 'file' to avoid shadowing the builtin.
            for mypy_file in result.files.values():
                mypy_file.accept(searcher)
            ignored = searcher.nodes
            # Filter nodes that should be included in the output.
            keys = []
            for node in nodes:
                if node.line is not None and node.line != -1 and type_map[node]:
                    if ignore_node(node) or node in ignored:
                        continue
                    if (re.match(mask, short_type(node))
                            or (isinstance(node, NameExpr)
                                and re.match(mask, node.name))):
                        # Include node in output.
                        keys.append(node)
            for key in sorted(keys,
                              key=lambda n: (n.line, short_type(n),
                                             str(n) + str(type_map[n]))):
                ts = str(type_map[key]).replace('*', '')  # Remove erased tags
                ts = ts.replace('__main__.', '')
                a.append('{}({}) : {}'.format(short_type(key), key.line, ts))
        except CompileError as e:
            a = e.messages
        assert_string_arrays_equal(
            testcase.output, a,
            'Invalid type checker output ({}, line {})'.format(testcase.file,
                                                               testcase.line))

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,15 @@
import os
from unittest import mock, TestCase
from mypy.util import get_terminal_width
class TestGetTerminalSize(TestCase):
    """Tests for mypy.util.get_terminal_width."""

    def test_get_terminal_size_in_pty_defaults_to_80(self) -> None:
        # when run using a pty, `os.get_terminal_size()` returns `0, 0`
        fake_size = os.terminal_size((0, 0))
        env_without_columns = os.environ.copy()
        env_without_columns.pop('COLUMNS', None)
        with mock.patch.object(os, 'get_terminal_size', return_value=fake_size):
            with mock.patch.dict(os.environ, values=env_without_columns,
                                 clear=True):
                assert get_terminal_width() == 80

View file

@ -0,0 +1,304 @@
"""Fixture used in type-related test cases.
It contains class TypeInfos and Type objects.
"""
from typing import List, Optional, Tuple
from mypy.semanal_shared import set_callable_name
from mypy.types import (
Type, AnyType, NoneType, Instance, CallableType, TypeVarType, TypeType,
UninhabitedType, TypeOfAny, TypeAliasType, UnionType, LiteralType,
TypeVarLikeType
)
from mypy.nodes import (
TypeInfo, ClassDef, FuncDef, Block, ARG_POS, ARG_OPT, ARG_STAR, SymbolTable,
COVARIANT, TypeAlias, SymbolTableNode, MDEF,
)
class TypeFixture:
    """Helper class that is used as a fixture in type-related unit tests.
    The members are initialized to contain various type-related values.
    """
    def __init__(self, variance: int = COVARIANT) -> None:
        """Build the TypeInfos, type variables and Instance types the tests use.

        Attribute initialization order matters: later attributes reference
        earlier ones (e.g. generic instances reference the TypeInfos above).
        """
        # The 'object' class
        self.oi = self.make_type_info('builtins.object')  # class object
        self.o = Instance(self.oi, [])  # object
        # Type variables (these are effectively global)
        def make_type_var(name: str, id: int, values: List[Type], upper_bound: Type,
                          variance: int) -> TypeVarType:
            # NOTE(review): parameter 'id' shadows the builtin of the same
            # name; harmless in this scope, but worth renaming.
            return TypeVarType(name, name, id, values, upper_bound, variance)
        self.t = make_type_var('T', 1, [], self.o, variance)  # T`1 (type variable)
        self.tf = make_type_var('T', -1, [], self.o, variance)  # T`-1 (type variable)
        self.tf2 = make_type_var('T', -2, [], self.o, variance)  # T`-2 (type variable)
        self.s = make_type_var('S', 2, [], self.o, variance)  # S`2 (type variable)
        self.s1 = make_type_var('S', 1, [], self.o, variance)  # S`1 (type variable)
        self.sf = make_type_var('S', -2, [], self.o, variance)  # S`-2 (type variable)
        self.sf1 = make_type_var('S', -1, [], self.o, variance)  # S`-1 (type variable)
        # Simple types
        self.anyt = AnyType(TypeOfAny.special_form)
        self.nonet = NoneType()
        self.uninhabited = UninhabitedType()
        # Abstract class TypeInfos
        # class F
        self.fi = self.make_type_info('F', is_abstract=True)
        # class F2
        self.f2i = self.make_type_info('F2', is_abstract=True)
        # class F3(F)
        self.f3i = self.make_type_info('F3', is_abstract=True, mro=[self.fi])
        # Class TypeInfos
        self.std_tuplei = self.make_type_info('builtins.tuple',
                                              mro=[self.oi],
                                              typevars=['T'],
                                              variances=[COVARIANT])  # class tuple
        self.type_typei = self.make_type_info('builtins.type')  # class type
        self.bool_type_info = self.make_type_info('builtins.bool')
        self.functioni = self.make_type_info('builtins.function')  # function TODO
        self.ai = self.make_type_info('A', mro=[self.oi])  # class A
        self.bi = self.make_type_info('B', mro=[self.ai, self.oi])  # class B(A)
        self.ci = self.make_type_info('C', mro=[self.ai, self.oi])  # class C(A)
        self.di = self.make_type_info('D', mro=[self.oi])  # class D
        # class E(F)
        self.ei = self.make_type_info('E', mro=[self.fi, self.oi])
        # class E2(F2, F)
        self.e2i = self.make_type_info('E2', mro=[self.f2i, self.fi, self.oi])
        # class E3(F, F2)
        self.e3i = self.make_type_info('E3', mro=[self.fi, self.f2i, self.oi])
        # Generic class TypeInfos
        # G[T]
        self.gi = self.make_type_info('G', mro=[self.oi],
                                      typevars=['T'],
                                      variances=[variance])
        # G2[T]
        self.g2i = self.make_type_info('G2', mro=[self.oi],
                                       typevars=['T'],
                                       variances=[variance])
        # H[S, T]
        self.hi = self.make_type_info('H', mro=[self.oi],
                                      typevars=['S', 'T'],
                                      variances=[variance, variance])
        # GS[T, S] <: G[S]
        self.gsi = self.make_type_info('GS', mro=[self.gi, self.oi],
                                       typevars=['T', 'S'],
                                       variances=[variance, variance],
                                       bases=[Instance(self.gi, [self.s])])
        # GS2[S] <: G[S]
        self.gs2i = self.make_type_info('GS2', mro=[self.gi, self.oi],
                                        typevars=['S'],
                                        variances=[variance],
                                        bases=[Instance(self.gi, [self.s1])])
        # list[T]
        self.std_listi = self.make_type_info('builtins.list', mro=[self.oi],
                                             typevars=['T'],
                                             variances=[variance])
        # Instance types
        self.std_tuple = Instance(self.std_tuplei, [self.anyt])  # tuple
        self.type_type = Instance(self.type_typei, [])  # type
        self.function = Instance(self.functioni, [])  # function TODO
        self.a = Instance(self.ai, [])  # A
        self.b = Instance(self.bi, [])  # B
        self.c = Instance(self.ci, [])  # C
        self.d = Instance(self.di, [])  # D
        self.e = Instance(self.ei, [])  # E
        self.e2 = Instance(self.e2i, [])  # E2
        self.e3 = Instance(self.e3i, [])  # E3
        self.f = Instance(self.fi, [])  # F
        self.f2 = Instance(self.f2i, [])  # F2
        self.f3 = Instance(self.f3i, [])  # F3
        # Generic instance types
        self.ga = Instance(self.gi, [self.a])  # G[A]
        self.gb = Instance(self.gi, [self.b])  # G[B]
        self.gd = Instance(self.gi, [self.d])  # G[D]
        self.go = Instance(self.gi, [self.o])  # G[object]
        self.gt = Instance(self.gi, [self.t])  # G[T`1]
        self.gtf = Instance(self.gi, [self.tf])  # G[T`-1]
        self.gtf2 = Instance(self.gi, [self.tf2])  # G[T`-2]
        self.gs = Instance(self.gi, [self.s])  # G[S]
        self.gdyn = Instance(self.gi, [self.anyt])  # G[Any]
        self.gn = Instance(self.gi, [NoneType()])  # G[None]
        self.g2a = Instance(self.g2i, [self.a])  # G2[A]
        self.gsaa = Instance(self.gsi, [self.a, self.a])  # GS[A, A]
        self.gsab = Instance(self.gsi, [self.a, self.b])  # GS[A, B]
        self.gsba = Instance(self.gsi, [self.b, self.a])  # GS[B, A]
        self.gs2a = Instance(self.gs2i, [self.a])  # GS2[A]
        self.gs2b = Instance(self.gs2i, [self.b])  # GS2[B]
        self.gs2d = Instance(self.gs2i, [self.d])  # GS2[D]
        self.hab = Instance(self.hi, [self.a, self.b])  # H[A, B]
        self.haa = Instance(self.hi, [self.a, self.a])  # H[A, A]
        self.hbb = Instance(self.hi, [self.b, self.b])  # H[B, B]
        self.hts = Instance(self.hi, [self.t, self.s])  # H[T, S]
        self.had = Instance(self.hi, [self.a, self.d])  # H[A, D]
        self.hao = Instance(self.hi, [self.a, self.o])  # H[A, object]
        self.lsta = Instance(self.std_listi, [self.a])  # List[A]
        self.lstb = Instance(self.std_listi, [self.b])  # List[B]
        # Literal types and instances that carry a last known literal value
        self.lit1 = LiteralType(1, self.a)
        self.lit2 = LiteralType(2, self.a)
        self.lit3 = LiteralType("foo", self.d)
        self.lit4 = LiteralType(4, self.a)
        self.lit1_inst = Instance(self.ai, [], last_known_value=self.lit1)
        self.lit2_inst = Instance(self.ai, [], last_known_value=self.lit2)
        self.lit3_inst = Instance(self.di, [], last_known_value=self.lit3)
        self.lit4_inst = Instance(self.ai, [], last_known_value=self.lit4)
        # Type[...] variants of the instance/type-variable types above
        self.type_a = TypeType.make_normalized(self.a)
        self.type_b = TypeType.make_normalized(self.b)
        self.type_c = TypeType.make_normalized(self.c)
        self.type_d = TypeType.make_normalized(self.d)
        self.type_t = TypeType.make_normalized(self.t)
        self.type_any = TypeType.make_normalized(self.anyt)
        # Give bool and A a __bool__ method so they can be used in
        # boolean contexts by the tests.
        self._add_bool_dunder(self.bool_type_info)
        self._add_bool_dunder(self.ai)
    def _add_bool_dunder(self, type_info: TypeInfo) -> None:
        """Add a '__bool__() -> bool' method to the given TypeInfo."""
        signature = CallableType([], [], [], Instance(self.bool_type_info, []), self.function)
        bool_func = FuncDef('__bool__', [], Block([]))
        bool_func.type = set_callable_name(signature, bool_func)
        type_info.names[bool_func.name] = SymbolTableNode(MDEF, bool_func)
    # Helper methods
    def callable(self, *a: Type) -> CallableType:
        """callable(a1, ..., an, r) constructs a callable with argument types
        a1, ... an and return type r.
        """
        return CallableType(list(a[:-1]), [ARG_POS] * (len(a) - 1),
                            [None] * (len(a) - 1), a[-1], self.function)
    def callable_type(self, *a: Type) -> CallableType:
        """callable_type(a1, ..., an, r) constructs a callable with
        argument types a1, ... an and return type r, and which
        represents a type.
        """
        return CallableType(list(a[:-1]), [ARG_POS] * (len(a) - 1),
                            [None] * (len(a) - 1), a[-1], self.type_type)
    def callable_default(self, min_args: int, *a: Type) -> CallableType:
        """callable_default(min_args, a1, ..., an, r) constructs a
        callable with argument types a1, ... an and return type r,
        with min_args mandatory fixed arguments.
        """
        n = len(a) - 1
        return CallableType(list(a[:-1]),
                            [ARG_POS] * min_args + [ARG_OPT] * (n - min_args),
                            [None] * n,
                            a[-1], self.function)
    def callable_var_arg(self, min_args: int, *a: Type) -> CallableType:
        """callable_var_arg(min_args, a1, ..., an, r) constructs a callable
        with argument types a1, ... *an and return type r.
        """
        n = len(a) - 1
        return CallableType(list(a[:-1]),
                            [ARG_POS] * min_args +
                            [ARG_OPT] * (n - 1 - min_args) +
                            [ARG_STAR], [None] * n,
                            a[-1], self.function)
    def make_type_info(self, name: str,
                       module_name: Optional[str] = None,
                       is_abstract: bool = False,
                       mro: Optional[List[TypeInfo]] = None,
                       bases: Optional[List[Instance]] = None,
                       typevars: Optional[List[str]] = None,
                       variances: Optional[List[int]] = None) -> TypeInfo:
        """Make a TypeInfo suitable for use in unit tests."""
        class_def = ClassDef(name, Block([]), None, [])
        class_def.fullname = name
        if module_name is None:
            # Derive the module from a dotted name; default to __main__.
            if '.' in name:
                module_name = name.rsplit('.', 1)[0]
            else:
                module_name = '__main__'
        if typevars:
            v: List[TypeVarLikeType] = []
            # NOTE(review): loop variable 'id' shadows the builtin; it is
            # only the 1-based type-variable id here.
            for id, n in enumerate(typevars, 1):
                if variances:
                    variance = variances[id - 1]
                else:
                    variance = COVARIANT
                v.append(TypeVarType(n, n, id, [], self.o, variance=variance))
            class_def.type_vars = v
        info = TypeInfo(SymbolTable(), class_def, module_name)
        if mro is None:
            mro = []
            if name != 'builtins.object':
                mro.append(self.oi)
        info.mro = [info] + mro
        if bases is None:
            if mro:
                # By default, assume that there is a single non-generic base.
                bases = [Instance(mro[0], [])]
            else:
                bases = []
        info.bases = bases
        return info
    def def_alias_1(self, base: Instance) -> Tuple[TypeAliasType, Type]:
        """Return the recursive alias A = Tuple[Union[base, A], ...] and its target."""
        A = TypeAliasType(None, [])
        target = Instance(self.std_tuplei,
                          [UnionType([base, A])])  # A = Tuple[Union[base, A], ...]
        AN = TypeAlias(target, '__main__.A', -1, -1)
        A.alias = AN
        return A, target
    def def_alias_2(self, base: Instance) -> Tuple[TypeAliasType, Type]:
        """Return the recursive alias A = Union[base, Tuple[A, ...]] and its target."""
        A = TypeAliasType(None, [])
        target = UnionType([base,
                            Instance(self.std_tuplei, [A])])  # A = Union[base, Tuple[A, ...]]
        AN = TypeAlias(target, '__main__.A', -1, -1)
        A.alias = AN
        return A, target
    def non_rec_alias(self, target: Type) -> TypeAliasType:
        """Return a non-recursive alias '__main__.A' for the given target type."""
        AN = TypeAlias(target, '__main__.A', -1, -1)
        return TypeAliasType(AN, [])
class InterfaceTypeFixture(TypeFixture):
    """TypeFixture extended with generic interface types (GF, M1)."""

    def __init__(self) -> None:
        super().__init__()
        # GF[T]
        self.gfi = self.make_type_info('GF', typevars=['T'], is_abstract=True)
        # M1 <: GF[A]
        self.m1i = self.make_type_info(
            'M1',
            is_abstract=True,
            mro=[self.gfi, self.oi],
            bases=[Instance(self.gfi, [self.a])])
        self.gfa = Instance(self.gfi, [self.a])  # GF[A]
        self.gfb = Instance(self.gfi, [self.b])  # GF[B]
        self.m1 = Instance(self.m1i, [])  # M1

View file

@ -0,0 +1,71 @@
"""Visitor classes pulled out from different tests
These are here because we don't currently support having interpreted
classes subtype compiled ones but pytest grabs the python file
even if the test was compiled.
"""
from typing import Set
from mypy.nodes import (
NameExpr, TypeVarExpr, CallExpr, Expression, MypyFile, AssignmentStmt, IntExpr
)
from mypy.traverser import TraverserVisitor
from mypy.treetransform import TransformVisitor
from mypy.types import Type
# from testtypegen
class SkippedNodeSearcher(TraverserVisitor):
    """Collect expression nodes that should be omitted from test output."""

    def __init__(self) -> None:
        self.nodes: Set[Expression] = set()
        self.is_typing = False

    def visit_mypy_file(self, f: MypyFile) -> None:
        self.is_typing = f.fullname in ('typing', 'builtins')
        super().visit_mypy_file(f)

    def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
        if s.type or ignore_node(s.rvalue):
            self.nodes.update(lvalue for lvalue in s.lvalues
                              if isinstance(lvalue, NameExpr))
        super().visit_assignment_stmt(s)

    def visit_name_expr(self, n: NameExpr) -> None:
        self.skip_if_typing(n)

    def visit_int_expr(self, n: IntExpr) -> None:
        self.skip_if_typing(n)

    def skip_if_typing(self, n: Expression) -> None:
        # Skip all name/int expressions inside typing and builtins stubs.
        if self.is_typing:
            self.nodes.add(n)
def ignore_node(node: Expression) -> bool:
    """Return True if node is to be omitted from test case output."""
    # We want to get rid of object() expressions in the typing module stub
    # and also TypeVar(...) expressions. Since detecting whether a node comes
    # from the typing module is not easy, we just strip them all away.
    if isinstance(node, TypeVarExpr):
        return True
    if isinstance(node, NameExpr) and node.fullname in ('builtins.object',
                                                        'builtins.None'):
        return True
    if isinstance(node, CallExpr):
        return bool(node.analyzed) or ignore_node(node.callee)
    return False
# from testtransform
class TypeAssertTransformVisitor(TransformVisitor):
    """Transform visitor that asserts every transformed type is present."""
    def type(self, type: Type) -> Type:
        # NOTE(review): parameter name 'type' shadows the builtin, but it is
        # fixed by the TransformVisitor interface being overridden.
        assert type is not None
        return type