@asmeurer
Last active July 27, 2023 22:11
hypothesis hang: pytest traceback captured with Ctrl-C (KeyboardInterrupt) while Hypothesis's shrinker was running test_iter_indices from ndindex/tests/test_shapetools.py
^CYou can add @seed(209619979639939998162824709723232908623) to this test or run pytest with --hypothesis-seed=209619979639939998162824709723232908623 to reproduce this failure.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! KeyboardInterrupt !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
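The message above gives everything needed to replay this exact run. A minimal sketch of where the @seed decorator would go (the real strategies and decorators for test_iter_indices live in ndindex/tests/test_shapetools.py; the strategy below is a placeholder, not the actual one):

    # Hypothetical sketch: replay the hanging run with the seed Hypothesis reported.
    from hypothesis import given, seed, strategies as st

    @seed(209619979639939998162824709723232908623)
    @given(st.integers())  # placeholder strategy, not the real one
    def test_iter_indices(x):
        ...

Equivalently, without editing the test file:

    pytest ndindex/tests/test_shapetools.py -k test_iter_indices --hypothesis-seed=209619979639939998162824709723232908623

The full traceback from the Ctrl-C follows.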
config = <_pytest.config.Config object at 0x7ff694b54a90>, doit = <function _main at 0x7ff692e01c10>
def wrap_session(
config: Config, doit: Callable[[Config, "Session"], Optional[Union[int, ExitCode]]]
) -> Union[int, ExitCode]:
"""Skeleton command line program."""
session = Session.from_config(config)
session.exitstatus = ExitCode.OK
initstate = 0
try:
try:
config._do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
> session.exitstatus = doit(config, session) or 0
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/main.py:270:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
config = <_pytest.config.Config object at 0x7ff694b54a90>, session = <Session ndindex exitstatus=<ExitCode.OK: 0> testsfailed=0 testscollected=4>
def _main(config: Config, session: "Session") -> Optional[Union[int, ExitCode]]:
"""Default command line protocol for initialization, session,
running tests and reporting."""
config.hook.pytest_collection(session=session)
> config.hook.pytest_runtestloop(session=session)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/main.py:324:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <_HookCaller 'pytest_runtestloop'>, args = (), kwargs = {'session': <Session ndindex exitstatus=<ExitCode.OK: 0> testsfailed=0 testscollected=4>}
argname = 'session', firstresult = True
def __call__(self, *args, **kwargs):
if args:
raise TypeError("hook calling supports only keyword arguments")
assert not self.is_historic()
# This is written to avoid expensive operations when not needed.
if self.spec:
for argname in self.spec.argnames:
if argname not in kwargs:
notincall = tuple(set(self.spec.argnames) - kwargs.keys())
warnings.warn(
"Argument(s) {} which are declared in the hookspec "
"can not be found in this hook call".format(notincall),
stacklevel=2,
)
break
firstresult = self.spec.opts.get("firstresult")
else:
firstresult = False
> return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/pluggy/_hooks.py:265:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <_pytest.config.PytestPluginManager object at 0x7ff690de3970>, hook_name = 'pytest_runtestloop'
methods = [<HookImpl plugin_name='main', plugin=<module '_pytest.main' from '/Users/aaronmeurer/anaconda3/envs/ndindex/lib/pytho...f694bf26a0>>, <HookImpl plugin_name='logging-plugin', plugin=<_pytest.logging.LoggingPlugin object at 0x7ff69738a850>>]
kwargs = {'session': <Session ndindex exitstatus=<ExitCode.OK: 0> testsfailed=0 testscollected=4>}, firstresult = True
def _hookexec(self, hook_name, methods, kwargs, firstresult):
# called from all hookcaller instances.
# enable_tracing will set its own wrapping function at self._inner_hookexec
> return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/pluggy/_manager.py:80:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
session = <Session ndindex exitstatus=<ExitCode.OK: 0> testsfailed=0 testscollected=4>
def pytest_runtestloop(session: "Session") -> bool:
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted(
"%d error%s during collection"
% (session.testsfailed, "s" if session.testsfailed != 1 else "")
)
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
> item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/main.py:349:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <_HookCaller 'pytest_runtest_protocol'>, args = (), kwargs = {'item': <Function test_iter_indices>, 'nextitem': <Function test_iter_indices_cross>}
argname = 'nextitem', firstresult = True
def __call__(self, *args, **kwargs):
if args:
raise TypeError("hook calling supports only keyword arguments")
assert not self.is_historic()
# This is written to avoid expensive operations when not needed.
if self.spec:
for argname in self.spec.argnames:
if argname not in kwargs:
notincall = tuple(set(self.spec.argnames) - kwargs.keys())
warnings.warn(
"Argument(s) {} which are declared in the hookspec "
"can not be found in this hook call".format(notincall),
stacklevel=2,
)
break
firstresult = self.spec.opts.get("firstresult")
else:
firstresult = False
> return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/pluggy/_hooks.py:265:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <_pytest.config.PytestPluginManager object at 0x7ff690de3970>, hook_name = 'pytest_runtest_protocol'
methods = [<HookImpl plugin_name='runner', plugin=<module '_pytest.runner' from '/Users/aaronmeurer/anaconda3/envs/ndindex/lib/p... '_pytest.warnings' from '/Users/aaronmeurer/anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/warnings.py'>>]
kwargs = {'item': <Function test_iter_indices>, 'nextitem': <Function test_iter_indices_cross>}, firstresult = True
def _hookexec(self, hook_name, methods, kwargs, firstresult):
# called from all hookcaller instances.
# enable_tracing will set its own wrapping function at self._inner_hookexec
> return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/pluggy/_manager.py:80:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
item = <Function test_iter_indices>, nextitem = <Function test_iter_indices_cross>
def pytest_runtest_protocol(item: Item, nextitem: Optional[Item]) -> bool:
ihook = item.ihook
ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
> runtestprotocol(item, nextitem=nextitem)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/runner.py:112:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
item = <Function test_iter_indices>, log = True, nextitem = <Function test_iter_indices_cross>
def runtestprotocol(
item: Item, log: bool = True, nextitem: Optional[Item] = None
) -> List[TestReport]:
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request: # type: ignore[attr-defined]
# This only happens if the item is re-run, as is done by
# pytest-rerunfailures.
item._initrequest() # type: ignore[attr-defined]
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
if item.config.getoption("setupshow", False):
show_test_item(item)
if not item.config.getoption("setuponly", False):
> reports.append(call_and_report(item, "call", log))
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/runner.py:131:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
item = <Function test_iter_indices>, when = 'call', log = True, kwds = {}
def call_and_report(
item: Item, when: "Literal['setup', 'call', 'teardown']", log: bool = True, **kwds
) -> TestReport:
> call = call_runtest_hook(item, when, **kwds)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/runner.py:220:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
item = <Function test_iter_indices>, when = 'call', kwds = {}, reraise = (<class '_pytest.outcomes.Exit'>, <class 'KeyboardInterrupt'>)
def call_runtest_hook(
item: Item, when: "Literal['setup', 'call', 'teardown']", **kwds
) -> "CallInfo[None]":
if when == "setup":
ihook: Callable[..., None] = item.ihook.pytest_runtest_setup
elif when == "call":
ihook = item.ihook.pytest_runtest_call
elif when == "teardown":
ihook = item.ihook.pytest_runtest_teardown
else:
assert False, f"Unhandled runtest hook case: {when}"
reraise: Tuple[Type[BaseException], ...] = (Exit,)
if not item.config.getoption("usepdb", False):
reraise += (KeyboardInterrupt,)
> return CallInfo.from_call(
lambda: ihook(item=item, **kwds), when=when, reraise=reraise
)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/runner.py:259:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cls = <class '_pytest.runner.CallInfo'>, func = <function call_runtest_hook.<locals>.<lambda> at 0x7ff69774f670>, when = 'call'
reraise = (<class '_pytest.outcomes.Exit'>, <class 'KeyboardInterrupt'>)
@classmethod
def from_call(
cls,
func: "Callable[[], TResult]",
when: "Literal['collect', 'setup', 'call', 'teardown']",
reraise: Optional[
Union[Type[BaseException], Tuple[Type[BaseException], ...]]
] = None,
) -> "CallInfo[TResult]":
"""Call func, wrapping the result in a CallInfo.
:param func:
The function to call. Called without arguments.
:param when:
The phase in which the function is called.
:param reraise:
Exception or exceptions that shall propagate if raised by the
function, instead of being wrapped in the CallInfo.
"""
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
> result: Optional[TResult] = func()
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/runner.py:339:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
> lambda: ihook(item=item, **kwds), when=when, reraise=reraise
)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/runner.py:260:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <_HookCaller 'pytest_runtest_call'>, args = (), kwargs = {'item': <Function test_iter_indices>}, argname = 'item', firstresult = False
def __call__(self, *args, **kwargs):
if args:
raise TypeError("hook calling supports only keyword arguments")
assert not self.is_historic()
# This is written to avoid expensive operations when not needed.
if self.spec:
for argname in self.spec.argnames:
if argname not in kwargs:
notincall = tuple(set(self.spec.argnames) - kwargs.keys())
warnings.warn(
"Argument(s) {} which are declared in the hookspec "
"can not be found in this hook call".format(notincall),
stacklevel=2,
)
break
firstresult = self.spec.opts.get("firstresult")
else:
firstresult = False
> return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/pluggy/_hooks.py:265:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <_pytest.config.PytestPluginManager object at 0x7ff690de3970>, hook_name = 'pytest_runtest_call'
methods = [<HookImpl plugin_name='runner', plugin=<module '_pytest.runner' from '/Users/aaronmeurer/anaconda3/envs/ndindex/lib/p...f26a0>>, <HookImpl plugin_name='logging-plugin', plugin=<_pytest.logging.LoggingPlugin object at 0x7ff69738a850>>, ...]
kwargs = {'item': <Function test_iter_indices>}, firstresult = False
def _hookexec(self, hook_name, methods, kwargs, firstresult):
# called from all hookcaller instances.
# enable_tracing will set its own wrapping function at self._inner_hookexec
> return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/pluggy/_manager.py:80:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
item = <Function test_iter_indices>
def pytest_runtest_call(item: Item) -> None:
_update_current_test_var(item, "call")
try:
del sys.last_type
del sys.last_value
del sys.last_traceback
except AttributeError:
pass
try:
> item.runtest()
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/runner.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <Function test_iter_indices>
def runtest(self) -> None:
"""Execute the underlying test function."""
> self.ihook.pytest_pyfunc_call(pyfuncitem=self)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/python.py:1789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <_HookCaller 'pytest_pyfunc_call'>, args = (), kwargs = {'pyfuncitem': <Function test_iter_indices>}, argname = 'pyfuncitem', firstresult = True
def __call__(self, *args, **kwargs):
if args:
raise TypeError("hook calling supports only keyword arguments")
assert not self.is_historic()
# This is written to avoid expensive operations when not needed.
if self.spec:
for argname in self.spec.argnames:
if argname not in kwargs:
notincall = tuple(set(self.spec.argnames) - kwargs.keys())
warnings.warn(
"Argument(s) {} which are declared in the hookspec "
"can not be found in this hook call".format(notincall),
stacklevel=2,
)
break
firstresult = self.spec.opts.get("firstresult")
else:
firstresult = False
> return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/pluggy/_hooks.py:265:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <_pytest.config.PytestPluginManager object at 0x7ff690de3970>, hook_name = 'pytest_pyfunc_call'
methods = [<HookImpl plugin_name='python', plugin=<module '_pytest.python' from '/Users/aaronmeurer/anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/python.py'>>]
kwargs = {'pyfuncitem': <Function test_iter_indices>}, firstresult = True
def _hookexec(self, hook_name, methods, kwargs, firstresult):
# called from all hookcaller instances.
# enable_tracing will set its own wrapping function at self._inner_hookexec
> return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/pluggy/_manager.py:80:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
pyfuncitem = <Function test_iter_indices>
@hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]:
testfunction = pyfuncitem.obj
if is_async_function(testfunction):
async_warn_and_skip(pyfuncitem.nodeid)
funcargs = pyfuncitem.funcargs
testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
> result = testfunction(**testargs)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/_pytest/python.py:195:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
@example([[(1, 1), (1, 1)], (1,)], (0,))
> @example([[(0,), (0,)], ()], (0,))
ndindex/tests/test_shapetools.py:24:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.engine.ConjectureRunner object at 0x7ff69761fe20>
def database(self):
if self.database_key is None:
return None
> return self.settings.database
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/engine.py:474:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.engine.ConjectureRunner object at 0x7ff69761fe20>
def new_conjecture_data(self, prefix, max_length=BUFFER_SIZE, observer=None):
return ConjectureData(
prefix=prefix,
max_length=max_length,
> random=self.random,
observer=observer or self.tree.new_observer(),
)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/engine.py:880:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.engine.ConjectureRunner object at 0x7ff69761fe20>
self.__data_cache.pin(zero_data.buffer)
if zero_data.status == Status.OVERRUN or (
zero_data.status == Status.VALID and len(zero_data.buffer) * 2 > BUFFER_SIZE
):
fail_health_check(
self.settings,
"The smallest natural example for your test is extremely "
"large. This makes it difficult for Hypothesis to generate "
"good examples, especially when trying to reduce failing ones "
"at the end. Consider reducing the size of your data if it is "
"of a fixed size. You could also fix this by improving how "
"your data shrinks (see https://hypothesis.readthedocs.io/en/"
"latest/data.html#shrinking for details), or by introducing "
"default values inside your strategy. e.g. could you replace "
"some arguments with their defaults by using "
"one_of(none(), some_complex_strategy)?",
HealthCheck.large_base_example,
)
self.health_check_state = HealthCheckState()
# We attempt to use the size of the minimal generated test case starting
# from a given novel prefix as a guideline to generate smaller test
# cases for an initial period, by restricting ourselves to test cases
# that are not much larger than it.
#
# Calculating the actual minimal generated test case is hard, so we
# take a best guess that zero extending a prefix produces the minimal
# test case starting with that prefix (this is true for our built in
# strategies). This is only a reasonable thing to do if the resulting
# test case is valid. If we regularly run into situations where it is
# not valid then this strategy is a waste of time, so we want to
# abandon it early. In order to do this we track how many times in a
# row it has failed to work, and abort small test case generation when
# it has failed too many times in a row.
consecutive_zero_extend_is_invalid = 0
# We control growth during initial example generation, for two
# reasons:
#
# * It gives us an opportunity to find small examples early, which
# gives us a fast path for easy to find bugs.
# * It avoids low probability events where we might end up
# generating very large examples during health checks, which
# on slower machines can trigger HealthCheck.too_slow.
#
# The heuristic we use is that we attempt to estimate the smallest
# extension of this prefix, and limit the size to no more than
# an order of magnitude larger than that. If we fail to estimate
# the size accurately, we skip over this prefix and try again.
#
# We need to tune the example size based on the initial prefix,
# because any fixed size might be too small, and any size based
# on the strategy in general can fall afoul of strategies that
# have very different sizes for different prefixes.
small_example_cap = clamp(10, self.settings.max_examples // 10, 50)
optimise_at = max(self.settings.max_examples // 2, small_example_cap + 1)
ran_optimisations = False
while self.should_generate_more():
prefix = self.generate_novel_prefix()
assert len(prefix) <= BUFFER_SIZE
if (
self.valid_examples <= small_example_cap
and self.call_count <= 5 * small_example_cap
and not self.interesting_examples
and consecutive_zero_extend_is_invalid < 5
):
minimal_example = self.cached_test_function(
prefix + bytes(BUFFER_SIZE - len(prefix))
)
if minimal_example.status < Status.VALID:
consecutive_zero_extend_is_invalid += 1
continue
consecutive_zero_extend_is_invalid = 0
minimal_extension = len(minimal_example.buffer) - len(prefix)
max_length = min(len(prefix) + minimal_extension * 10, BUFFER_SIZE)
# We could end up in a situation where even though the prefix was
# novel when we generated it, because we've now tried zero extending
# it not all possible continuations of it will be novel. In order to
# avoid making redundant test calls, we rerun it in simulation mode
# first. If this has a predictable result, then we don't bother
# running the test function for real here. If however we encounter
# some novel behaviour, we try again with the real test function,
# starting from the new novel prefix that has been discovered.
try:
trial_data = self.new_conjecture_data(
prefix=prefix, max_length=max_length
)
self.tree.simulate_test_function(trial_data)
continue
except PreviouslyUnseenBehaviour:
pass
# If the simulation entered part of the tree that has been killed,
# we don't want to run this.
if trial_data.observer.killed:
continue
# We might have hit the cap on number of examples we should
# run when calculating the minimal example.
if not self.should_generate_more():
break
prefix = trial_data.buffer
else:
max_length = BUFFER_SIZE
data = self.new_conjecture_data(prefix=prefix, max_length=max_length)
self.test_function(data)
self.generate_mutations_from(data)
# Although the optimisations are logically a distinct phase, we
# actually normally run them as part of example generation. The
# reason for this is that we cannot guarantee that optimisation
# actually exhausts our budget: It might finish running and we
# discover that actually we still could run a bunch more test cases
# if we want.
if (
self.valid_examples >= max(small_example_cap, optimise_at)
and not ran_optimisations
):
ran_optimisations = True
self.optimise_targets()
def generate_mutations_from(self, data):
# A thing that is often useful but rarely happens by accident is
# to generate the same value at multiple different points in the
# test case.
#
# Rather than make this the responsibility of individual strategies
# we implement a small mutator that just takes parts of the test
# case with the same label and tries replacing one of them with a
# copy of the other and tries running it. If we've made a good
# guess about what to put where, this will run a similar generated
# test case with more duplication.
if (
> # An OVERRUN doesn't have enough information about the test
# case to mutate, so we just skip those.
data.status >= Status.INVALID
# This has a tendency to trigger some weird edge cases during
# generation so we don't let it run until we're done with the
# health checks.
and self.health_check_state is None
):
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/engine.py:746:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.engine.ConjectureRunner object at 0x7ff69761fe20>
> ???
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/engine.py:867:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.engine.ConjectureRunner object at 0x7ff69761fe20>
with self._log_phase_statistics("shrink"):
self.shrink_interesting_examples()
> self.exit_with(ExitReason.finished)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/engine.py:874:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.pareto.ParetoOptimiser object at 0x7ff64c291c70>
def run(self):
seen = set()
# We iterate backwards through the pareto front, using the shrinker to
# (hopefully) replace each example with a smaller one. Note that it's
# important that we start from the end for two reasons: Firstly, by
# doing it this way we ensure that any new front members we discover
# during optimisation will also get optimised (because they will be
# inserted into the part of the front that we haven't visited yet),
# and secondly we generally expect that we will not finish this process
# in a single run, because it's relatively expensive in terms of our
# example budget, and by starting from the end we ensure that each time
# we run the tests we improve the pareto front because we work on the
# bits that we haven't covered yet.
i = len(self.front) - 1
prev = None
while i >= 0 and not self.__engine.interesting_examples:
assert self.front
i = min(i, len(self.front) - 1)
target = self.front[i]
if target.buffer in seen:
i -= 1
continue
assert target is not prev
prev = target
def allow_transition(source, destination):
"""Shrink to data that strictly pareto dominates the current
best value we've seen, which is the current target of the
shrinker.
Note that during shrinking we may discover other smaller
examples that this function will reject and will get added to
the front. This is fine, because they will be processed on
later iterations of this loop."""
if dominance(destination, source) == DominanceRelation.LEFT_DOMINATES:
# If ``destination`` dominates ``source`` then ``source``
# must be dominated in the front - either ``destination`` is in
# the front, or it was not added to it because it was
# dominated by something in it.
try:
self.front.front.remove(source)
except ValueError:
pass
return True
return False
> shrunk = self.__engine.shrink(target, allow_transition=allow_transition)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/pareto.py:328:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.engine.ConjectureRunner object at 0x7ff69761fe20>
example = ConjectureResult(status=Status.VALID, interesting_origin=None, buffer=b'\x01\x01\x00\x00\x01\x00\x00', blocks=Block([B...139221), StructuralCoverageTag(label=6215470858326696968), StructuralCoverageTag(label=3299382349054467147)}), index=7)
predicate = None, allow_transition = <function ParetoOptimiser.run.<locals>.allow_transition at 0x7ff660108820>
def cached_test_function(self, buffer, error_on_discard=False, extend=0):
"""Checks the tree to see if we've tested this buffer, and returns the
> previous result if we have.
Otherwise we call through to ``test_function``, and return a
fresh result.
If ``error_on_discard`` is set to True this will raise ``ContainsDiscard``
in preference to running the actual test function. This is to allow us
to skip test cases we expect to be redundant in some cases. Note that
it may be the case that we don't raise ``ContainsDiscard`` even if the
result has discards if we cannot determine from previous runs whether
it will have a discard.
"""
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/engine.py:984:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.shrinker.Shrinker object at 0x7ff62fb7b070>
def shrink(self):
"""Run the full set of shrinks and update shrink_target.
This method is "mostly idempotent" - calling it twice is unlikely to
have any effect, though it has a non-zero probability of doing so.
"""
# We assume that if an all-zero block of bytes is an interesting
# example then we're not going to do better than that.
# This might not technically be true: e.g. for integers() | booleans()
# the simplest example is actually [1, 0]. Missing this case is fairly
# harmless and this allows us to make various simplifying assumptions
# about the structure of the data (principally that we're never
# operating on a block of all zero bytes so can use non-zeroness as a
# signpost of complexity).
if not any(self.shrink_target.buffer) or self.incorporate_new_buffer(
bytes(len(self.shrink_target.buffer))
):
self.explain()
return
try:
> self.greedy_shrink()
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/shrinker.py:446:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.shrinker.Shrinker object at 0x7ff62fb7b070>
def greedy_shrink(self):
"""Run a full set of greedy shrinks (that is, ones that will only ever
move to a better target) and update shrink_target appropriately.
This method iterates to a fixed point and so is idempotent - calling
it twice will have exactly the same effect as calling it once.
"""
> self.fixate_shrink_passes(
[
block_program("X" * 5),
block_program("X" * 4),
block_program("X" * 3),
block_program("X" * 2),
block_program("X" * 1),
"pass_to_descendant",
"reorder_examples",
"minimize_floats",
"minimize_duplicated_blocks",
block_program("-XX"),
"minimize_individual_blocks",
block_program("--X"),
"redistribute_block_pairs",
"lower_blocks_together",
]
+ [dfa_replacement(n) for n in SHRINKING_DFAS]
)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/shrinker.py:635:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.shrinker.Shrinker object at 0x7ff62fb7b070>
passes = [ShrinkPass(run_with_chooser=<function shrink_pass_family.<locals>.accept.<locals>.run at 0x7ff6084213a0>, index=0, sh...ecture.shrinker.Shrinker object at 0x7ff62fb7b070>, last_prefix=(), successes=0, calls=0, shrinks=0, deletions=0), ...]
def fixate_shrink_passes(self, passes):
"""Run steps from each pass in ``passes`` until the current shrink target
is a fixed point of all of them."""
passes = list(map(self.shrink_pass, passes))
any_ran = True
while any_ran:
any_ran = False
reordering = {}
# We run remove_discarded after every pass to do cleanup
# keeping track of whether that actually works. Either there is
# no discarded data and it is basically free, or it reliably works
# and deletes data, or it doesn't work. In that latter case we turn
# it off for the rest of this loop through the passes, but will
# try again once all of the passes have been run.
can_discard = self.remove_discarded()
calls_at_loop_start = self.calls
# We keep track of how many calls can be made by a single step
# without making progress and use this to test how much to pad
# out self.max_stall by as we go along.
max_calls_per_failing_step = 1
for sp in passes:
if can_discard:
can_discard = self.remove_discarded()
before_sp = self.shrink_target
# Run the shrink pass until it fails to make any progress
# max_failures times in a row. This implicitly boosts shrink
# passes that are more likely to work.
failures = 0
max_failures = 20
while failures < max_failures:
# We don't allow more than max_stall consecutive failures
# to shrink, but this means that if we're unlucky and the
# shrink passes are in a bad order where only the ones at
# the end are useful, if we're not careful this heuristic
# might stop us before we've tried everything. In order to
# avoid that happening, we make sure that there's always
# plenty of breathing room to make it through a single
# iteration of the fixate_shrink_passes loop.
self.max_stall = max(
self.max_stall,
2 * max_calls_per_failing_step
+ (self.calls - calls_at_loop_start),
)
prev = self.shrink_target
initial_calls = self.calls
# It's better for us to run shrink passes in a deterministic
# order, to avoid repeat work, but this can cause us to create
# long stalls when there are a lot of steps which fail to do
# anything useful. In order to avoid this, once we've noticed
# we're in a stall (i.e. half of max_failures calls have failed
# to do anything) we switch to randomly jumping around. If we
# find a success then we'll resume deterministic order from
# there which, with any luck, is in a new good region.
> if not sp.step(random_order=failures >= max_failures // 2):
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/shrinker.py:721:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = ShrinkPass(run_with_chooser=<function shrink_pass_family.<locals>.accept.<locals>.run at 0x7ff6084213a0>, index=0, shr...l.conjecture.shrinker.Shrinker object at 0x7ff62fb7b070>, last_prefix=(), successes=0, calls=0, shrinks=0, deletions=0)
random_order = False
def step(self, random_order=False):
tree = self.shrinker.shrink_pass_choice_trees[self]
if tree.exhausted:
return False
initial_shrinks = self.shrinker.shrinks
initial_calls = self.shrinker.calls
size = len(self.shrinker.shrink_target.buffer)
self.shrinker.engine.explain_next_call_as(self.name)
if random_order:
selection_order = random_selection_order(self.shrinker.random)
else:
selection_order = prefix_selection_order(self.last_prefix)
try:
> self.last_prefix = tree.step(
selection_order,
lambda chooser: self.run_with_chooser(self.shrinker, chooser),
)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/shrinker.py:1572:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.choicetree.ChoiceTree object at 0x7ff65f9cb6a0>
selection_order = <function prefix_selection_order.<locals>.selection_order at 0x7ff660108f70>, f = <function ShrinkPass.step.<locals>.<lambda> at 0x7ff660108790>
def step(
self,
selection_order: Callable[[int, int], Iterable[int]],
f: Callable[[Chooser], None],
) -> Sequence[int]:
assert not self.exhausted
chooser = Chooser(self, selection_order)
try:
> f(chooser)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/choicetree.py:138:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
chooser = <hypothesis.internal.conjecture.choicetree.Chooser object at 0x7ff65f9cbd60>
> lambda chooser: self.run_with_chooser(self.shrinker, chooser),
)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/shrinker.py:1574:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.shrinker.Shrinker object at 0x7ff62fb7b070>
chooser = <hypothesis.internal.conjecture.choicetree.Chooser object at 0x7ff65f9cbd60>
def run(self, chooser):
> return f(self, chooser, *args)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/shrinker.py:1458:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.shrinker.Shrinker object at 0x7ff62fb7b070>
chooser = <hypothesis.internal.conjecture.choicetree.Chooser object at 0x7ff65f9cbd60>, description = 'XXXXX'
@shrink_pass_family
def block_program(self, chooser, description):
"""Mini-DSL for block rewriting. A sequence of commands that will be run
over all contiguous sequences of blocks of the description length in order.
Commands are:
* ".", keep this block unchanged
* "-", subtract one from this block.
* "0", replace this block with zero
* "X", delete this block
If a command does not apply (currently only because it's - on a zero
block) the block will be silently skipped over. As a side effect of
running a block program its score will be updated.
"""
n = len(description)
"""Adaptively attempt to run the block program at the current
index. If this successfully applies the block program ``k`` times
then this runs in ``O(log(k))`` test function calls."""
i = chooser.choose(range(len(self.shrink_target.blocks) - n))
# First, run the block program at the chosen index. If this fails,
# don't do any extra work, so that failure is as cheap as possible.
> if not self.run_block_program(i, description, original=self.shrink_target):
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/shrinker.py:1491:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.shrinker.Shrinker object at 0x7ff62fb7b070>, i = 1, description = 'XXXXX'
original = ConjectureResult(status=Status.VALID, interesting_origin=None, buffer=b'\x01\x01\x00\x00\x01\x00\x00', blocks=Block([B...139221), StructuralCoverageTag(label=6215470858326696968), StructuralCoverageTag(label=3299382349054467147)}), index=7)
repeats = 1
def run_block_program(self, i, description, original, repeats=1):
"""Block programs are a mini-DSL for block rewriting, defined as a sequence
of commands that can be run at some index into the blocks
Commands are:
* "-", subtract one from this block.
* "X", delete this block
If a command does not apply (currently only because it's - on a zero
block) the block will be silently skipped over.
This method runs the block program in ``description`` at block index
``i`` on the ConjectureData ``original``. If ``repeats > 1`` then it
will attempt to approximate the results of running it that many times.
Returns True if this successfully changes the underlying shrink target,
else False.
"""
if i + len(description) > len(original.blocks) or i < 0:
return False
attempt = bytearray(original.buffer)
for _ in range(repeats):
for k, d in reversed(list(enumerate(description))):
j = i + k
u, v = original.blocks[j].bounds
if v > len(attempt):
return False
if d == "-":
value = int_from_bytes(attempt[u:v])
if value == 0:
return False
else:
attempt[u:v] = int_to_bytes(value - 1, v - u)
elif d == "X":
del attempt[u:v]
else:
raise NotImplementedError(f"Unrecognised command {d!r}")
> return self.incorporate_new_buffer(attempt)
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/shrinker.py:1449:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <hypothesis.internal.conjecture.shrinker.Shrinker object at 0x7ff62fb7b070>, buffer = b'\x01\x00'
def incorporate_new_buffer(self, buffer):
"""Either runs the test function on this buffer and returns True if
that changed the shrink_target, or determines that doing so would
be useless and returns False without running it."""
buffer = bytes(buffer[: self.shrink_target.index])
# Sometimes an attempt at lexicographic minimization will do the wrong
# thing because the buffer has changed under it (e.g. something has
# turned into a write, the bit size has changed). The result would be
# an invalid string, but it's better for us to just ignore it here as
# it turns out to involve quite a lot of tricky book-keeping to get
# this right and it's better to just handle it in one place.
if sort_key(buffer) >= sort_key(self.shrink_target.buffer):
return False
if self.shrink_target.buffer.startswith(buffer):
return False
previous = self.shrink_target
self.cached_test_function(buffer)
> return previous is not self.shrink_target
E KeyboardInterrupt
../../anaconda3/envs/ndindex/lib/python3.8/site-packages/hypothesis/internal/conjecture/shrinker.py:391: KeyboardInterrupt
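The innermost frames show the interrupt landed inside Hypothesis's shrinker (shrinker.py, incorporate_new_buffer), reached via the Pareto optimiser, so the apparent hang is time spent repeatedly re-running the test during target/pareto optimisation and shrinking rather than a deadlock in the test itself. One hedged workaround (it sidesteps, rather than fixes, the underlying slowness) is to restrict which phases Hypothesis runs for this test, using the public settings/Phase API; the strategy below is again a placeholder:

    # Workaround sketch, assuming the stall is confined to the optimisation and
    # shrink phases: run only the explicit, reuse, and generate phases so that
    # target optimisation and shrinking cannot stall the run.
    from hypothesis import Phase, given, settings, strategies as st

    @settings(phases=[Phase.explicit, Phase.reuse, Phase.generate])
    @given(st.integers())  # placeholder strategy, not the real one
    def test_iter_indices(x):
        ...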