# Source code for schrodinger.test.pytest_customizations

"""
Local Schrodinger py.test customizations.

"""
import inspect
import pathlib
import re
import warnings
from unittest import mock

import pytest
from _pytest.fixtures import getfixturemarker

# Register our custom pytest and other test code for assertion rewriting before
# import of modules
# Tell pytest to apply assertion rewriting to every sibling module in this
# package before any of them is imported below.
_this_file = pathlib.Path(__file__)
for _test_module in _this_file.parent.iterdir():
    # registering the same file causes a warning
    if _test_module == _this_file:
        continue
    pytest.register_assert_rewrite(f"schrodinger.test.{_test_module.stem}")
del _this_file
del _test_module
# isort: split

from .pytest import exetest
from .pytest import fixture
from .pytest import reporter
from .pytest import sessionfixture
from .pytest import startup


def register_fixtures(module):
    """
    Fixtures need to be registered in the pytest_customizations module.

    :param module: module to register pytest.fixture functions
    """
    for member_name, member in inspect.getmembers(module):
        # Only names carrying a pytest fixture marker are re-exported here.
        if getfixturemarker(member):
            globals()[member_name] = member
# Expose session-scoped and ordinary fixtures at module level so pytest
# discovers them as part of this plugin.
register_fixtures(sessionfixture)
register_fixtures(fixture)
def pytest_runtest_setup(item):
    """Delegate per-test setup to the shared session fixture machinery."""
    sessionfixture.runtest_setup(item)
@pytest.hookimpl(trylast=True)
def pytest_runtest_teardown(item, nextitem):
    """
    Delegate per-test teardown to the shared session fixture machinery.

    Runs last so other plugins' teardown hooks fire first.
    """
    sessionfixture.runtest_teardown(item, nextitem)
def pytest_addoption(parser):
    """Register Schrodinger-specific command line options."""
    startup.addoption(parser)
def pytest_cmdline_main(config):
    """Delegate main command line handling to the startup module."""
    return startup.cmdline_main(config)
# Use pytest.hookimpl for hook ordering: applying pytest.mark.tryfirst to a
# hook function is deprecated and removed in modern pytest, and the rest of
# this module already uses pytest.hookimpl for ordering.
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Delegate pytest configuration to the startup module (runs first)."""
    startup.configure(config)
def pytest_sessionstart(session):
    """Install session-wide warning filters before any test runs."""
    # The Qt style module warns about missing images in some environments;
    # that noise is not actionable during tests.
    warnings.filterwarnings(
        "ignore",
        message=".*some images may be missing",
        category=RuntimeWarning,
        module="schrodinger.ui.qt.style")
# This prints out K and "killed" when the test is killed, but doesn't include
# it in the summary.
def pytest_report_teststatus(report):
    """Put Killed tests into a separate group from other failures."""
    if report.when != "call":
        return None
    try:
        longrepr = report.longrepr
        if longrepr and "Killed process after timeout" in str(longrepr):
            return 'killed', 'K', 'KILLED'
    except TypeError:
        # Some longrepr objects cannot be stringified; treat as not killed.
        pass
def pytest_runtest_makereport(item, call):
    """Delegate test report creation to the reporter module."""
    return reporter.runtest_makereport(item, call)
def pytest_runtest_logreport(report):
    """Delegate per-test report logging to the reporter module."""
    reporter.runtest_logreport(report)
def pytest_collectreport(report):
    """Delegate collection report handling to the reporter module."""
    reporter.collectreport(report)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_terminal_summary(terminalreporter):
    """
    Delegate terminal summary output to the reporter module.

    Implemented as a hook wrapper so our summary runs before other plugins'.
    """
    reporter.terminal_summary(terminalreporter)
    yield
def pytest_sessionfinish(session, exitstatus):
    """Delegate session-finish handling to the reporter module."""
    reporter.sessionfinish(session, exitstatus)
def pytest_unconfigure(config):
    """Delegate final unconfigure cleanup to the reporter module."""
    reporter.unconfigure(config)
def pytest_collect_file(parent, path):
    """
    Pytest function: Should "path" be collected as a test?

    Adds compiled tests.
    """
    name = path.basename
    # Executables, Perl scripts, .t scripts, and extension-less files are
    # candidates for compiled/executable test collection.
    is_exe_like = path.ext in (".exe", ".pl", ".t") or not path.ext
    if is_exe_like and 'test' in name and 'cutest' not in name:
        return exetest.ExecutableFile.from_parent(parent, fspath=path)
def pytest_pycollect_makemodule(path, parent):
    """
    For all Python test files, use `ModuleWithPatchCheck` instead of the
    normal `pytest.Module` class.
    """
    return ModuleWithPatchCheck.from_parent(parent, fspath=path)
def pytest_itemcollected(item):
    """Don't run most Python tests under memtest."""
    # Some python tests use an allow_memtest fixture to mark that they need
    # memtest. Doctest and some other test types may not allow fixtures.
    fixture_names = getattr(item, 'fixturenames', [])
    wants_memtest = (isinstance(item, exetest.ExecutableTest) or
                     'allow_memtest' in fixture_names)
    if wants_memtest:
        item.add_marker(pytest.mark.memtest)
def pytest_collection_modifyitems(items):
    """Mark every test whose filename contains "gui" as require_display."""
    for test_item in items:
        # nodeid is "<path>::<testname>"; only match against the path part.
        filename = test_item.nodeid.split("::")[0]
        if re.search("gui", filename, re.IGNORECASE):
            test_item.add_marker(pytest.mark.require_display)
def pytest_collection_finish(session):
    """
    Work-around for a bad cache of conftest.py

    Removes the cache after all tests have been loaded. At this point, all
    available conftests will also be loaded, so the caching won't be a
    problem. Still leaves us vulnerable to incorrect caching during test
    discovery, though.

    See https://github.com/pytest-dev/pytest/issues/2016
    """
    session._fs2hookproxy = {}
def pytest_addhooks(pluginmanager):
    """
    Add a hook to designate an owner of each test.
    """
    from pluggy import HookspecMarker

    hookspec = HookspecMarker("pytest")

    class _OwnerHookspec:

        @staticmethod
        @hookspec(firstresult=True)
        def pytest_test_owners():
            """
            Specify the owners of tests in this directory.

            :return: tuple of usernames
            """

    pluginmanager.add_hookspecs(_OwnerHookspec)
class ModuleWithPatchCheck(pytest.Module):
    """
    A Module collector that makes sure there are no active patches after
    module import and again after all tests in the module have completed.
    """

    def _getobj(self):
        imported = super()._getobj()
        # Check to see if the import left any active patches. If it did, report
        # a collection failure, which will prevent any tests from running on
        # developer machines but will still allow other tests to run on
        # buildbot (since buildbot has continue_on_collection_errors set to
        # True).
        leftover_targets = self._stopPatches()
        if leftover_targets:
            raise self.CollectError(
                f"Active patches found after import of {self.name} for the "
                f"following targets:\n {', '.join(leftover_targets)}\n")
        return imported

    def teardown(self):
        super().teardown()
        # super().teardown() will trigger teardown_module and tearDownModule,
        # if either is present, so any patches that are still active here are
        # definitely a problem.
        leftover_targets = self._stopPatches()
        if leftover_targets:
            # this will cause the last test of the module to report an error
            raise RuntimeError(
                f"Active patches found during teardown of {self.name} for the "
                f"following targets:\n {', '.join(leftover_targets)}\n"
                f"Note that the test that failed may not be the test "
                f"responsible for these patches.")

    def _stopPatches(self):
        """
        Stop any active patches.

        :return: The targets of all patches that were stopped, or None if no
            patches were active.
        :rtype: list[str]
        """
        # mock keeps a module-level list of every patch started via start();
        # note this is a private mock attribute.
        active = mock._patch._active_patches
        if not active:
            return None
        stopped_targets = [
            f"{p.target.__name__}.{p.attribute}" for p in active
        ]
        mock.patch.stopall()
        return stopped_targets