From 16abdbf2266b10049da7f9b9a91f97cf34e0e375 Mon Sep 17 00:00:00 2001
From: Edgar Andrés Margffoy Tuay
Date: Wed, 28 Aug 2024 18:55:21 -0500
Subject: [PATCH 1/3] Improve unittest collection support

---
 src/pytest_run_parallel/plugin.py | 50 +++++++++++++++++--------
 tests/test_run_parallel.py        | 61 ++++++++++++++++++++++++++++++-
 2 files changed, 94 insertions(+), 17 deletions(-)

diff --git a/src/pytest_run_parallel/plugin.py b/src/pytest_run_parallel/plugin.py
index 3acbac7..454caa3 100644
--- a/src/pytest_run_parallel/plugin.py
+++ b/src/pytest_run_parallel/plugin.py
@@ -2,6 +2,8 @@
 import threading
 import functools
 
+from _pytest.outcomes import Skipped, Failed
+
 
 def pytest_addoption(parser):
     group = parser.getgroup('run-parallel')
@@ -22,26 +24,30 @@ def pytest_configure(config):
         'using `n` threads.')
 
-@pytest.hookimpl(trylast=True)
-def pytest_generate_tests(metafunc):
-    n_workers = metafunc.config.option.parallel_threads
-    m = metafunc.definition.get_closest_marker('parallel_threads')
-    if m is not None:
-        n_workers = int(m.args[0])
-    setattr(metafunc.function, '_n_workers', n_workers)
-
-
 def wrap_function_parallel(fn, n_workers=10):
     barrier = threading.Barrier(n_workers)
+    lock = threading.Lock()
 
     @functools.wraps(fn)
     def inner(*args, **kwargs):
         errors = []
+        warns = []
+        skip = None
+        failed = None
         def closure(*args, **kwargs):
             barrier.wait()
             try:
                 fn(*args, **kwargs)
+            except Warning as w:
+                pass
             except Exception as e:
-                errors.append(e)
+                with lock:
+                    errors.append(e)
+            except Skipped as s:
+                nonlocal skip
+                skip = s.msg
+            except Failed as f:
+                nonlocal failed
+                failed = f
 
         workers = []
         for _ in range(0, n_workers):
@@ -56,14 +62,26 @@ def closure(*args, **kwargs):
         for worker in workers:
             worker.join()
 
-        if len(errors) > 0:
+        # if len(warns) > 0:
+        #     warn = warns[0]
+        #     warnings.warn(str(warn), type(warn))
+
+        if skip is not None:
+            pytest.skip(skip)
+        elif failed is not None:
+            # pytest.fail(failed)
+            raise failed
+        elif len(errors) > 0:
             raise errors[0]
+
     return inner
 
 
-@pytest.hookimpl(wrapper=True)
-def pytest_pyfunc_call(pyfuncitem):
-    n_workers = getattr(pyfuncitem.obj, '_n_workers', None)
+@pytest.hookimpl(trylast=True)
+def pytest_itemcollected(item):
+    n_workers = item.config.option.parallel_threads
+    m = item.get_closest_marker('parallel_threads')
+    if m is not None:
+        n_workers = int(m.args[0])
     if n_workers is not None and n_workers > 1:
-        pyfuncitem.obj = wrap_function_parallel(pyfuncitem.obj, n_workers)
-    return (yield)
+        item.obj = wrap_function_parallel(item.obj, n_workers)
diff --git a/tests/test_run_parallel.py b/tests/test_run_parallel.py
index 8221fd1..d08e7d7 100644
--- a/tests/test_run_parallel.py
+++ b/tests/test_run_parallel.py
@@ -84,7 +84,7 @@ def test_check_thread_count(counter):
 
     @pytest.mark.order(2)
     @pytest.mark.parallel_threads(1)
-    def test_check_thread_count(counter2):
+    def test_check_thread_count2(counter2):
         assert counter2._count == 5
     """)
 
@@ -97,6 +97,65 @@ def test_check_thread_count(counter2):
     # fnmatch_lines does an assertion internally
     result.stdout.fnmatch_lines([
         '*::test_check_thread_count PASSED*',
+        '*::test_check_thread_count2 PASSED*',
     ])
 
     # make sure that we get a '0' exit code for the testsuite
     assert result.ret == 0
+
+
+def test_unittest_compat(pytester):
+    # create a temporary pytest test module
+    pytester.makepyfile("""
+        import pytest
+        import unittest
+        from threading import Lock
+
+        class Counter:
+            def __init__(self):
+                self._count = 0
+                self._lock = Lock()
+
+            def increase(self):
+                with self._lock:
+                    self._count += 1
+
+        class TestExample(unittest.TestCase):
+            @classmethod
+            def setUpClass(cls):
+                cls.counter = Counter()
+                cls.counter2 = Counter()
+
+            @pytest.mark.order(1)
+            def test_example_1(self):
+                self.counter.increase()
+
+            @pytest.mark.order(1)
+            @pytest.mark.parallel_threads(5)
+            def test_example_2(self):
+                self.counter2.increase()
+
+            @pytest.mark.order(2)
+            @pytest.mark.parallel_threads(1)
+            def test_check_thread_count(self):
+                assert self.counter._count == 10
+
+            @pytest.mark.order(2)
+            @pytest.mark.parallel_threads(1)
+            def test_check_thread_count2(self):
+                assert self.counter2._count == 5
+    """)
+
+    # run pytest with the following cmd args
+    result = pytester.runpytest(
+        '--parallel-threads=10',
+        '-v'
+    )
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        '*::test_check_thread_count PASSED*',
+        '*::test_check_thread_count2 PASSED*',
+    ])
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
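Note on PATCH 1/3: the old design needed two hooks because pytest_pyfunc_call
only fires for plain Python test functions, so unittest.TestCase methods were
never wrapped. pytest_itemcollected runs for every collected item regardless
of framework, which is what makes the unittest support work. The wrapper
itself is a straightforward barrier fan-out; the sketch below shows the same
pattern outside pytest (run_in_threads is an illustrative name, not part of
the plugin):

import threading


def run_in_threads(fn, n_workers=4):
    # The barrier makes every worker start `fn` at the same moment, which
    # maximizes the chance of exposing thread-safety bugs in `fn`.
    barrier = threading.Barrier(n_workers)
    errors = []

    def closure():
        barrier.wait()
        try:
            fn()
        except Exception as exc:
            errors.append(exc)

    workers = [threading.Thread(target=closure) for _ in range(n_workers)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # Re-raise the first captured error in the calling thread so the
    # failure is reported exactly once.
    if errors:
        raise errors[0]


if __name__ == '__main__':
    run_in_threads(lambda: None)  # returns once all four workers finish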
From c4288dd82804b291c49318dbb69c6089ca662b80 Mon Sep 17 00:00:00 2001
From: Edgar Andrés Margffoy Tuay
Date: Mon, 2 Sep 2024 08:48:54 -0500
Subject: [PATCH 2/3] Remove unused code

---
 src/pytest_run_parallel/plugin.py | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/src/pytest_run_parallel/plugin.py b/src/pytest_run_parallel/plugin.py
index 454caa3..2830a4a 100644
--- a/src/pytest_run_parallel/plugin.py
+++ b/src/pytest_run_parallel/plugin.py
@@ -26,11 +26,9 @@ def pytest_configure(config):
 def wrap_function_parallel(fn, n_workers=10):
     barrier = threading.Barrier(n_workers)
-    lock = threading.Lock()
 
     @functools.wraps(fn)
     def inner(*args, **kwargs):
         errors = []
-        warns = []
         skip = None
         failed = None
         def closure(*args, **kwargs):
@@ -40,8 +38,7 @@ def closure(*args, **kwargs):
             except Warning as w:
                 pass
             except Exception as e:
-                with lock:
-                    errors.append(e)
+                errors.append(e)
             except Skipped as s:
                 nonlocal skip
                 skip = s.msg
@@ -62,14 +59,9 @@ def closure(*args, **kwargs):
         for worker in workers:
             worker.join()
 
-        # if len(warns) > 0:
-        #     warn = warns[0]
-        #     warnings.warn(str(warn), type(warn))
-
         if skip is not None:
             pytest.skip(skip)
         elif failed is not None:
-            # pytest.fail(failed)
             raise failed
         elif len(errors) > 0:
             raise errors[0]
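Note on PATCH 2/3: dropping the lock around errors.append is safe because
list.append is atomic in CPython, and the warns plumbing was dead code. The
separate Skipped and Failed handlers that remain are not redundant with
except Exception: both outcomes derive from BaseException through pytest's
OutcomeException, so the generic arm can never see them. A standalone check
of that claim (it imports the same private _pytest.outcomes module the plugin
uses, so it may break on a future pytest version):

import pytest
from _pytest.outcomes import Skipped

try:
    pytest.skip('demo')
except Exception:
    # Never reached: Skipped subclasses BaseException, not Exception.
    print('caught as Exception')
except Skipped as s:
    print('caught as Skipped:', s.msg)  # prints: caught as Skipped: demo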
From 2b95eefdd2bf852825549ea05c25550b7aeab4eb Mon Sep 17 00:00:00 2001
From: Edgar Andrés Margffoy Tuay
Date: Mon, 2 Sep 2024 08:55:10 -0500
Subject: [PATCH 3/3] Add tests for exception and pytest event handling

---
 .gitignore                 |  1 +
 tests/test_run_parallel.py | 77 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 78 insertions(+)

diff --git a/.gitignore b/.gitignore
index 9b6398d..45a4626 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,7 @@ htmlcov/
 nosetests.xml
 coverage.xml
 *.cover
+*.lcov
 *.py,cover
 .hypothesis/
 .pytest_cache/
diff --git a/tests/test_run_parallel.py b/tests/test_run_parallel.py
index d08e7d7..a530d81 100644
--- a/tests/test_run_parallel.py
+++ b/tests/test_run_parallel.py
@@ -173,3 +173,80 @@ def test_help_message(pytester):
         #  ' Set the number of threads used to execute each test concurrently.',
     ])
 
+
+def test_skip(pytester):
+    """Check that pytest.skip() propagates from worker threads."""
+
+    # create a temporary pytest test module
+    pytester.makepyfile("""
+        import pytest
+
+        def test_skipped():
+            pytest.skip('Skip propagation')
+    """)
+
+    # run pytest with the following cmd args
+    result = pytester.runpytest(
+        '--parallel-threads=10',
+        '-v'
+    )
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        '*::test_skipped SKIPPED*',
+    ])
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
+
+
+def test_fail(pytester):
+    """Check that pytest.fail() propagates from worker threads."""
+
+    # create a temporary pytest test module
+    pytester.makepyfile("""
+        import pytest
+
+        def test_should_fail():
+            pytest.fail()
+    """)
+
+    # run pytest with the following cmd args
+    result = pytester.runpytest(
+        '--parallel-threads=10',
+        '-v'
+    )
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        '*::test_should_fail FAILED*',
+    ])
+
+    # make sure that we get a non-zero exit code for the testsuite
+    assert result.ret != 0
+
+
+def test_exception(pytester):
+    """Check that an uncaught exception in a worker thread fails the test."""
+
+    # create a temporary pytest test module
+    pytester.makepyfile("""
+        import pytest
+
+        def test_should_fail():
+            raise ValueError('Should raise')
+    """)
+
+    # run pytest with the following cmd args
+    result = pytester.runpytest(
+        '--parallel-threads=10',
+        '-v'
+    )
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        '*::test_should_fail FAILED*',
+    ])
+
+    # make sure that we get a non-zero exit code for the testsuite
+    assert result.ret != 0
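For reference, this is what the series enables end to end: once the plugin is
installed, unittest-style tests honor the same parallel_threads marker and
--parallel-threads option as plain test functions. A minimal, hypothetical
user module (illustrative only, not part of this PR):

import unittest

import pytest


class TestThreadSafety(unittest.TestCase):
    # Overrides the global thread count for this test only.
    @pytest.mark.parallel_threads(2)
    def test_runs_in_two_threads(self):
        self.assertEqual(1 + 1, 2)

    # No marker: runs with the thread count given on the command line.
    def test_uses_global_thread_count(self):
        self.assertTrue(True)


# Run with: pytest --parallel-threads=10 -v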