Imported Upstream version 13.0 upstream/13.0
author    TizenOpenSource <tizenopensrc@samsung.com>  Wed, 27 Dec 2023 08:39:58 +0000 (17:39 +0900)
committer TizenOpenSource <tizenopensrc@samsung.com>  Wed, 27 Dec 2023 08:39:58 +0000 (17:39 +0900)
16 files changed:
CHANGES.rst
CONTRIBUTING.rst
HEADER.rst [new file with mode: 0644]
MANIFEST.in
PKG-INFO
README.rst
pyproject.toml [new file with mode: 0644]
pytest_rerunfailures.egg-info/PKG-INFO
pytest_rerunfailures.egg-info/SOURCES.txt
pytest_rerunfailures.egg-info/entry_points.txt
pytest_rerunfailures.egg-info/requires.txt
pytest_rerunfailures.py
setup.cfg
setup.py [changed mode: 0644->0755]
test_pytest_rerunfailures.py
tox.ini

index 1ef32cb382f1a402f6a834b431041130c1e7394d..7532665a8a100a00468362bfa50f5a49f1b7cc35 100644 (file)
@@ -1,6 +1,177 @@
 Changelog
 =========
 
+13.0 (2023-11-22)
+-----------------
+
+Breaking changes
+++++++++++++++++
+
+- Drop support for pytest < 7.0.
+
+Features
+++++++++
+
+- Add support for Python 3.12.
+
+Bug fixes
++++++++++
+
+- Fix crashitem name mismatch between client and server.
+  (`#172 <https://github.com/pytest-dev/pytest-rerunfailures/issues/172>`_)
+
+- Fix crash when setup fails with --rerun-except flag.
+  (`#230 <https://github.com/pytest-dev/pytest-rerunfailures/issues/230>`_)
+
+12.0 (2023-07-05)
+-----------------
+
+Breaking changes
+++++++++++++++++
+
+- Drop support for pytest < 6.2.
+
+Features
+++++++++
+
+- Add ``only_rerun`` and ``rerun_except`` arguments to ``@pytest.mark.flaky`` marker.
+
+- Add support for pytest 7.3, 7.4.
+
+Bug fixes
++++++++++
+
+- Failures are now rerun only if they match at least one ``--only-rerun``
+  pattern (if given) and none of the ``--rerun-except`` patterns. Previously,
+  using both ``--only-rerun`` and ``--rerun-except`` together could cause
+  failures to be rerun even if they did not match any ``--only-rerun``
+  pattern, and when using multiple ``--rerun-except`` patterns, all failures
+  would be rerun unless they matched every pattern.
+  (`#225 <https://github.com/pytest-dev/pytest-rerunfailures/issues/225>`_)
+
+
+11.1.2 (2023-03-09)
+-------------------
+
+Bug fixes
++++++++++
+
+- Execute teardown when a test was skipped in the setup phase of a fixture.
+
+
+11.1.1 (2023-02-17)
+-------------------
+
+Bug fixes
++++++++++
+
+- Fix crash during teardown when the runtest protocol hook is overwritten by
+  another plugin.
+
+- Fix crash during teardown when a TestCase class is used as a base class.
+
+
+11.1 (2023-02-09)
+-----------------
+
+Bug fixes
++++++++++
+
+- Run teardown of session-, class-, and other scoped fixtures only once after
+  rerunning tests.
+
+Features
+++++++++
+
+- Expose ``reruns`` and ``reruns_delay`` through the ``pytest.ini`` file.
+
+
+11.0 (2023-01-12)
+-----------------
+
+Breaking changes
+++++++++++++++++
+
+- Drop support for Python 3.6.
+
+- Drop support for pytest < 6.
+
+Bug fixes
++++++++++
+
+- Fix crash when pytest-xdist is installed but disabled.
+  (Thanks to `@mgorny <https://github.com/mgorny>`_ for the PR.)
+
+- Fix crash when the xfail(strict=True) mark is used with the --only-rerun flag.
+
+Features
+++++++++
+
+- Added option ``--rerun-except`` to rerun failed tests except those matching
+  the given error.
+
+- Add support for Python 3.11.
+
+- Add support for pytest 7.0, 7.1, 7.2.
+
+
+10.2 (2021-09-17)
+-----------------
+
+Features
+++++++++
+
+- Allow recovery from crashed tests with pytest-xdist.
+- Add support for Python 3.10 (as of Python 3.10.rc2).
+  (Thanks to `@hugovk <https://github.com/hugovk>`_ for the PR.)
+
+
+10.1 (2021-07-02)
+-----------------
+
+Features
+++++++++
+
+- Allow using a ``str`` as condition for
+  ``@pytest.mark.flaky(condition)``
+  which gets evaluated dynamically similarly to
+  ``@pytest.mark.skipif(condition)``.
+  (`#162 <https://github.com/pytest-dev/pytest-rerunfailures/pull/162>`_
+  provided by `@15klli <https://github.com/15klli>`_)
+
+10.0 (2021-05-26)
+-----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for Python 3.5.
+
+- Drop support for pytest < 5.3.
+
+Features
+++++++++
+
+- Add ``condition`` keyword argument to the re-run marker.
+  (Thanks to `@BeyondEvil`_ for the PR.)
+
+- Add support for Python 3.9.
+  (Thanks to `@digitronik`_ for the PR.)
+
+- Add support for pytest 6.3.
+  (Thanks to `@bluetech`_ for the PR.)
+
+- Add compatibility with ``pytest-xdist >= 2.0``.
+  (Thanks to `@bluetech`_ for the PR.)
+
+Other changes
++++++++++++++
+
+- Check for the resultlog by feature and not by version as pytest master does
+  not provide a consistent version.
+
+.. _@BeyondEvil: https://github.com/BeyondEvil
+.. _@digitronik: https://github.com/digitronik
+.. _@bluetech: https://github.com/bluetech
+
 9.1.1 (2020-09-29)
 ------------------
 
@@ -147,11 +318,11 @@ Bug fixes
 4.2 (2018-10-04)
 ----------------
 
-- Fixed #64 issue related to ``setup_class`` and ``fixture`` executions on rerun (Thanks to
-  `@OlegKuzovkov`_ for the PR).
+- Fixed #64 issue related to ``setup_class`` and ``fixture`` executions on
+  rerun (Thanks to `@OlegKuzovkov`_ for the PR).
 
-- Added new ``execution_count`` attribute to reflect the number of test case executions according to #67 issue.
-  (Thanks to `@OlegKuzovkov`_ for the PR).
+- Added new ``execution_count`` attribute to reflect the number of test case
+  executions according to #67 issue. (Thanks to `@OlegKuzovkov`_ for the PR).
 
 .. _@OlegKuzovkov: https://github.com/OlegKuzovkov
 
index d180a35d71270f3f30e00afefc4cdfbe783bb022..24e16dd496dc838c98632b02dbe90e3500d3ef91 100644 (file)
@@ -2,8 +2,8 @@
 Contribution getting started
 ============================
 
-Contributions are highly welcomed and appreciated. Every little bit of help counts,
-so do not hesitate!
+Contributions are highly welcomed and appreciated.
+Every little bit of help counts, so do not hesitate!
 
 .. contents::
    :depth: 2
@@ -37,4 +37,5 @@ Preparing Pull Requests
 
 #. Follow **PEP-8** for naming and `black <https://github.com/psf/black>`_ for formatting.
 
-#. Add a line item to the current **unreleased** version in ``CHANGES.rst``, unless the change is trivial.
+#. Add a line item to the current **unreleased** version in ``CHANGES.rst``,
+   unless the change is trivial.
diff --git a/HEADER.rst b/HEADER.rst
new file mode 100644 (file)
index 0000000..cfeffa8
--- /dev/null
@@ -0,0 +1 @@
+.. contents::
index d587163be0f16823dedf6f808f29acce983c1409..f88eebee5b1b8780818c1a1ba5235226c6f18299 100644 (file)
@@ -1,4 +1,5 @@
 include *.py
 include *.rst
+include *.toml
 include LICENSE
 include tox.ini
index fcbc91634eafd67a9440a933c78d1dd96b320d17..204b2526d31873978f5c9ca5bf94e94d4f9d4db4 100644 (file)
--- a/PKG-INFO
+++ b/PKG-INFO
-Metadata-Version: 1.2
+Metadata-Version: 2.1
 Name: pytest-rerunfailures
-Version: 9.1.1
+Version: 13.0
 Summary: pytest plugin to re-run tests to eliminate flaky failures
 Home-page: https://github.com/pytest-dev/pytest-rerunfailures
 Author: Leah Klearman
 Author-email: lklrmn@gmail.com
 License: Mozilla Public License 2.0 (MPL 2.0)
-Description: .. contents::
-        
-        pytest-rerunfailures
-        ====================
-        
-        pytest-rerunfailures is a plugin for `pytest <https://pytest.org>`_ that
-        re-runs tests to eliminate intermittent failures.
-        
-        .. image:: https://img.shields.io/badge/license-MPL%202.0-blue.svg
-           :target: https://github.com/pytest-dev/pytest-rerunfailures/blob/master/LICENSE
-           :alt: License
-        .. image:: https://img.shields.io/pypi/v/pytest-rerunfailures.svg
-           :target: https://pypi.python.org/pypi/pytest-rerunfailures/
-           :alt: PyPI
-        .. image:: https://img.shields.io/travis/pytest-dev/pytest-rerunfailures.svg
-           :target: https://travis-ci.org/pytest-dev/pytest-rerunfailures/
-           :alt: Travis
-        
-        Requirements
-        ------------
-        
-        You will need the following prerequisites in order to use pytest-rerunfailures:
-        
-        - Python 3.5, up to 3.8, or PyPy3
-        - pytest 5.0 or newer
-        
-        This package is currently tested against the last 5 minor pytest releases. In
-        case you work with an older version of pytest you should consider updating or
-        use one of the earlier versions of this package.
-        
-        Installation
-        ------------
-        
-        To install pytest-rerunfailures:
-        
-        .. code-block:: bash
-        
-          $ pip install pytest-rerunfailures
-        
-        Re-run all failures
-        -------------------
-        
-        To re-run all test failures, use the ``--reruns`` command line option with the
-        maximum number of times you'd like the tests to run:
-        
-        .. code-block:: bash
-        
-          $ pytest --reruns 5
-        
-        Failed fixture or setup_class will also be re-executed.
-        
-        To add a delay time between re-runs use the ``--reruns-delay`` command line
-        option with the amount of seconds that you would like wait before the next
-        test re-run is launched:
-        
-        .. code-block:: bash
-        
-           $ pytest --reruns 5 --reruns-delay 1
-        
-        Re-run all failures matching certain expressions
-        ------------------------------------------------
-        
-        To re-run only those failures that match a certain list of expressions, use the
-        ``--only-rerun`` flag and pass it a regular expression. For example, the following would
-        only rerun those errors that match ``AssertionError``:
-        
-        .. code-block:: bash
-        
-           $ pytest --reruns 5 --only-rerun AssertionError
-        
-        Passing the flag multiple times accumulates the arguments, so the following would only rerun
-        those errors that match ``AssertionError`` or ``ValueError``:
-        
-        .. code-block:: bash
-        
-           $ pytest --reruns 5 --only-rerun AssertionError --only-rerun ValueError
-        
-        Re-run individual failures
-        --------------------------
-        
-        To mark individual tests as flaky, and have them automatically re-run when they
-        fail, add the ``flaky`` mark with the maximum number of times you'd like the
-        test to run:
-        
-        .. code-block:: python
-        
-          @pytest.mark.flaky(reruns=5)
-          def test_example():
-              import random
-              assert random.choice([True, False])
-        
-        Note that when teardown fails, two reports are generated for the case, one for
-        the test case and the other for the teardown error.
-        
-        You can also specify the re-run delay time in the marker:
-        
-        .. code-block:: python
-        
-          @pytest.mark.flaky(reruns=5, reruns_delay=2)
-          def test_example():
-              import random
-              assert random.choice([True, False])
-        
-        Output
-        ------
-        
-        Here's an example of the output provided by the plugin when run with
-        ``--reruns 2`` and ``-r aR``::
-        
-          test_report.py RRF
-        
-          ================================== FAILURES ==================================
-          __________________________________ test_fail _________________________________
-        
-              def test_fail():
-          >       assert False
-          E       assert False
-        
-          test_report.py:9: AssertionError
-          ============================ rerun test summary info =========================
-          RERUN test_report.py::test_fail
-          RERUN test_report.py::test_fail
-          ============================ short test summary info =========================
-          FAIL test_report.py::test_fail
-          ======================= 1 failed, 2 rerun in 0.02 seconds ====================
-        
-        Note that output will show all re-runs. Tests that fail on all the re-runs will
-        be marked as failed.
-        
-        Compatibility
-        -------------
-        
-        * This plugin may *not* be used with class, module, and package level fixtures.
-        * This plugin is *not* compatible with pytest-xdist's --looponfail flag.
-        * This plugin is *not* compatible with the core --pdb flag.
-        
-        Resources
-        ---------
-        
-        - `Issue Tracker <https://github.com/pytest-dev/pytest-rerunfailures/issues>`_
-        - `Code <https://github.com/pytest-dev/pytest-rerunfailures/>`_
-        
-        Development
-        -----------
-        
-        * Test execution count can be retrieved from the ``execution_count`` attribute in test ``item``'s object. Example:
-        
-          .. code-block:: python
-        
-            @hookimpl(tryfirst=True)
-            def pytest_runtest_makereport(item, call):
-                print(item.execution_count)
-        
-        
-        Changelog
-        =========
-        
-        9.1.1 (2020-09-29)
-        ------------------
-        
-        Compatibility fix.
-        ++++++++++++++++++
-        
-        - Ignore ``--result-log`` command line option when used together with ``pytest
-          >= 6.1.0``, as it was removed there. This is a quick fix, use an older
-          version of pytest, if you want to keep this feature for now.
-          (Thanks to `@ntessore`_ for the PR)
-        
-        - Support up to pytest 6.1.0.
-        
-        .. _@ntessore: https://github.com/ntessore
-        
-        
-        9.1 (2020-08-26)
-        ----------------
-        
-        Features
-        ++++++++
-        
-        - Add a new flag ``--only-rerun`` to allow for users to rerun only certain
-          errors.
-        
-        Other changes
-        +++++++++++++
-        
-        - Drop dependency on ``mock``.
-        
-        - Add support for pre-commit and add a linting tox target.
-          (`#117 <https://github.com/pytest-dev/pytest-rerunfailures/pull/117>`_)
-          (PR from `@gnikonorov`_)
-        
-        .. _@gnikonorov: https://github.com/gnikonorov
-        
-        
-        9.0 (2020-03-18)
-        ----------------
-        
-        Backwards incompatible changes
-        ++++++++++++++++++++++++++++++
-        
-        - Drop support for pytest version 4.4, 4.5 and 4.6.
-        
-        - Drop support for Python 2.7.
-        
-        
-        Features
-        ++++++++
-        
-        - Add support for pytest 5.4.
-        
-        - Add support for Python 3.8.
-        
-        
-        8.0 (2019-11-18)
-        ----------------
-        
-        Backwards incompatible changes
-        ++++++++++++++++++++++++++++++
-        
-        - Drop support for pytest version 3.10, 4.0, 4.1, 4.2 and 4.3
-        
-        - Drop support for Python 3.4.
-        
-        Features
-        ++++++++
-        
-        - Add support for pytest version 4.4, 4.5, 4.6, 5.0, 5.1 and 5.2.
-        
-        Bug fixes
-        +++++++++
-        
-        - Explicitly depend on setuptools to ensure installation when working in
-          environments without it.
-          (`#98 <https://github.com/pytest-dev/pytest-rerunfailures/pull/98>`_)
-          (PR from `@Eric-Arellano`_)
-        
-        .. _@Eric-Arellano: https://github.com/Eric-Arellano
-        
-        
-        7.0 (2019-03-28)
-        ----------------
-        
-        Backwards incompatible changes
-        ++++++++++++++++++++++++++++++
-        
-        - Drop support for pytest version 3.8 and 3.9.
-        
-        Features
-        ++++++++
-        
-        - Add support for pytest version 4.2 and 4.3.
-        
-        Bug fixes
-        +++++++++
-        
-        - Fixed #83 issue about ignored ``pytest_runtest_logfinish`` hooks.
-          (`#83 <https://github.com/pytest-dev/pytest-rerunfailures/issues/83>`_)
-          (PR from `@KillAChicken`_)
-        
-        .. _@KillAChicken: https://github.com/KillAChicken
-        
-        
-        6.0 (2019-01-08)
-        ----------------
-        
-        Backwards incompatible changes
-        ++++++++++++++++++++++++++++++
-        
-        - Drop support for pytest version 3.6 and 3.7.
-        
-        Features
-        ++++++++
-        
-        - Add support for pytest version 4.0 and 4.1.
-        
-        Bug fixes
-        +++++++++
-        
-        - Fixed #77 regression issue introduced in 4.2 related to the ``rerun``
-          attribute on the test report.
-          (`#77 <https://github.com/pytest-dev/pytest-rerunfailures/issues/77>`_)
-          (Thanks to `@RibeiroAna`_ for the PR).
-        
-        .. _@RibeiroAna: https://github.com/RibeiroAna
-        
-        
-        5.0 (2018-11-06)
-        ----------------
-        
-        - Drop support for pytest versions < 3.6 to reduce the maintenance burden.
-        
-        - Add support up to pytest version 3.10. Thus supporting the newest 5 pytest
-          releases.
-        
-        - Add support for Python 3.7.
-        
-        - Fix issue can occur when used together with `pytest-flake8`
-          (`#73 <https://github.com/pytest-dev/pytest-rerunfailures/issues/73>`_)
-        
-        
-        4.2 (2018-10-04)
-        ----------------
-        
-        - Fixed #64 issue related to ``setup_class`` and ``fixture`` executions on rerun (Thanks to
-          `@OlegKuzovkov`_ for the PR).
-        
-        - Added new ``execution_count`` attribute to reflect the number of test case executions according to #67 issue.
-          (Thanks to `@OlegKuzovkov`_ for the PR).
-        
-        .. _@OlegKuzovkov: https://github.com/OlegKuzovkov
-        
-        
-        4.1 (2018-05-23)
-        ----------------
-        
-        - Add support for pytest 3.6 by using ``Node.get_closest_marker()`` (Thanks to
-          `@The-Compiler`_ for the PR).
-        
-        .. _@The-Compiler: https://github.com/The-Compiler
-        
-        4.0 (2017-12-23)
-        ----------------
-        
-        - Added option to add a delay time between test re-runs (Thanks to `@Kanguros`_
-          for the PR).
-        
-        - Added support for pytest >= 3.3.
-        
-        - Drop support for pytest < 2.8.7.
-        
-        .. _@Kanguros: https://github.com/Kanguros
-        
-        
-        3.1 (2017-08-29)
-        ----------------
-        
-        - Restored compatibility with pytest-xdist. (Thanks to `@davehunt`_ for the PR)
-        
-        .. _@davehunt: https://github.com/davehunt
-        
-        
-        3.0 (2017-08-17)
-        ----------------
-        
-        - Add support for Python 3.6.
-        
-        - Add support for pytest 2.9 up to 3.2
-        
-        - Drop support for Python 2.6 and 3.3.
-        
-        - Drop support for pytest < 2.7.
-        
-        
-        2.2 (2017-06-23)
-        ----------------
-        
-        - Ensure that other plugins can run after this one, in case of a global setting
-          ``--rerun=0``. (Thanks to `@sublee`_ for the PR)
-        
-        .. _@sublee: https://github.com/sublee
-        
-        2.1.0 (2016-11-01)
-        ------------------
-        
-        - Add default value of ``reruns=1`` if ``pytest.mark.flaky()`` is called
-          without arguments.
-        
-        - Also offer a distribution as universal wheel. (Thanks to `@tltx`_ for the PR)
-        
-        .. _@tltx: https://github.com/tltx
-        
-        
-        2.0.1 (2016-08-10)
-        -----------------------------
-        
-        - Prepare CLI options to pytest 3.0, to avoid a deprecation warning.
-        
-        - Fix error due to missing CHANGES.rst when creating the source distribution
-          by adding a MANIFEST.in.
-        
-        
-        2.0.0 (2016-04-06)
-        ------------------
-        
-        - Drop support for Python 3.2, since supporting it became too much of a hassle.
-          (Reason: Virtualenv 14+ / PIP 8+ do not support Python 3.2 anymore.)
-        
-        
-        1.0.2 (2016-03-29)
-        ------------------
-        
-        - Add support for `--resultlog` option by parsing reruns accordingly. (#28)
-        
-        
-        1.0.1 (2016-02-02)
-        ------------------
-        
-        - Improve package description and include CHANGELOG into description.
-        
-        
-        1.0.0 (2016-02-02)
-        ------------------
-        
-        - Rewrite to use newer API of pytest >= 2.3.0
-        
-        - Improve support for pytest-xdist by only logging the final result.
-          (Logging intermediate results will finish the test rather rerunning it.)
-        
 Keywords: py.test pytest rerun failures flaky
-Platform: UNKNOWN
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Framework :: Pytest
 Classifier: Intended Audience :: Developers
@@ -426,10 +17,687 @@ Classifier: Operating System :: MacOS :: MacOS X
 Classifier: Topic :: Software Development :: Quality Assurance
 Classifier: Topic :: Software Development :: Testing
 Classifier: Topic :: Utilities
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.7
 Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
-Requires-Python: >=3.5
+Requires-Python: >=3.7
+License-File: LICENSE
+Requires-Dist: packaging>=17.1
+Requires-Dist: pytest>=7
+Requires-Dist: importlib-metadata>=1; python_version < "3.8"
+
+.. contents::
+
+pytest-rerunfailures
+====================
+
+pytest-rerunfailures is a plugin for `pytest <https://pytest.org>`_ that
+re-runs tests to eliminate intermittent failures.
+
+.. image:: https://img.shields.io/badge/license-MPL%202.0-blue.svg
+   :target: https://github.com/pytest-dev/pytest-rerunfailures/blob/master/LICENSE
+   :alt: License
+.. image:: https://img.shields.io/pypi/v/pytest-rerunfailures.svg
+   :target: https://pypi.python.org/pypi/pytest-rerunfailures/
+   :alt: PyPI
+.. image:: https://github.com/pytest-dev/pytest-rerunfailures/workflows/Test/badge.svg
+   :target: https://github.com/pytest-dev/pytest-rerunfailures/actions
+   :alt: GitHub Actions
+
+Requirements
+------------
+
+You will need the following prerequisites in order to use pytest-rerunfailures:
+
+- Python 3.7, up to 3.12, or PyPy3
+- pytest 7.0 or newer
+
+This plugin can recover from a hard crash with the following optional
+prerequisites:
+
+- pytest-xdist 2.3.0 or newer
+
+This package is currently tested against the last 5 minor pytest releases. If
+you work with an older version of pytest, you should consider updating or
+using an earlier version of this package.
+
+Installation
+------------
+
+To install pytest-rerunfailures:
+
+.. code-block:: bash
+
+  $ pip install pytest-rerunfailures
+
+Recover from hard crashes
+-------------------------
+
+If one or more tests trigger a hard crash (for example, a segfault), this
+plugin is ordinarily unable to rerun them. However, if a compatible version of
+pytest-xdist is installed and the tests are run under pytest-xdist with the
+``-n`` flag, this plugin can rerun crashed tests, provided the workers and the
+controller are on the same LAN (which is almost always the case, since they
+usually run on the same machine). Otherwise, this functionality may not work.
+
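+As a minimal sketch (hypothetical test name, and assuming pytest-xdist is
+installed and the suite is run with e.g. ``pytest -n 2 --reruns 2``), a test
+that kills its own worker process demonstrates the recovery behavior:
+
+.. code-block:: python
+
+   import os
+   import signal
+
+   def test_hard_crash():
+       # Simulate a segfault (POSIX only): the worker process dies, and
+       # with pytest-xdist installed the plugin can still rerun the test.
+       os.kill(os.getpid(), signal.SIGSEGV)
+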
+Re-run all failures
+-------------------
+
+To re-run all test failures, use the ``--reruns`` command line option with the
+maximum number of times you'd like the tests to run:
+
+.. code-block:: bash
+
+  $ pytest --reruns 5
+
+Failed fixture or setup_class will also be re-executed.
+
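+For illustration, a sketch (hypothetical names) of a fixture whose setup
+fails intermittently; a rerun triggered by ``--reruns`` executes the fixture
+setup again as well:
+
+.. code-block:: python
+
+   import random
+
+   import pytest
+
+   @pytest.fixture
+   def unstable_resource():
+       # Intermittent setup failure; re-executed on each rerun.
+       assert random.choice([True, False])
+       return object()
+
+   def test_example(unstable_resource):
+       assert unstable_resource is not None
+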
+To add a delay between re-runs, use the ``--reruns-delay`` command line
+option with the number of seconds that you would like to wait before the next
+test re-run is launched:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --reruns-delay 1
+
+Re-run all failures matching certain expressions
+------------------------------------------------
+
+To re-run only those failures that match a certain list of expressions, use the
+``--only-rerun`` flag and pass it a regular expression. For example,
+the following would only rerun those errors that match ``AssertionError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --only-rerun AssertionError
+
+Passing the flag multiple times accumulates the arguments, so the following
+would only rerun those errors that match ``AssertionError`` or ``ValueError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --only-rerun AssertionError --only-rerun ValueError
+
+Re-run all failures other than matching certain expressions
+-----------------------------------------------------------
+
+To re-run only those failures that do not match a certain list of expressions,
+use the ``--rerun-except`` flag and pass it a regular expression. For example,
+the following would only rerun errors that do not match ``AssertionError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --rerun-except AssertionError
+
+Passing the flag multiple times accumulates the arguments, so the following
+would only rerun those errors that match neither ``AssertionError`` nor ``OSError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --rerun-except AssertionError --rerun-except OSError
+
+.. note::
+
+   When the ``AssertionError`` comes from the use of the ``assert`` keyword,
+   use ``--rerun-except assert`` instead::
+
+      $ pytest --reruns 5 --rerun-except assert
+
+Re-run individual failures
+--------------------------
+
+To mark individual tests as flaky, and have them automatically re-run when they
+fail, add the ``flaky`` mark with the maximum number of times you'd like the
+test to run:
+
+.. code-block:: python
+
+  @pytest.mark.flaky(reruns=5)
+  def test_example():
+      import random
+      assert random.choice([True, False])
+
+Note that when teardown fails, two reports are generated for the case, one for
+the test case and the other for the teardown error.
+
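+A sketch (hypothetical names) of a teardown failure producing both reports:
+
+.. code-block:: python
+
+   import pytest
+
+   @pytest.fixture
+   def resource():
+       yield object()
+       # Raising during teardown yields a separate teardown-error report
+       # in addition to the report for the test itself.
+       raise RuntimeError("teardown failed")
+
+   @pytest.mark.flaky(reruns=1)
+   def test_example(resource):
+       assert True
+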
+You can also specify the re-run delay time in the marker:
+
+.. code-block:: python
+
+  @pytest.mark.flaky(reruns=5, reruns_delay=2)
+  def test_example():
+      import random
+      assert random.choice([True, False])
+
+You can also specify an optional ``condition`` in the re-run marker:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(reruns=5, condition=sys.platform.startswith("win32"))
+   def test_example():
+       import random
+       assert random.choice([True, False])
+
+Exception filtering can be accomplished by specifying regular expressions for
+``only_rerun`` and ``rerun_except``. They override the ``--only-rerun`` and
+``--rerun-except`` command line arguments, respectively.
+
+Arguments can be a single string:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(rerun_except="AssertionError")
+   def test_example():
+       raise AssertionError()
+
+Or a list of strings:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(only_rerun=["AssertionError", "ValueError"])
+   def test_example():
+       raise AssertionError()
+
+
+You can use ``@pytest.mark.flaky(condition)`` similarly to
+``@pytest.mark.skipif(condition)``; see `pytest-mark-skipif
+<https://docs.pytest.org/en/6.2.x/reference.html#pytest-mark-skipif>`_:
+
+.. code-block:: python
+
+    @pytest.mark.flaky(reruns=2, condition="sys.platform.startswith('win32')")
+    def test_example():
+        import random
+        assert random.choice([True, False])
+
+
+    # exactly the same as the above
+    @pytest.mark.flaky(reruns=2, condition=sys.platform.startswith("win32"))
+    def test_example():
+        import random
+        assert random.choice([True, False])
+
+Note that the test will re-run for any ``condition`` that is truthy.
+
+Output
+------
+
+Here's an example of the output provided by the plugin when run with
+``--reruns 2`` and ``-r aR``::
+
+  test_report.py RRF
+
+  ================================== FAILURES ==================================
+  __________________________________ test_fail _________________________________
+
+      def test_fail():
+  >       assert False
+  E       assert False
+
+  test_report.py:9: AssertionError
+  ============================ rerun test summary info =========================
+  RERUN test_report.py::test_fail
+  RERUN test_report.py::test_fail
+  ============================ short test summary info =========================
+  FAIL test_report.py::test_fail
+  ======================= 1 failed, 2 rerun in 0.02 seconds ====================
+
+Note that output will show all re-runs. Tests that fail on all the re-runs will
+be marked as failed.
+
+Compatibility
+-------------
+
+* This plugin may *not* be used with class, module, and package level fixtures.
+* This plugin is *not* compatible with pytest-xdist's --looponfail flag.
+* This plugin is *not* compatible with the core --pdb flag.
+* This plugin is *not* compatible with the plugin
+  `flaky <https://pypi.org/project/flaky/>`_; you can have either
+  ``pytest-rerunfailures`` or ``flaky`` installed, but not both.
+
+Resources
+---------
+
+- `Issue Tracker <https://github.com/pytest-dev/pytest-rerunfailures/issues>`_
+- `Code <https://github.com/pytest-dev/pytest-rerunfailures/>`_
+
+Development
+-----------
+
+* The test execution count can be retrieved from the ``execution_count``
+  attribute on the test ``item`` object. Example:
+
+  .. code-block:: python
+
+    from pytest import hookimpl
+
+    @hookimpl(tryfirst=True)
+    def pytest_runtest_makereport(item, call):
+        print(item.execution_count)
+
+Changelog
+=========
+
+13.0 (2023-11-22)
+-----------------
+
+Breaking changes
+++++++++++++++++
+
+- Drop support for pytest < 7.0.
+
+Features
+++++++++
+
+- Add support for Python 3.12.
+
+Bug fixes
++++++++++
+
+- Fix crashitem name mismatch between client and server.
+  (`#172 <https://github.com/pytest-dev/pytest-rerunfailures/issues/172>`_)
+
+- Fix crash when setup fails with --rerun-except flag.
+  (`#230 <https://github.com/pytest-dev/pytest-rerunfailures/issues/230>`_)
+
+12.0 (2023-07-05)
+-----------------
+
+Breaking changes
+++++++++++++++++
+
+- Drop support for pytest < 6.2.
+
+Features
+++++++++
+
+- Add ``only_rerun`` and ``rerun_except`` arguments to ``@pytest.mark.flaky`` marker.
+
+- Add support for pytest 7.3, 7.4.
+
+Bug fixes
++++++++++
+
+- Failures are now rerun only if they match at least one ``--only-rerun``
+  pattern (if given) and none of the ``--rerun-except`` patterns. Previously,
+  using both ``--only-rerun`` and ``--rerun-except`` together could cause
+  failures to be rerun even if they did not match any ``--only-rerun``
+  pattern, and when using multiple ``--rerun-except`` patterns, all failures
+  would be rerun unless they matched every pattern.
+  (`#225 <https://github.com/pytest-dev/pytest-rerunfailures/issues/225>`_)
+
+
+11.1.2 (2023-03-09)
+-------------------
+
+Bug fixes
++++++++++
+
+- Execute teardown when a test was skipped in the setup phase of a fixture.
+
+
+11.1.1 (2023-02-17)
+-------------------
+
+Bug fixes
++++++++++
+
+- Fix crash during teardown when the runtest protocol hook is overwritten by
+  another plugin.
+
+- Fix crash during teardown when a TestCase class is used as a base class.
+
+
+11.1 (2023-02-09)
+-----------------
+
+Bug fixes
++++++++++
+
+- Run teardown of session-, class-, and other scoped fixtures only once after
+  rerunning tests.
+
+Features
+++++++++
+
+- Expose ``reruns`` and ``reruns_delay`` through the ``pytest.ini`` file.
+
+
+11.0 (2023-01-12)
+-----------------
+
+Breaking changes
+++++++++++++++++
+
+- Drop support for Python 3.6.
+
+- Drop support for pytest < 6.
+
+Bug fixes
++++++++++
+
+- Fix crash when pytest-xdist is installed but disabled.
+  (Thanks to `@mgorny <https://github.com/mgorny>`_ for the PR.)
+
+- Fix crash when the xfail(strict=True) mark is used with the --only-rerun flag.
+
+Features
+++++++++
+
+- Added option ``--rerun-except`` to rerun failed tests except those matching
+  the given error.
+
+- Add support for Python 3.11.
+
+- Add support for pytest 7.0, 7.1, 7.2.
+
+
+10.2 (2021-09-17)
+-----------------
+
+Features
+++++++++
+
+- Allow recovery from crashed tests with pytest-xdist.
+- Add support for Python 3.10 (as of Python 3.10.rc2).
+  (Thanks to `@hugovk <https://github.com/hugovk>`_ for the PR.)
+
+
+10.1 (2021-07-02)
+-----------------
+
+Features
+++++++++
+
+- Allow using a ``str`` as condition for
+  ``@pytest.mark.flaky(condition)``
+  which gets evaluated dynamically similarly to
+  ``@pytest.mark.skipif(condition)``.
+  (`#162 <https://github.com/pytest-dev/pytest-rerunfailures/pull/162>`_
+  provided by `@15klli <https://github.com/15klli>`_)
+
+10.0 (2021-05-26)
+-----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for Python 3.5.
+
+- Drop support for pytest < 5.3.
+
+Features
+++++++++
+
+- Add ``condition`` keyword argument to the re-run marker.
+  (Thanks to `@BeyondEvil`_ for the PR.)
+
+- Add support for Python 3.9.
+  (Thanks to `@digitronik`_ for the PR.)
+
+- Add support for pytest 6.3.
+  (Thanks to `@bluetech`_ for the PR.)
+
+- Add compatibility with ``pytest-xdist >= 2.0``.
+  (Thanks to `@bluetech`_ for the PR.)
+
+Other changes
++++++++++++++
+
+- Check for the resultlog by feature and not by version as pytest master does
+  not provide a consistent version.
+
+.. _@BeyondEvil: https://github.com/BeyondEvil
+.. _@digitronik: https://github.com/digitronik
+.. _@bluetech: https://github.com/bluetech
+
+9.1.1 (2020-09-29)
+------------------
+
+Compatibility fix.
+++++++++++++++++++
+
+- Ignore the ``--result-log`` command line option when used together with
+  ``pytest >= 6.1.0``, as it was removed there. This is a quick fix; use an
+  older version of pytest if you want to keep this feature for now.
+  (Thanks to `@ntessore`_ for the PR)
+
+- Support up to pytest 6.1.0.
+
+.. _@ntessore: https://github.com/ntessore
+
+
+9.1 (2020-08-26)
+----------------
+
+Features
+++++++++
+
+- Add a new flag ``--only-rerun`` to allow for users to rerun only certain
+  errors.
+
+Other changes
++++++++++++++
+
+- Drop dependency on ``mock``.
+
+- Add support for pre-commit and add a linting tox target.
+  (`#117 <https://github.com/pytest-dev/pytest-rerunfailures/pull/117>`_)
+  (PR from `@gnikonorov`_)
+
+.. _@gnikonorov: https://github.com/gnikonorov
+
+
+9.0 (2020-03-18)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest version 4.4, 4.5 and 4.6.
+
+- Drop support for Python 2.7.
+
+
+Features
+++++++++
+
+- Add support for pytest 5.4.
+
+- Add support for Python 3.8.
+
+
+8.0 (2019-11-18)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest version 3.10, 4.0, 4.1, 4.2 and 4.3
+
+- Drop support for Python 3.4.
+
+Features
+++++++++
+
+- Add support for pytest version 4.4, 4.5, 4.6, 5.0, 5.1 and 5.2.
+
+Bug fixes
++++++++++
+
+- Explicitly depend on setuptools to ensure installation when working in
+  environments without it.
+  (`#98 <https://github.com/pytest-dev/pytest-rerunfailures/pull/98>`_)
+  (PR from `@Eric-Arellano`_)
+
+.. _@Eric-Arellano: https://github.com/Eric-Arellano
+
+
+7.0 (2019-03-28)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest version 3.8 and 3.9.
+
+Features
+++++++++
+
+- Add support for pytest version 4.2 and 4.3.
+
+Bug fixes
++++++++++
+
+- Fixed #83 issue about ignored ``pytest_runtest_logfinish`` hooks.
+  (`#83 <https://github.com/pytest-dev/pytest-rerunfailures/issues/83>`_)
+  (PR from `@KillAChicken`_)
+
+.. _@KillAChicken: https://github.com/KillAChicken
+
+
+6.0 (2019-01-08)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest version 3.6 and 3.7.
+
+Features
+++++++++
+
+- Add support for pytest version 4.0 and 4.1.
+
+Bug fixes
++++++++++
+
+- Fixed #77 regression issue introduced in 4.2 related to the ``rerun``
+  attribute on the test report.
+  (`#77 <https://github.com/pytest-dev/pytest-rerunfailures/issues/77>`_)
+  (Thanks to `@RibeiroAna`_ for the PR).
+
+.. _@RibeiroAna: https://github.com/RibeiroAna
+
+
+5.0 (2018-11-06)
+----------------
+
+- Drop support for pytest versions < 3.6 to reduce the maintenance burden.
+
+- Add support up to pytest version 3.10. Thus supporting the newest 5 pytest
+  releases.
+
+- Add support for Python 3.7.
+
+- Fix an issue that can occur when used together with `pytest-flake8`
+  (`#73 <https://github.com/pytest-dev/pytest-rerunfailures/issues/73>`_)
+
+
+4.2 (2018-10-04)
+----------------
+
+- Fixed #64 issue related to ``setup_class`` and ``fixture`` executions on
+  rerun (Thanks to `@OlegKuzovkov`_ for the PR).
+
+- Added new ``execution_count`` attribute to reflect the number of test case
+  executions according to #67 issue. (Thanks to `@OlegKuzovkov`_ for the PR).
+
+.. _@OlegKuzovkov: https://github.com/OlegKuzovkov
+
+
+4.1 (2018-05-23)
+----------------
+
+- Add support for pytest 3.6 by using ``Node.get_closest_marker()`` (Thanks to
+  `@The-Compiler`_ for the PR).
+
+.. _@The-Compiler: https://github.com/The-Compiler
+
+4.0 (2017-12-23)
+----------------
+
+- Added option to add a delay time between test re-runs (Thanks to `@Kanguros`_
+  for the PR).
+
+- Added support for pytest >= 3.3.
+
+- Drop support for pytest < 2.8.7.
+
+.. _@Kanguros: https://github.com/Kanguros
+
+
+3.1 (2017-08-29)
+----------------
+
+- Restored compatibility with pytest-xdist. (Thanks to `@davehunt`_ for the PR)
+
+.. _@davehunt: https://github.com/davehunt
+
+
+3.0 (2017-08-17)
+----------------
+
+- Add support for Python 3.6.
+
+- Add support for pytest 2.9 up to 3.2
+
+- Drop support for Python 2.6 and 3.3.
+
+- Drop support for pytest < 2.7.
+
+
+2.2 (2017-06-23)
+----------------
+
+- Ensure that other plugins can run after this one, in case of a global setting
+  ``--rerun=0``. (Thanks to `@sublee`_ for the PR)
+
+.. _@sublee: https://github.com/sublee
+
+2.1.0 (2016-11-01)
+------------------
+
+- Add default value of ``reruns=1`` if ``pytest.mark.flaky()`` is called
+  without arguments.
+
+- Also offer a distribution as universal wheel. (Thanks to `@tltx`_ for the PR)
+
+.. _@tltx: https://github.com/tltx
+
+
+2.0.1 (2016-08-10)
+------------------
+
+- Prepare CLI options to pytest 3.0, to avoid a deprecation warning.
+
+- Fix error due to missing CHANGES.rst when creating the source distribution
+  by adding a MANIFEST.in.
+
+
+2.0.0 (2016-04-06)
+------------------
+
+- Drop support for Python 3.2, since supporting it became too much of a hassle.
+  (Reason: Virtualenv 14+ / PIP 8+ do not support Python 3.2 anymore.)
+
+
+1.0.2 (2016-03-29)
+------------------
+
+- Add support for `--resultlog` option by parsing reruns accordingly. (#28)
+
+
+1.0.1 (2016-02-02)
+------------------
+
+- Improve package description and include CHANGELOG into description.
+
+
+1.0.0 (2016-02-02)
+------------------
+
+- Rewrite to use newer API of pytest >= 2.3.0
+
+- Improve support for pytest-xdist by only logging the final result.
+  (Logging intermediate results will finish the test rather than rerunning it.)
index 5e532664f79c866b8ad3e7cacb49c7750bc895b5..f17d4e13283de58e6ac088edd730b4a45b506da3 100644 (file)
@@ -10,17 +10,22 @@ re-runs tests to eliminate intermittent failures.
 .. image:: https://img.shields.io/pypi/v/pytest-rerunfailures.svg
    :target: https://pypi.python.org/pypi/pytest-rerunfailures/
    :alt: PyPI
-.. image:: https://img.shields.io/travis/pytest-dev/pytest-rerunfailures.svg
-   :target: https://travis-ci.org/pytest-dev/pytest-rerunfailures/
-   :alt: Travis
+.. image:: https://github.com/pytest-dev/pytest-rerunfailures/workflows/Test/badge.svg
+   :target: https://github.com/pytest-dev/pytest-rerunfailures/actions
+   :alt: GitHub Actions
 
 Requirements
 ------------
 
 You will need the following prerequisites in order to use pytest-rerunfailures:
 
-- Python 3.5, up to 3.8, or PyPy3
-- pytest 5.0 or newer
+- Python 3.7, up to 3.12, or PyPy3
+- pytest 7.0 or newer
+
+This plugin can recover from a hard crash with the following optional
+prerequisites:
+
+- pytest-xdist 2.3.0 or newer
 
 This package is currently tested against the last 5 minor pytest releases. In
 case you work with an older version of pytest you should consider updating or
@@ -35,6 +40,17 @@ To install pytest-rerunfailures:
 
   $ pip install pytest-rerunfailures
 
+Recover from hard crashes
+-------------------------
+
+If one or more tests trigger a hard crash (for example, a segfault), this
+plugin is ordinarily unable to rerun them. However, if a compatible version of
+pytest-xdist is installed and the tests are run under pytest-xdist with the
+``-n`` flag, this plugin can rerun crashed tests, provided the workers and the
+controller are on the same LAN (which is almost always the case, since they
+usually run on the same machine). Otherwise, this functionality may not work.
+
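+As a minimal sketch (hypothetical test name, and assuming pytest-xdist is
+installed and the suite is run with e.g. ``pytest -n 2 --reruns 2``), a test
+that kills its own worker process demonstrates the recovery behavior:
+
+.. code-block:: python
+
+   import os
+   import signal
+
+   def test_hard_crash():
+       # Simulate a segfault (POSIX only): the worker process dies, and
+       # with pytest-xdist installed the plugin can still rerun the test.
+       os.kill(os.getpid(), signal.SIGSEGV)
+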
 Re-run all failures
 -------------------
 
@@ -59,20 +75,45 @@ Re-run all failures matching certain expressions
 ------------------------------------------------
 
 To re-run only those failures that match a certain list of expressions, use the
-``--only-rerun`` flag and pass it a regular expression. For example, the following would
-only rerun those errors that match ``AssertionError``:
+``--only-rerun`` flag and pass it a regular expression. For example,
+the following would only rerun those errors that match ``AssertionError``:
 
 .. code-block:: bash
 
    $ pytest --reruns 5 --only-rerun AssertionError
 
-Passing the flag multiple times accumulates the arguments, so the following would only rerun
-those errors that match ``AssertionError`` or ``ValueError``:
+Passing the flag multiple times accumulates the arguments, so the following
+would only rerun those errors that match ``AssertionError`` or ``ValueError``:
 
 .. code-block:: bash
 
    $ pytest --reruns 5 --only-rerun AssertionError --only-rerun ValueError
 
+Re-run all failures other than matching certain expressions
+-----------------------------------------------------------
+
+To re-run only those failures that do not match a certain list of expressions,
+use the ``--rerun-except`` flag and pass it a regular expression. For example,
+the following would only rerun errors that do not match ``AssertionError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --rerun-except AssertionError
+
+Passing the flag multiple times accumulates the arguments, so the following
+would only rerun those errors that match neither ``AssertionError`` nor ``OSError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --rerun-except AssertionError --rerun-except OSError
+
+.. note::
+
+   When the ``AssertionError`` comes from the use of the ``assert`` keyword,
+   use ``--rerun-except assert`` instead::
+
+      $ pytest --reruns 5 --rerun-except assert
+
 Re-run individual failures
 --------------------------
 
@@ -99,6 +140,52 @@ You can also specify the re-run delay time in the marker:
       import random
       assert random.choice([True, False])
 
+You can also specify an optional ``condition`` in the re-run marker:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(reruns=5, condition=sys.platform.startswith("win32"))
+   def test_example():
+       import random
+       assert random.choice([True, False])
+
+Exception filtering can be accomplished by specifying regular expressions for
+``only_rerun`` and ``rerun_except``. They override the ``--only-rerun`` and
+``--rerun-except`` command line arguments, respectively.
+
+Arguments can be a single string:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(rerun_except="AssertionError")
+   def test_example():
+       raise AssertionError()
+
+Or a list of strings:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(only_rerun=["AssertionError", "ValueError"])
+   def test_example():
+       raise AssertionError()
+
+
+You can use ``@pytest.mark.flaky(condition)`` similarly to
+``@pytest.mark.skipif(condition)``; see `pytest-mark-skipif
+<https://docs.pytest.org/en/6.2.x/reference.html#pytest-mark-skipif>`_:
+
+.. code-block:: python
+
+    @pytest.mark.flaky(reruns=2, condition="sys.platform.startswith('win32')")
+    def test_example():
+        import random
+        assert random.choice([True, False])
+
+
+    # exactly the same as the above
+    @pytest.mark.flaky(reruns=2, condition=sys.platform.startswith("win32"))
+    def test_example():
+        import random
+        assert random.choice([True, False])
+
+Note that the test will re-run for any ``condition`` that is truthy.
+
 Output
 ------
 
@@ -131,6 +218,9 @@ Compatibility
 * This plugin may *not* be used with class, module, and package level fixtures.
 * This plugin is *not* compatible with pytest-xdist's --looponfail flag.
 * This plugin is *not* compatible with the core --pdb flag.
+* This plugin is *not* compatible with the plugin
+  `flaky <https://pypi.org/project/flaky/>`_; you can have either
+  ``pytest-rerunfailures`` or ``flaky`` installed, but not both.
 
 Resources
 ---------
@@ -141,7 +231,8 @@ Resources
 Development
 -----------
 
-* Test execution count can be retrieved from the ``execution_count`` attribute in test ``item``'s object. Example:
+* The test execution count can be retrieved from the ``execution_count``
+  attribute on the test ``item`` object. Example:
 
   .. code-block:: python
 
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644 (file)
index 0000000..9a86ff9
--- /dev/null
@@ -0,0 +1,5 @@
+[build-system]
+requires = [
+    "setuptools >= 40.0",
+]
+build-backend = "setuptools.build_meta"
index fcbc91634eafd67a9440a933c78d1dd96b320d17..204b2526d31873978f5c9ca5bf94e94d4f9d4db4 100644 (file)
--- a/pytest_rerunfailures.egg-info/PKG-INFO
+++ b/pytest_rerunfailures.egg-info/PKG-INFO
-Metadata-Version: 1.2
+Metadata-Version: 2.1
 Name: pytest-rerunfailures
-Version: 9.1.1
+Version: 13.0
 Summary: pytest plugin to re-run tests to eliminate flaky failures
 Home-page: https://github.com/pytest-dev/pytest-rerunfailures
 Author: Leah Klearman
 Author-email: lklrmn@gmail.com
 License: Mozilla Public License 2.0 (MPL 2.0)
-Description: .. contents::
-        
-        pytest-rerunfailures
-        ====================
-        
-        pytest-rerunfailures is a plugin for `pytest <https://pytest.org>`_ that
-        re-runs tests to eliminate intermittent failures.
-        
-        .. image:: https://img.shields.io/badge/license-MPL%202.0-blue.svg
-           :target: https://github.com/pytest-dev/pytest-rerunfailures/blob/master/LICENSE
-           :alt: License
-        .. image:: https://img.shields.io/pypi/v/pytest-rerunfailures.svg
-           :target: https://pypi.python.org/pypi/pytest-rerunfailures/
-           :alt: PyPI
-        .. image:: https://img.shields.io/travis/pytest-dev/pytest-rerunfailures.svg
-           :target: https://travis-ci.org/pytest-dev/pytest-rerunfailures/
-           :alt: Travis
-        
-        Requirements
-        ------------
-        
-        You will need the following prerequisites in order to use pytest-rerunfailures:
-        
-        - Python 3.5, up to 3.8, or PyPy3
-        - pytest 5.0 or newer
-        
-        This package is currently tested against the last 5 minor pytest releases. In
-        case you work with an older version of pytest you should consider updating or
-        use one of the earlier versions of this package.
-        
-        Installation
-        ------------
-        
-        To install pytest-rerunfailures:
-        
-        .. code-block:: bash
-        
-          $ pip install pytest-rerunfailures
-        
-        Re-run all failures
-        -------------------
-        
-        To re-run all test failures, use the ``--reruns`` command line option with the
-        maximum number of times you'd like the tests to run:
-        
-        .. code-block:: bash
-        
-          $ pytest --reruns 5
-        
-        Failed fixture or setup_class will also be re-executed.
-        
-        To add a delay time between re-runs use the ``--reruns-delay`` command line
-        option with the amount of seconds that you would like wait before the next
-        test re-run is launched:
-        
-        .. code-block:: bash
-        
-           $ pytest --reruns 5 --reruns-delay 1
-        
-        Re-run all failures matching certain expressions
-        ------------------------------------------------
-        
-        To re-run only those failures that match a certain list of expressions, use the
-        ``--only-rerun`` flag and pass it a regular expression. For example, the following would
-        only rerun those errors that match ``AssertionError``:
-        
-        .. code-block:: bash
-        
-           $ pytest --reruns 5 --only-rerun AssertionError
-        
-        Passing the flag multiple times accumulates the arguments, so the following would only rerun
-        those errors that match ``AssertionError`` or ``ValueError``:
-        
-        .. code-block:: bash
-        
-           $ pytest --reruns 5 --only-rerun AssertionError --only-rerun ValueError
-        
-        Re-run individual failures
-        --------------------------
-        
-        To mark individual tests as flaky, and have them automatically re-run when they
-        fail, add the ``flaky`` mark with the maximum number of times you'd like the
-        test to run:
-        
-        .. code-block:: python
-        
-          @pytest.mark.flaky(reruns=5)
-          def test_example():
-              import random
-              assert random.choice([True, False])
-        
-        Note that when teardown fails, two reports are generated for the case, one for
-        the test case and the other for the teardown error.
-        
-        You can also specify the re-run delay time in the marker:
-        
-        .. code-block:: python
-        
-          @pytest.mark.flaky(reruns=5, reruns_delay=2)
-          def test_example():
-              import random
-              assert random.choice([True, False])
-        
-        Output
-        ------
-        
-        Here's an example of the output provided by the plugin when run with
-        ``--reruns 2`` and ``-r aR``::
-        
-          test_report.py RRF
-        
-          ================================== FAILURES ==================================
-          __________________________________ test_fail _________________________________
-        
-              def test_fail():
-          >       assert False
-          E       assert False
-        
-          test_report.py:9: AssertionError
-          ============================ rerun test summary info =========================
-          RERUN test_report.py::test_fail
-          RERUN test_report.py::test_fail
-          ============================ short test summary info =========================
-          FAIL test_report.py::test_fail
-          ======================= 1 failed, 2 rerun in 0.02 seconds ====================
-        
-        Note that output will show all re-runs. Tests that fail on all the re-runs will
-        be marked as failed.
-        
-        Compatibility
-        -------------
-        
-        * This plugin may *not* be used with class, module, and package level fixtures.
-        * This plugin is *not* compatible with pytest-xdist's --looponfail flag.
-        * This plugin is *not* compatible with the core --pdb flag.
-        
-        Resources
-        ---------
-        
-        - `Issue Tracker <https://github.com/pytest-dev/pytest-rerunfailures/issues>`_
-        - `Code <https://github.com/pytest-dev/pytest-rerunfailures/>`_
-        
-        Development
-        -----------
-        
-        * Test execution count can be retrieved from the ``execution_count`` attribute in test ``item``'s object. Example:
-        
-          .. code-block:: python
-        
-            @hookimpl(tryfirst=True)
-            def pytest_runtest_makereport(item, call):
-                print(item.execution_count)
-        
-        
-        Changelog
-        =========
-        
-        9.1.1 (2020-09-29)
-        ------------------
-        
-        Compatibility fix.
-        ++++++++++++++++++
-        
-        - Ignore ``--result-log`` command line option when used together with ``pytest
-          >= 6.1.0``, as it was removed there. This is a quick fix, use an older
-          version of pytest, if you want to keep this feature for now.
-          (Thanks to `@ntessore`_ for the PR)
-        
-        - Support up to pytest 6.1.0.
-        
-        .. _@ntessore: https://github.com/ntessore
-        
-        
-        9.1 (2020-08-26)
-        ----------------
-        
-        Features
-        ++++++++
-        
-        - Add a new flag ``--only-rerun`` to allow for users to rerun only certain
-          errors.
-        
-        Other changes
-        +++++++++++++
-        
-        - Drop dependency on ``mock``.
-        
-        - Add support for pre-commit and add a linting tox target.
-          (`#117 <https://github.com/pytest-dev/pytest-rerunfailures/pull/117>`_)
-          (PR from `@gnikonorov`_)
-        
-        .. _@gnikonorov: https://github.com/gnikonorov
-        
-        
-        9.0 (2020-03-18)
-        ----------------
-        
-        Backwards incompatible changes
-        ++++++++++++++++++++++++++++++
-        
-        - Drop support for pytest version 4.4, 4.5 and 4.6.
-        
-        - Drop support for Python 2.7.
-        
-        
-        Features
-        ++++++++
-        
-        - Add support for pytest 5.4.
-        
-        - Add support for Python 3.8.
-        
-        
-        8.0 (2019-11-18)
-        ----------------
-        
-        Backwards incompatible changes
-        ++++++++++++++++++++++++++++++
-        
-        - Drop support for pytest version 3.10, 4.0, 4.1, 4.2 and 4.3
-        
-        - Drop support for Python 3.4.
-        
-        Features
-        ++++++++
-        
-        - Add support for pytest version 4.4, 4.5, 4.6, 5.0, 5.1 and 5.2.
-        
-        Bug fixes
-        +++++++++
-        
-        - Explicitly depend on setuptools to ensure installation when working in
-          environments without it.
-          (`#98 <https://github.com/pytest-dev/pytest-rerunfailures/pull/98>`_)
-          (PR from `@Eric-Arellano`_)
-        
-        .. _@Eric-Arellano: https://github.com/Eric-Arellano
-        
-        
-        7.0 (2019-03-28)
-        ----------------
-        
-        Backwards incompatible changes
-        ++++++++++++++++++++++++++++++
-        
-        - Drop support for pytest version 3.8 and 3.9.
-        
-        Features
-        ++++++++
-        
-        - Add support for pytest version 4.2 and 4.3.
-        
-        Bug fixes
-        +++++++++
-        
-        - Fixed #83 issue about ignored ``pytest_runtest_logfinish`` hooks.
-          (`#83 <https://github.com/pytest-dev/pytest-rerunfailures/issues/83>`_)
-          (PR from `@KillAChicken`_)
-        
-        .. _@KillAChicken: https://github.com/KillAChicken
-        
-        
-        6.0 (2019-01-08)
-        ----------------
-        
-        Backwards incompatible changes
-        ++++++++++++++++++++++++++++++
-        
-        - Drop support for pytest version 3.6 and 3.7.
-        
-        Features
-        ++++++++
-        
-        - Add support for pytest version 4.0 and 4.1.
-        
-        Bug fixes
-        +++++++++
-        
-        - Fixed #77 regression issue introduced in 4.2 related to the ``rerun``
-          attribute on the test report.
-          (`#77 <https://github.com/pytest-dev/pytest-rerunfailures/issues/77>`_)
-          (Thanks to `@RibeiroAna`_ for the PR).
-        
-        .. _@RibeiroAna: https://github.com/RibeiroAna
-        
-        
-        5.0 (2018-11-06)
-        ----------------
-        
-        - Drop support for pytest versions < 3.6 to reduce the maintenance burden.
-        
-        - Add support up to pytest version 3.10. Thus supporting the newest 5 pytest
-          releases.
-        
-        - Add support for Python 3.7.
-        
-        - Fix issue can occur when used together with `pytest-flake8`
-          (`#73 <https://github.com/pytest-dev/pytest-rerunfailures/issues/73>`_)
-        
-        
-        4.2 (2018-10-04)
-        ----------------
-        
-        - Fixed #64 issue related to ``setup_class`` and ``fixture`` executions on rerun (Thanks to
-          `@OlegKuzovkov`_ for the PR).
-        
-        - Added new ``execution_count`` attribute to reflect the number of test case executions according to #67 issue.
-          (Thanks to `@OlegKuzovkov`_ for the PR).
-        
-        .. _@OlegKuzovkov: https://github.com/OlegKuzovkov
-        
-        
-        4.1 (2018-05-23)
-        ----------------
-        
-        - Add support for pytest 3.6 by using ``Node.get_closest_marker()`` (Thanks to
-          `@The-Compiler`_ for the PR).
-        
-        .. _@The-Compiler: https://github.com/The-Compiler
-        
-        4.0 (2017-12-23)
-        ----------------
-        
-        - Added option to add a delay time between test re-runs (Thanks to `@Kanguros`_
-          for the PR).
-        
-        - Added support for pytest >= 3.3.
-        
-        - Drop support for pytest < 2.8.7.
-        
-        .. _@Kanguros: https://github.com/Kanguros
-        
-        
-        3.1 (2017-08-29)
-        ----------------
-        
-        - Restored compatibility with pytest-xdist. (Thanks to `@davehunt`_ for the PR)
-        
-        .. _@davehunt: https://github.com/davehunt
-        
-        
-        3.0 (2017-08-17)
-        ----------------
-        
-        - Add support for Python 3.6.
-        
-        - Add support for pytest 2.9 up to 3.2
-        
-        - Drop support for Python 2.6 and 3.3.
-        
-        - Drop support for pytest < 2.7.
-        
-        
-        2.2 (2017-06-23)
-        ----------------
-        
-        - Ensure that other plugins can run after this one, in case of a global setting
-          ``--rerun=0``. (Thanks to `@sublee`_ for the PR)
-        
-        .. _@sublee: https://github.com/sublee
-        
-        2.1.0 (2016-11-01)
-        ------------------
-        
-        - Add default value of ``reruns=1`` if ``pytest.mark.flaky()`` is called
-          without arguments.
-        
-        - Also offer a distribution as universal wheel. (Thanks to `@tltx`_ for the PR)
-        
-        .. _@tltx: https://github.com/tltx
-        
-        
-        2.0.1 (2016-08-10)
-        -----------------------------
-        
-        - Prepare CLI options to pytest 3.0, to avoid a deprecation warning.
-        
-        - Fix error due to missing CHANGES.rst when creating the source distribution
-          by adding a MANIFEST.in.
-        
-        
-        2.0.0 (2016-04-06)
-        ------------------
-        
-        - Drop support for Python 3.2, since supporting it became too much of a hassle.
-          (Reason: Virtualenv 14+ / PIP 8+ do not support Python 3.2 anymore.)
-        
-        
-        1.0.2 (2016-03-29)
-        ------------------
-        
-        - Add support for `--resultlog` option by parsing reruns accordingly. (#28)
-        
-        
-        1.0.1 (2016-02-02)
-        ------------------
-        
-        - Improve package description and include CHANGELOG into description.
-        
-        
-        1.0.0 (2016-02-02)
-        ------------------
-        
-        - Rewrite to use newer API of pytest >= 2.3.0
-        
-        - Improve support for pytest-xdist by only logging the final result.
-          (Logging intermediate results will finish the test rather rerunning it.)
-        
 Keywords: py.test pytest rerun failures flaky
-Platform: UNKNOWN
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Framework :: Pytest
 Classifier: Intended Audience :: Developers
@@ -426,10 +17,687 @@ Classifier: Operating System :: MacOS :: MacOS X
 Classifier: Topic :: Software Development :: Quality Assurance
 Classifier: Topic :: Software Development :: Testing
 Classifier: Topic :: Utilities
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.7
 Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
-Requires-Python: >=3.5
+Requires-Python: >=3.7
+License-File: LICENSE
+Requires-Dist: packaging>=17.1
+Requires-Dist: pytest>=7
+Requires-Dist: importlib-metadata>=1; python_version < "3.8"
+
+.. contents::
+
+pytest-rerunfailures
+====================
+
+pytest-rerunfailures is a plugin for `pytest <https://pytest.org>`_ that
+re-runs tests to eliminate intermittent failures.
+
+.. image:: https://img.shields.io/badge/license-MPL%202.0-blue.svg
+   :target: https://github.com/pytest-dev/pytest-rerunfailures/blob/master/LICENSE
+   :alt: License
+.. image:: https://img.shields.io/pypi/v/pytest-rerunfailures.svg
+   :target: https://pypi.python.org/pypi/pytest-rerunfailures/
+   :alt: PyPI
+.. image:: https://github.com/pytest-dev/pytest-rerunfailures/workflows/Test/badge.svg
+   :target: https://github.com/pytest-dev/pytest-rerunfailures/actions
+   :alt: GitHub Actions
+
+Requirements
+------------
+
+You will need the following prerequisites in order to use pytest-rerunfailures:
+
+- Python 3.7 up to 3.12, or PyPy3
+- pytest 7.0 or newer
+
+This plugin can recover from a hard crash with the following optional
+prerequisites:
+
+- pytest-xdist 2.3.0 or newer
+
+This package is currently tested against the last 5 minor pytest releases. If
+you work with an older version of pytest, consider updating pytest or using an
+earlier version of this package.
+
+Installation
+------------
+
+To install pytest-rerunfailures:
+
+.. code-block:: bash
+
+  $ pip install pytest-rerunfailures
+
+Recover from hard crashes
+-------------------------
+
+If one or more tests trigger a hard crash (for example, a segfault), this
+plugin is ordinarily unable to rerun them. However, if a compatible version of
+pytest-xdist is installed and the tests are run under pytest-xdist with the
+``-n`` flag, this plugin can rerun crashed tests, provided the workers and the
+controller are on the same LAN. This is almost always the case, since the
+workers and the controller usually run on the same machine; when it is not,
+crash recovery may not work.
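+
+For example, a minimal invocation (assuming pytest-xdist is installed) might
+look like:
+
+.. code-block:: bash
+
+  $ pytest -n 2 --reruns 3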
+
+Re-run all failures
+-------------------
+
+To re-run all test failures, use the ``--reruns`` command line option with the
+maximum number of times you'd like the tests to run:
+
+.. code-block:: bash
+
+  $ pytest --reruns 5
+
+A failed fixture or ``setup_class`` will also be re-executed.
+
+To add a delay between re-runs, use the ``--reruns-delay`` command line
+option with the number of seconds you would like to wait before the next
+test re-run is launched:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --reruns-delay 1
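+
+Both values can also be set in your test suite's ini-style configuration,
+through the ``reruns`` and ``reruns_delay`` ini options this plugin registers.
+A minimal sketch:
+
+.. code-block:: ini
+
+   [pytest]
+   reruns = 5
+   reruns_delay = 1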
+
+Re-run all failures matching certain expressions
+------------------------------------------------
+
+To re-run only those failures that match a certain list of expressions, use the
+``--only-rerun`` flag and pass it a regular expression. For example,
+the following would only rerun those errors that match ``AssertionError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --only-rerun AssertionError
+
+Passing the flag multiple times accumulates the arguments, so the following
+would only rerun those errors that match ``AssertionError`` or ``ValueError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --only-rerun AssertionError --only-rerun ValueError
+
+Re-run all failures other than matching certain expressions
+-----------------------------------------------------------
+
+To re-run only those failures that do not match a certain list of expressions,
+use the ``--rerun-except`` flag and pass it a regular expression. For example,
+the following would rerun only those failures that do not match
+``AssertionError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --rerun-except AssertionError
+
+Passing the flag multiple times accumulates the arguments, so the following
+would rerun only those failures that match neither ``AssertionError`` nor
+``OSError``:
+
+.. code-block:: bash
+
+   $ pytest --reruns 5 --rerun-except AssertionError --rerun-except OSError
+
+.. note::
+
+   When the ``AssertionError`` comes from the use of the ``assert`` keyword,
+   use ``--rerun-except assert`` instead::
+
+      $ pytest --reruns 5 --rerun-except assert
+
+Re-run individual failures
+--------------------------
+
+To mark individual tests as flaky, and have them automatically re-run when they
+fail, add the ``flaky`` mark with the maximum number of times you'd like the
+test to run:
+
+.. code-block:: python
+
+  @pytest.mark.flaky(reruns=5)
+  def test_example():
+      import random
+      assert random.choice([True, False])
+
+Note that when teardown fails, two reports are generated for the case, one for
+the test case and the other for the teardown error.
+
+You can also specify the re-run delay time in the marker:
+
+.. code-block:: python
+
+  @pytest.mark.flaky(reruns=5, reruns_delay=2)
+  def test_example():
+      import random
+      assert random.choice([True, False])
+
+You can also specify an optional ``condition`` in the re-run marker:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(reruns=5, condition=sys.platform.startswith("win32"))
+   def test_example():
+      import random
+      assert random.choice([True, False])
+
+Exception filtering can be accomplished by specifying regular expressions for
+``only_rerun`` and ``rerun_except``. They override the ``--only-rerun`` and
+``--rerun-except`` command line arguments, respectively.
+
+Arguments can be a single string:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(rerun_except="AssertionError")
+   def test_example():
+       raise AssertionError()
+
+Or a list of strings:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(only_rerun=["AssertionError", "ValueError"])
+   def test_example():
+       raise AssertionError()
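+
+Because marker-level filters take precedence, a sketch like the following
+(``test_override`` is a hypothetical name) would rerun only ``ValueError``
+failures for this test, even if ``--only-rerun AssertionError`` were passed on
+the command line:
+
+.. code-block:: python
+
+   @pytest.mark.flaky(reruns=3, only_rerun="ValueError")
+   def test_override():
+       raise ValueError()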
+
+
+You can use ``@pytest.mark.flaky(condition)`` similarly to
+``@pytest.mark.skipif(condition)``; see
+`pytest-mark-skipif <https://docs.pytest.org/en/6.2.x/reference.html#pytest-mark-skipif>`_:
+
+.. code-block:: python
+
+    @pytest.mark.flaky(reruns=2, condition="sys.platform.startswith('win32')")
+    def test_example():
+        import random
+        assert random.choice([True, False])
+
+
+    # exactly the same as the above
+    @pytest.mark.flaky(reruns=2, condition=sys.platform.startswith("win32"))
+    def test_example():
+        import random
+        assert random.choice([True, False])
+
+Note that the test will re-run for any ``condition`` that is truthy.
+
+Output
+------
+
+Here's an example of the output provided by the plugin when run with
+``--reruns 2`` and ``-r aR``::
+
+  test_report.py RRF
+
+  ================================== FAILURES ==================================
+  __________________________________ test_fail _________________________________
+
+      def test_fail():
+  >       assert False
+  E       assert False
+
+  test_report.py:9: AssertionError
+  ============================ rerun test summary info =========================
+  RERUN test_report.py::test_fail
+  RERUN test_report.py::test_fail
+  ============================ short test summary info =========================
+  FAIL test_report.py::test_fail
+  ======================= 1 failed, 2 rerun in 0.02 seconds ====================
+
+Note that the output shows all re-runs. Tests that fail on all re-runs are
+marked as failed.
+
+Compatibility
+-------------
+
+* This plugin may *not* be used with class, module, and package level fixtures.
+* This plugin is *not* compatible with pytest-xdist's --looponfail flag.
+* This plugin is *not* compatible with the core --pdb flag.
+* This plugin is *not* compatible with the plugin
+  `flaky <https://pypi.org/project/flaky/>`_; you can use either
+  ``pytest-rerunfailures`` or ``flaky``, but not both.
+
+Resources
+---------
+
+- `Issue Tracker <https://github.com/pytest-dev/pytest-rerunfailures/issues>`_
+- `Code <https://github.com/pytest-dev/pytest-rerunfailures/>`_
+
+Development
+-----------
+
+* Test execution count can be retrieved from the ``execution_count`` attribute
+  on the test ``item`` object, e.g. from a hook in ``conftest.py``. Example:
+
+  .. code-block:: python
+
+    import pytest
+
+    @pytest.hookimpl(tryfirst=True)
+    def pytest_runtest_makereport(item, call):
+        print(item.execution_count)
+
+Changelog
+=========
+
+13.0 (2023-11-22)
+-----------------
+
+Breaking changes
+++++++++++++++++
+
+- Drop support for pytest < 7.0.
+
+Features
+++++++++
+
+- Add support for Python 3.12.
+
+Bug fixes
++++++++++
+
+- Fix crashitem names mismatch between client and server.
+  (`#172 <https://github.com/pytest-dev/pytest-rerunfailures/issues/172>`_)
+
+- Fix crash when setup fails with --rerun-except flag.
+  (`#230 <https://github.com/pytest-dev/pytest-rerunfailures/issues/230>`_)
+
+12.0 (2023-07-05)
+-----------------
+
+Breaking changes
+++++++++++++++++
+
+- Drop support for pytest < 6.2.
+
+Features
+++++++++
+
+- Add ``only_rerun`` and ``rerun_except`` arguments to ``@pytest.mark.flaky`` marker.
+
+- Add support for pytest 7.3, 7.4.
+
+Bug fixes
++++++++++
+
+- Failures are now rerun only if they match at least one ``--only-rerun``
+  pattern (if given) and none of the ``--rerun-except`` patterns. Previously,
+  using both ``--only-rerun`` and ``--rerun-except`` together could cause
+  failures to be rerun even if they did not match any ``--only-rerun``
+  pattern, and when using multiple ``--rerun-except`` patterns, all failures
+  would be rerun unless they matched every pattern.
+  (`#225 <https://github.com/pytest-dev/pytest-rerunfailures/issues/225>`_)
+
+
+11.1.2 (2023-03-09)
+-------------------
+
+Bug fixes
++++++++++
+
+- Execute teardown when test was skipped in setup phase of a fixture.
+
+
+11.1.1 (2023-02-17)
+-------------------
+
+Bug fixes
++++++++++
+
+- Fix crash during teardown when runtest protocol hook is overwritten by
+  another plugin.
+
+- Fix crash during teardown when TestCase class is used as base class.
+
+
+11.1 (2023-02-09)
+-----------------
+
+Bug fixes
++++++++++
+
+- Run teardown of session, class, ... scoped fixtures only once after rerunning tests
+
+Features
+++++++++
+
+- Expose `reruns` and `reruns_delay` through `pytest.ini` file.
+
+
+11.0 (2023-01-12)
+-----------------
+
+Breaking changes
+++++++++++++++++
+
+- Drop support for Python 3.6.
+
+- Drop support for pytest < 6.
+
+Bug fixes
++++++++++
+
+- Fix crash when pytest-xdist is installed but disabled.
+  (Thanks to `@mgorny <https://github.com/mgorny>`_ for the PR.)
+
+- Fix crash when xfail(strict=True) mark is used with --only-rerun flag.
+
+Features
+++++++++
+
+- Added option `--rerun-except` to rerun failed tests other than those
+  matching the given error.
+
+- Add support for Python 3.11.
+
+- Add support for pytest 7.0, 7.1, 7.2.
+
+
+10.2 (2021-09-17)
+-----------------
+
+Features
+++++++++
+
+- Allow recovery from crashed tests with pytest-xdist.
+- Add support for Python 3.10 (as of Python 3.10.rc2).
+  (Thanks to `@hugovk <https://github.com/hugovk>`_ for the PR.)
+
+
+10.1 (2021-07-02)
+-----------------
+
+Features
+++++++++
+
+- Allow using a ``str`` as condition for
+  ``@pytest.mark.flaky(condition)``
+  which gets evaluated dynamically similarly to
+  ``@pytest.mark.skipif(condition)``.
+  (`#162 <https://github.com/pytest-dev/pytest-rerunfailures/pull/162>`_
+  provided by `@15klli <https://github.com/15klli>`_)
+
+10.0 (2021-05-26)
+-----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for Python 3.5.
+
+- Drop support for pytest < 5.3.
+
+Features
+++++++++
+
+- Add ``condition`` keyword argument to the re-run marker.
+  (Thanks to `@BeyondEvil`_ for the PR.)
+
+- Add support for Python 3.9.
+  (Thanks to `@digitronik`_ for the PR.)
+
+- Add support for pytest 6.3.
+  (Thanks to `@bluetech`_ for the PR.)
+
+- Add compatibility with ``pytest-xdist >= 2.0``.
+  (Thanks to `@bluetech`_ for the PR.)
+
+Other changes
++++++++++++++
+
+- Check for the resultlog by feature and not by version as pytest master does
+  not provide a consistent version.
+
+.. _@BeyondEvil: https://github.com/BeyondEvil
+.. _@digitronik: https://github.com/digitronik
+.. _@bluetech: https://github.com/bluetech
+
+9.1.1 (2020-09-29)
+------------------
+
+Compatibility fix.
+++++++++++++++++++
+
+- Ignore the ``--result-log`` command line option when used together with
+  ``pytest >= 6.1.0``, as it was removed there. This is a quick fix; use an
+  older version of pytest if you want to keep this feature for now.
+  (Thanks to `@ntessore`_ for the PR)
+
+- Support up to pytest 6.1.0.
+
+.. _@ntessore: https://github.com/ntessore
+
+
+9.1 (2020-08-26)
+----------------
+
+Features
+++++++++
+
+- Add a new flag ``--only-rerun`` to allow for users to rerun only certain
+  errors.
+
+Other changes
++++++++++++++
+
+- Drop dependency on ``mock``.
+
+- Add support for pre-commit and add a linting tox target.
+  (`#117 <https://github.com/pytest-dev/pytest-rerunfailures/pull/117>`_)
+  (PR from `@gnikonorov`_)
+
+.. _@gnikonorov: https://github.com/gnikonorov
+
+
+9.0 (2020-03-18)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest version 4.4, 4.5 and 4.6.
+
+- Drop support for Python 2.7.
+
+
+Features
+++++++++
+
+- Add support for pytest 5.4.
+
+- Add support for Python 3.8.
+
+
+8.0 (2019-11-18)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest version 3.10, 4.0, 4.1, 4.2 and 4.3
+
+- Drop support for Python 3.4.
+
+Features
+++++++++
+
+- Add support for pytest version 4.4, 4.5, 4.6, 5.0, 5.1 and 5.2.
+
+Bug fixes
++++++++++
+
+- Explicitly depend on setuptools to ensure installation when working in
+  environments without it.
+  (`#98 <https://github.com/pytest-dev/pytest-rerunfailures/pull/98>`_)
+  (PR from `@Eric-Arellano`_)
+
+.. _@Eric-Arellano: https://github.com/Eric-Arellano
+
+
+7.0 (2019-03-28)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest version 3.8 and 3.9.
+
+Features
+++++++++
+
+- Add support for pytest version 4.2 and 4.3.
+
+Bug fixes
++++++++++
+
+- Fixed #83 issue about ignored ``pytest_runtest_logfinish`` hooks.
+  (`#83 <https://github.com/pytest-dev/pytest-rerunfailures/issues/83>`_)
+  (PR from `@KillAChicken`_)
+
+.. _@KillAChicken: https://github.com/KillAChicken
+
+
+6.0 (2019-01-08)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest version 3.6 and 3.7.
+
+Features
+++++++++
+
+- Add support for pytest version 4.0 and 4.1.
+
+Bug fixes
++++++++++
+
+- Fixed #77 regression issue introduced in 4.2 related to the ``rerun``
+  attribute on the test report.
+  (`#77 <https://github.com/pytest-dev/pytest-rerunfailures/issues/77>`_)
+  (Thanks to `@RibeiroAna`_ for the PR).
+
+.. _@RibeiroAna: https://github.com/RibeiroAna
+
+
+5.0 (2018-11-06)
+----------------
+
+- Drop support for pytest versions < 3.6 to reduce the maintenance burden.
+
+- Add support up to pytest version 3.10. Thus supporting the newest 5 pytest
+  releases.
+
+- Add support for Python 3.7.
+
+- Fix an issue that can occur when used together with `pytest-flake8`.
+  (`#73 <https://github.com/pytest-dev/pytest-rerunfailures/issues/73>`_)
+
+
+4.2 (2018-10-04)
+----------------
+
+- Fixed #64 issue related to ``setup_class`` and ``fixture`` executions on
+  rerun (Thanks to `@OlegKuzovkov`_ for the PR).
+
+- Added new ``execution_count`` attribute to reflect the number of test case
+  executions according to #67 issue. (Thanks to `@OlegKuzovkov`_ for the PR).
+
+.. _@OlegKuzovkov: https://github.com/OlegKuzovkov
+
+
+4.1 (2018-05-23)
+----------------
+
+- Add support for pytest 3.6 by using ``Node.get_closest_marker()`` (Thanks to
+  `@The-Compiler`_ for the PR).
+
+.. _@The-Compiler: https://github.com/The-Compiler
+
+4.0 (2017-12-23)
+----------------
+
+- Added option to add a delay time between test re-runs (Thanks to `@Kanguros`_
+  for the PR).
+
+- Added support for pytest >= 3.3.
+
+- Drop support for pytest < 2.8.7.
+
+.. _@Kanguros: https://github.com/Kanguros
+
+
+3.1 (2017-08-29)
+----------------
+
+- Restored compatibility with pytest-xdist. (Thanks to `@davehunt`_ for the PR)
+
+.. _@davehunt: https://github.com/davehunt
+
+
+3.0 (2017-08-17)
+----------------
+
+- Add support for Python 3.6.
+
+- Add support for pytest 2.9 up to 3.2
+
+- Drop support for Python 2.6 and 3.3.
+
+- Drop support for pytest < 2.7.
+
+
+2.2 (2017-06-23)
+----------------
+
+- Ensure that other plugins can run after this one, in case of a global setting
+  ``--reruns=0``. (Thanks to `@sublee`_ for the PR)
+
+.. _@sublee: https://github.com/sublee
+
+2.1.0 (2016-11-01)
+------------------
+
+- Add default value of ``reruns=1`` if ``pytest.mark.flaky()`` is called
+  without arguments.
+
+- Also offer a distribution as universal wheel. (Thanks to `@tltx`_ for the PR)
+
+.. _@tltx: https://github.com/tltx
+
+
+2.0.1 (2016-08-10)
+------------------
+
+- Prepare CLI options to pytest 3.0, to avoid a deprecation warning.
+
+- Fix error due to missing CHANGES.rst when creating the source distribution
+  by adding a MANIFEST.in.
+
+
+2.0.0 (2016-04-06)
+------------------
+
+- Drop support for Python 3.2, since supporting it became too much of a hassle.
+  (Reason: Virtualenv 14+ / PIP 8+ do not support Python 3.2 anymore.)
+
+
+1.0.2 (2016-03-29)
+------------------
+
+- Add support for `--resultlog` option by parsing reruns accordingly. (#28)
+
+
+1.0.1 (2016-02-02)
+------------------
+
+- Improve package description and include CHANGELOG into description.
+
+
+1.0.0 (2016-02-02)
+------------------
+
+- Rewrite to use newer API of pytest >= 2.3.0
+
+- Improve support for pytest-xdist by only logging the final result.
+  (Logging intermediate results will finish the test rather rerunning it.)
index 3fdb1f3e6875e325af9a6e1574d6d5d0abf753f6..36007744b57d9ea1b3eb5a4d6aab9fc68902b3b4 100644 (file)
@@ -1,8 +1,10 @@
 CHANGES.rst
 CONTRIBUTING.rst
+HEADER.rst
 LICENSE
 MANIFEST.in
 README.rst
+pyproject.toml
 pytest_rerunfailures.py
 setup.cfg
 setup.py
index aab02af1c8a44504f7095c0aa42e37afb86790f6..032b981504774d9b8a477974c81539e8edce4794 100644 (file)
@@ -1,3 +1,2 @@
 [pytest11]
 rerunfailures = pytest_rerunfailures
-
index c65108edde94259f919cb24a5462a0fcb6496629..a71d560451448bfa4eaf9fa47ee04d13e01ff06b 100644 (file)
@@ -1,2 +1,5 @@
-setuptools>=40.0
-pytest>=5.0
+packaging>=17.1
+pytest>=7
+
+[:python_version < "3.8"]
+importlib-metadata>=1
index fd2517d75488e1be7fe249f77c2be41ac004d2b8..12b09e62ade4e7b09582ce80236e332189112d92 100644 (file)
@@ -1,22 +1,36 @@
+import hashlib
+import os
+import platform
 import re
+import socket
+import sys
+import threading
 import time
+import traceback
 import warnings
+from contextlib import suppress
 
-import pkg_resources
 import pytest
+from _pytest.outcomes import fail
 from _pytest.runner import runtestprotocol
+from packaging.version import parse as parse_version
 
-PYTEST_GTE_54 = pkg_resources.parse_version(
-    pytest.__version__
-) >= pkg_resources.parse_version("5.4")
+if sys.version_info >= (3, 8):
+    import importlib.metadata as importlib_metadata
+else:
+    import importlib_metadata
 
-PYTEST_GTE_61 = pkg_resources.parse_version(
-    pytest.__version__
-) >= pkg_resources.parse_version("6.1")
+try:
+    from xdist.newhooks import pytest_handlecrashitem
+
+    HAS_PYTEST_HANDLECRASHITEM = True
+    del pytest_handlecrashitem
+except ImportError:
+    HAS_PYTEST_HANDLECRASHITEM = False
 
 
 def works_with_current_xdist():
-    """Returns compatibility with installed pytest-xdist version.
+    """Return compatibility with installed pytest-xdist version.
 
     When running tests in parallel using pytest-xdist < 1.20.0, the first
     report that is logged will finish and terminate the current node rather
@@ -25,10 +39,15 @@ def works_with_current_xdist():
 
     """
     try:
-        d = pkg_resources.get_distribution("pytest-xdist")
-        return d.parsed_version >= pkg_resources.parse_version("1.20")
-    except pkg_resources.DistributionNotFound:
+        d = importlib_metadata.distribution("pytest-xdist")
+    except importlib_metadata.PackageNotFoundError:
         return None
+    else:
+        return parse_version(d.version) >= parse_version("1.20")
+
+
+RERUNS_DESC = "number of times to re-run failed tests. defaults to 0."
+RERUNS_DELAY_DESC = "add time (seconds) delay between reruns."
 
 
 # command line options
@@ -51,51 +70,28 @@ def pytest_addoption(parser):
         action="store",
         dest="reruns",
         type=int,
-        default=0,
-        help="number of times to re-run failed tests. defaults to 0.",
+        help=RERUNS_DESC,
     )
     group._addoption(
         "--reruns-delay",
         action="store",
         dest="reruns_delay",
         type=float,
-        default=0,
         help="add time (seconds) delay between reruns.",
     )
-
-
-def pytest_configure(config):
-    # add flaky marker
-    config.addinivalue_line(
-        "markers",
-        "flaky(reruns=1, reruns_delay=0): mark test to re-run up "
-        "to 'reruns' times. Add a delay of 'reruns_delay' seconds "
-        "between re-runs.",
+    group._addoption(
+        "--rerun-except",
+        action="append",
+        dest="rerun_except",
+        type=str,
+        default=None,
+        help="If passed, only rerun errors other than matching the "
+        "regex provided. Pass this flag multiple times to accumulate a list "
+        "of regexes to match",
     )
-
-
-def _get_resultlog(config):
-    if PYTEST_GTE_61:
-        return None
-    elif PYTEST_GTE_54:
-        # hack
-        from _pytest.resultlog import resultlog_key
-
-        return config._store.get(resultlog_key, default=None)
-    else:
-        return getattr(config, "_resultlog", None)
-
-
-def _set_resultlog(config, resultlog):
-    if PYTEST_GTE_61:
-        pass
-    elif PYTEST_GTE_54:
-        # hack
-        from _pytest.resultlog import resultlog_key
-
-        config._store[resultlog_key] = resultlog
-    else:
-        config._resultlog = resultlog
+    arg_type = "string"
+    parser.addini("reruns", RERUNS_DESC, type=arg_type)
+    parser.addini("reruns_delay", RERUNS_DELAY_DESC, type=arg_type)
 
 
 # making sure the options make sense
@@ -107,40 +103,30 @@ def check_options(config):
             if config.option.usepdb:  # a core option
                 raise pytest.UsageError("--reruns incompatible with --pdb")
 
-    resultlog = _get_resultlog(config)
-    if resultlog:
-        logfile = resultlog.logfile
-        config.pluginmanager.unregister(resultlog)
-        new_resultlog = RerunResultLog(config, logfile)
-        _set_resultlog(config, new_resultlog)
-        config.pluginmanager.register(new_resultlog)
-
 
 def _get_marker(item):
-    try:
-        return item.get_closest_marker("flaky")
-    except AttributeError:
-        # pytest < 3.6
-        return item.get_marker("flaky")
+    return item.get_closest_marker("flaky")
 
 
 def get_reruns_count(item):
     rerun_marker = _get_marker(item)
-    reruns = None
-
     # use the marker as a priority over the global setting.
     if rerun_marker is not None:
         if "reruns" in rerun_marker.kwargs:
             # check for keyword arguments
-            reruns = rerun_marker.kwargs["reruns"]
+            return rerun_marker.kwargs["reruns"]
         elif len(rerun_marker.args) > 0:
             # check for arguments
-            reruns = rerun_marker.args[0]
+            return rerun_marker.args[0]
         else:
-            reruns = 1
-    elif item.session.config.option.reruns:
-        # default to the global setting
-        reruns = item.session.config.option.reruns
+            return 1
+
+    reruns = item.session.config.getvalue("reruns")
+    if reruns is not None:
+        return reruns
+
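+    # Fall back to the ini-file option; it is declared as a string, so a
+    # missing or malformed value simply leaves `reruns` as None.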
+    with suppress(TypeError, ValueError):
+        reruns = int(item.session.config.getini("reruns"))
 
     return reruns
 
@@ -157,7 +143,12 @@ def get_reruns_delay(item):
         else:
             delay = 0
     else:
-        delay = item.session.config.option.reruns_delay
+        delay = item.session.config.getvalue("reruns_delay")
+        if delay is None:
+            try:
+                delay = float(item.session.config.getini("reruns_delay"))
+            except (TypeError, ValueError):
+                delay = 0
 
     if delay < 0:
         delay = 0
@@ -168,58 +159,363 @@ def get_reruns_delay(item):
     return delay
 
 
+def get_reruns_condition(item):
+    rerun_marker = _get_marker(item)
+
+    condition = True
+    if rerun_marker is not None and "condition" in rerun_marker.kwargs:
+        condition = evaluate_condition(
+            item, rerun_marker, rerun_marker.kwargs["condition"]
+        )
+
+    return condition
+
+
+def evaluate_condition(item, mark, condition: object) -> bool:
+    # copy from python3.8 _pytest.skipping.py
+
+    result = False
+    # String condition.
+    if isinstance(condition, str):
+        globals_ = {
+            "os": os,
+            "sys": sys,
+            "platform": platform,
+            "config": item.config,
+        }
+        if hasattr(item, "obj"):
+            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
+        try:
+            filename = f"<{mark.name} condition>"
+            condition_code = compile(condition, filename, "eval")
+            result = eval(condition_code, globals_)
+        except SyntaxError as exc:
+            msglines = [
+                "Error evaluating %r condition" % mark.name,
+                "    " + condition,
+                "    " + " " * (exc.offset or 0) + "^",
+                "SyntaxError: invalid syntax",
+            ]
+            fail("\n".join(msglines), pytrace=False)
+        except Exception as exc:
+            msglines = [
+                "Error evaluating %r condition" % mark.name,
+                "    " + condition,
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+
+    # Boolean condition.
+    else:
+        try:
+            result = bool(condition)
+        except Exception as exc:
+            msglines = [
+                "Error evaluating %r condition as a boolean" % mark.name,
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+    return result
+
+
 def _remove_cached_results_from_failed_fixtures(item):
-    """
-    Note: remove all cached_result attribute from every fixture
-    """
+    """Note: remove all cached_result attribute from every fixture."""
     cached_result = "cached_result"
     fixture_info = getattr(item, "_fixtureinfo", None)
     for fixture_def_str in getattr(fixture_info, "name2fixturedefs", ()):
         fixture_defs = fixture_info.name2fixturedefs[fixture_def_str]
         for fixture_def in fixture_defs:
             if getattr(fixture_def, cached_result, None) is not None:
-                result, cache_key, err = getattr(fixture_def, cached_result)
+                result, _, err = getattr(fixture_def, cached_result)
                 if err:  # Deleting cached results for only failed fixtures
-                    if PYTEST_GTE_54:
-                        setattr(fixture_def, cached_result, None)
-                    else:
-                        delattr(fixture_def, cached_result)
+                    setattr(fixture_def, cached_result, None)
 
 
 def _remove_failed_setup_state_from_session(item):
     """
-    Note: remove all _prepare_exc attribute from every col in stack of
-          _setupstate and cleaning the stack itself
+    Clean up setup state.
+
+    Note: remove all failures from every node in _setupstate stack
+          and clean the stack itself
     """
-    prepare_exc = "_prepare_exc"
-    setup_state = getattr(item.session, "_setupstate")
-    for col in setup_state.stack:
-        if hasattr(col, prepare_exc):
-            delattr(col, prepare_exc)
-    setup_state.stack = list()
+    setup_state = item.session._setupstate
+    setup_state.stack = {}
+
 
+def _get_rerun_filter_regex(item, regex_name):
+    rerun_marker = _get_marker(item)
 
-def _should_hard_fail_on_error(session_config, report):
+    if rerun_marker is not None and regex_name in rerun_marker.kwargs:
+        regex = rerun_marker.kwargs[regex_name]
+        if isinstance(regex, str):
+            regex = [regex]
+    else:
+        regex = getattr(item.session.config.option, regex_name)
+
+    return regex
+
+
+def _matches_any_rerun_error(rerun_errors, report):
+    return _try_match_reprcrash(rerun_errors, report)
+
+
+def _matches_any_rerun_except_error(rerun_except_errors, report):
+    return _try_match_reprcrash(rerun_except_errors, report)
+
+
+def _try_match_reprcrash(rerun_errors, report):
+    for rerun_regex in rerun_errors:
+        try:
+            if re.search(rerun_regex, report.longrepr.reprcrash.message):
+                return True
+        except AttributeError:
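+            # Some failure reports (e.g. when setup fails) have no
+            # reprcrash; fall back to searching the full repr text.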
+            if re.search(rerun_regex, report.longreprtext):
+                return True
+    return False
+
+
+def _should_hard_fail_on_error(item, report):
     if report.outcome != "failed":
         return False
 
-    rerun_errors = session_config.option.only_rerun
-    if not rerun_errors:
+    rerun_errors = _get_rerun_filter_regex(item, "only_rerun")
+    rerun_except_errors = _get_rerun_filter_regex(item, "rerun_except")
+
+    if (not rerun_errors) and (not rerun_except_errors):
+        # Using neither --only-rerun nor --rerun-except
         return False
 
-    for rerun_regex in rerun_errors:
-        if re.search(rerun_regex, report.longrepr.reprcrash.message):
-            return False
+    elif rerun_errors and (not rerun_except_errors):
+        # Using --only-rerun but not --rerun-except
+        return not _matches_any_rerun_error(rerun_errors, report)
 
-    return True
+    elif (not rerun_errors) and rerun_except_errors:
+        # Using --rerun-except but not --only-rerun
+        return _matches_any_rerun_except_error(rerun_except_errors, report)
+
+    else:
+        # Using both --only-rerun and --rerun-except
+        matches_rerun_only = _matches_any_rerun_error(rerun_errors, report)
+        matches_rerun_except = _matches_any_rerun_except_error(
+            rerun_except_errors, report
+        )
+        return (not matches_rerun_only) or matches_rerun_except
+
+
+def _should_not_rerun(item, report, reruns):
+    xfail = hasattr(report, "wasxfail")
+    is_terminal_error = _should_hard_fail_on_error(item, report)
+    condition = get_reruns_condition(item)
+    return (
+        item.execution_count > reruns
+        or not report.failed
+        or xfail
+        or is_terminal_error
+        or not condition
+    )
+
+
+def is_master(config):
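+    # xdist workers have a `workerinput` (or, in older versions,
+    # `slaveinput`) attribute on their config; the controller does not.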
+    return not (hasattr(config, "workerinput") or hasattr(config, "slaveinput"))
+
+
+def pytest_configure(config):
+    # add flaky marker
+    config.addinivalue_line(
+        "markers",
+        "flaky(reruns=1, reruns_delay=0): mark test to re-run up "
+        "to 'reruns' times. Add a delay of 'reruns_delay' seconds "
+        "between re-runs.",
+    )
+
+    if config.pluginmanager.hasplugin("xdist") and HAS_PYTEST_HANDLECRASHITEM:
+        config.pluginmanager.register(XDistHooks())
+        if is_master(config):
+            config.failures_db = ServerStatusDB()
+        else:
+            config.failures_db = ClientStatusDB(config.workerinput["sock_port"])
+    else:
+        config.failures_db = StatusDB()  # no-op db
+
+
+class XDistHooks:
+    def pytest_configure_node(self, node):
+        """Configure xdist hook for node sock_port."""
+        node.workerinput["sock_port"] = node.config.failures_db.sock_port
+
+    def pytest_handlecrashitem(self, crashitem, report, sched):
+        """Return the crashitem from pending and collection."""
+        db = sched.config.failures_db
+        reruns = db.get_test_reruns(crashitem)
+        if db.get_test_failures(crashitem) < reruns:
+            sched.mark_test_pending(crashitem)
+            report.outcome = "rerun"
+
+        db.add_test_failure(crashitem)
+
+
+# An in-memory db residing in the master that records
+# the number of reruns (set before test setup)
+# and failures (set after each failure or crash),
+# accessible from both the master and the workers.
+class StatusDB:
+    def __init__(self):
+        self.delim = b"\n"
+        self.hmap = {}
+
+    def _hash(self, crashitem: str) -> str:
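+        # Memoized mapping from the test's node id to a short sha1 digest,
+        # used as the key in status-db lookups and protocol messages.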
+        if crashitem not in self.hmap:
+            self.hmap[crashitem] = hashlib.sha1(
+                crashitem.encode(),
+            ).hexdigest()[:10]
+
+        return self.hmap[crashitem]
+
+    def add_test_failure(self, crashitem):
+        hash = self._hash(crashitem)
+        failures = self._get(hash, "f")
+        failures += 1
+        self._set(hash, "f", failures)
+
+    def get_test_failures(self, crashitem):
+        hash = self._hash(crashitem)
+        return self._get(hash, "f")
+
+    def set_test_reruns(self, crashitem, reruns):
+        hash = self._hash(crashitem)
+        self._set(hash, "r", reruns)
+
+    def get_test_reruns(self, crashitem):
+        hash = self._hash(crashitem)
+        return self._get(hash, "r")
+
+    # i is a hash of the test name, t_f.py::test_t
+    # k is f for failures or r for reruns
+    # v is the number of failures or reruns (an int)
+    def _set(self, i: str, k: str, v: int):
+        pass
+
+    def _get(self, i: str, k: str) -> int:
+        return 0
+
+
+class SocketDB(StatusDB):
+    def __init__(self):
+        super().__init__()
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.sock.setblocking(1)
+
+    def _sock_recv(self, conn) -> str:
+        buf = b""
+        while True:
+            b = conn.recv(1)
+            if b == self.delim:
+                break
+            buf += b
+
+        return buf.decode()
+
+    def _sock_send(self, conn, msg: str):
+        conn.send(msg.encode() + self.delim)
+
+
+class ServerStatusDB(SocketDB):
+    def __init__(self):
+        super().__init__()
+        self.sock.bind(("localhost", 0))
+        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+        self.rerunfailures_db = {}
+        t = threading.Thread(target=self.run_server, daemon=True)
+        t.start()
+
+    @property
+    def sock_port(self):
+        return self.sock.getsockname()[1]
+
+    def run_server(self):
+        self.sock.listen()
+        while True:
+            conn, _ = self.sock.accept()
+            t = threading.Thread(target=self.run_connection, args=(conn,), daemon=True)
+            t.start()
+
+    def run_connection(self, conn):
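+        # Wire format: "op|i|k|v" terminated by the b"\n" delimiter, where
+        # op is "set" or "get", i is the hashed test name, k is "f"
+        # (failures) or "r" (reruns), and v is an int (empty for "get").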
+        with suppress(ConnectionError):
+            while True:
+                op, i, k, v = self._sock_recv(conn).split("|")
+                if op == "set":
+                    self._set(i, k, int(v))
+                elif op == "get":
+                    self._sock_send(conn, str(self._get(i, k)))
+
+    def _set(self, i: str, k: str, v: int):
+        if i not in self.rerunfailures_db:
+            self.rerunfailures_db[i] = {}
+        self.rerunfailures_db[i][k] = v
+
+    def _get(self, i: str, k: str) -> int:
+        try:
+            return self.rerunfailures_db[i][k]
+        except KeyError:
+            return 0
+
+
+class ClientStatusDB(SocketDB):
+    def __init__(self, sock_port):
+        super().__init__()
+        self.sock.connect(("localhost", sock_port))
+
+    def _set(self, i: str, k: str, v: int):
+        self._sock_send(self.sock, "|".join(("set", i, k, str(v))))
+
+    def _get(self, i: str, k: str) -> int:
+        self._sock_send(self.sock, "|".join(("get", i, k, "")))
+        return int(self._sock_recv(self.sock))
+
+
+def pytest_runtest_teardown(item, nextitem):
+    reruns = get_reruns_count(item)
+    if reruns is None:
+        # global setting is not specified, and this test is not marked with
+        # flaky
+        return
+
+    if not hasattr(item, "execution_count"):
+        # pytest_runtest_protocol hook of this plugin was not executed
+        # -> teardown needs to be skipped as well
+        return
+
+    _test_failed_statuses = getattr(item, "_test_failed_statuses", {})
+    if item.execution_count <= reruns and any(_test_failed_statuses.values()):
+        # clean cached results from any level of setups
+        _remove_cached_results_from_failed_fixtures(item)
+
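+        # Keep only this item in the setup-state stack, so that session,
+        # class, etc. scoped fixtures are torn down only once, after the
+        # final run (see the 11.1 changelog entry).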
+        if item in item.session._setupstate.stack:
+            for key in list(item.session._setupstate.stack.keys()):
+                if key != item:
+                    del item.session._setupstate.stack[key]
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    outcome = yield
+    result = outcome.get_result()
+    if result.when == "setup":
+        # clean failed statuses at the beginning of each test/rerun
+        setattr(item, "_test_failed_statuses", {})
+    _test_failed_statuses = getattr(item, "_test_failed_statuses", {})
+    _test_failed_statuses[result.when] = result.failed
+    item._test_failed_statuses = _test_failed_statuses
 
 
 def pytest_runtest_protocol(item, nextitem):
     """
+    Run the test protocol.
+
     Note: when teardown fails, two reports are generated for the case, one for
     the test case and the other for the teardown error.
     """
-
     reruns = get_reruns_count(item)
     if reruns is None:
         # global setting is not specified, and this test is not marked with
@@ -230,8 +526,13 @@ def pytest_runtest_protocol(item, nextitem):
     # first item if necessary
     check_options(item.session.config)
     delay = get_reruns_delay(item)
-    parallel = hasattr(item.config, "slaveinput")
-    item.execution_count = 0
+    parallel = not is_master(item.config)
+    db = item.session.config.failures_db
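+    # Seed the execution count from the crash db, so a test that crashed a
+    # worker resumes with its previous failures counted against `reruns`.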
+    item.execution_count = db.get_test_failures(item.nodeid)
+    db.set_test_reruns(item.nodeid, reruns)
+
+    if item.execution_count > reruns:
+        return True
 
     need_to_run = True
     while need_to_run:
@@ -240,15 +541,8 @@ def pytest_runtest_protocol(item, nextitem):
         reports = runtestprotocol(item, nextitem=nextitem, log=False)
 
         for report in reports:  # 3 reports: setup, call, teardown
-            is_terminal_error = _should_hard_fail_on_error(item.session.config, report)
             report.rerun = item.execution_count - 1
-            xfail = hasattr(report, "wasxfail")
-            if (
-                item.execution_count > reruns
-                or not report.failed
-                or xfail
-                or is_terminal_error
-            ):
+            if _should_not_rerun(item, report, reruns):
                 # last run or no failure detected, log normally
                 item.ihook.pytest_runtest_logreport(report=report)
             else:
@@ -274,15 +568,13 @@ def pytest_runtest_protocol(item, nextitem):
 
 
 def pytest_report_teststatus(report):
-    """Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
-    """
+    # Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
     if report.outcome == "rerun":
         return "rerun", "R", ("RERUN", {"yellow": True})
 
 
 def pytest_terminal_summary(terminalreporter):
-    """Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
-    """
+    # Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
     tr = terminalreporter
     if not tr.reportchars:
         return
@@ -303,38 +595,4 @@ def show_rerun(terminalreporter, lines):
     if rerun:
         for rep in rerun:
             pos = rep.nodeid
-            lines.append("RERUN {}".format(pos))
-
-
-if not PYTEST_GTE_61:
-    from _pytest.resultlog import ResultLog
-
-    class RerunResultLog(ResultLog):
-        def __init__(self, config, logfile):
-            ResultLog.__init__(self, config, logfile)
-
-        def pytest_runtest_logreport(self, report):
-            """
-            Adds support for rerun report fix for issue:
-            https://github.com/pytest-dev/pytest-rerunfailures/issues/28
-            """
-            if report.when != "call" and report.passed:
-                return
-            res = self.config.hook.pytest_report_teststatus(report=report)
-            code = res[1]
-            if code == "x":
-                longrepr = str(report.longrepr)
-            elif code == "X":
-                longrepr = ""
-            elif report.passed:
-                longrepr = ""
-            elif report.failed:
-                longrepr = str(report.longrepr)
-            elif report.skipped:
-                longrepr = str(report.longrepr[2])
-            elif report.outcome == "rerun":
-                longrepr = str(report.longrepr)
-            else:
-                longrepr = str(report.longrepr)
-
-            self.log_outcome(report, code, longrepr)
+            lines.append(f"RERUN {pos}")
index 23744584b32059e8195d4caaf629092e7740536c..8bb646dc8d3452b5493f7a8d64854dc55d7f10f6 100644 (file)
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,6 +5,51 @@ universal = 0
 ignore = 
        .pre-commit-config.yaml
 
+[metadata]
+name = pytest-rerunfailures
+version = 13.0
+url = https://github.com/pytest-dev/pytest-rerunfailures
+description = pytest plugin to re-run tests to eliminate flaky failures
+long_description = file: HEADER.rst, README.rst, CHANGES.rst
+author = Leah Klearman
+author_email = lklrmn@gmail.com
+license = Mozilla Public License 2.0 (MPL 2.0)
+keywords = py.test pytest rerun failures flaky
+classifiers = 
+       Development Status :: 5 - Production/Stable
+       Framework :: Pytest
+       Intended Audience :: Developers
+       License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+       Operating System :: POSIX
+       Operating System :: Microsoft :: Windows
+       Operating System :: MacOS :: MacOS X
+       Topic :: Software Development :: Quality Assurance
+       Topic :: Software Development :: Testing
+       Topic :: Utilities
+       Programming Language :: Python :: 3
+       Programming Language :: Python :: 3.7
+       Programming Language :: Python :: 3.8
+       Programming Language :: Python :: 3.9
+       Programming Language :: Python :: 3.10
+       Programming Language :: Python :: 3.11
+       Programming Language :: Python :: 3.12
+       Programming Language :: Python :: 3 :: Only
+       Programming Language :: Python :: Implementation :: CPython
+       Programming Language :: Python :: Implementation :: PyPy
+
+[options]
+zip_safe = False
+py_modules = pytest_rerunfailures
+python_requires = >= 3.7
+install_requires = 
+       packaging >= 17.1
+       pytest >= 7
+       importlib-metadata>=1;python_version<"3.8"
+
+[options.entry_points]
+pytest11 = 
+       rerunfailures = pytest_rerunfailures
+
 [egg_info]
 tag_build = 
 tag_date = 0
old mode 100644 (file)
new mode 100755 (executable)
index 62323ab..c823345
--- a/setup.py
+++ b/setup.py
@@ -1,39 +1,4 @@
+#!/usr/bin/env python
 from setuptools import setup
 
-with open("README.rst") as readme, open("CHANGES.rst") as changelog:
-    long_description = ".. contents::\n\n" + readme.read() + "\n\n" + changelog.read()
-
-setup(
-    name="pytest-rerunfailures",
-    version="9.1.1",
-    description="pytest plugin to re-run tests to eliminate flaky failures",
-    long_description=long_description,
-    author="Leah Klearman",
-    author_email="lklrmn@gmail.com",
-    url="https://github.com/pytest-dev/pytest-rerunfailures",
-    py_modules=["pytest_rerunfailures"],
-    entry_points={"pytest11": ["rerunfailures = pytest_rerunfailures"]},
-    install_requires=["setuptools>=40.0", "pytest >= 5.0"],
-    python_requires=">=3.5",
-    license="Mozilla Public License 2.0 (MPL 2.0)",
-    keywords="py.test pytest rerun failures flaky",
-    zip_safe=False,
-    classifiers=[
-        "Development Status :: 5 - Production/Stable",
-        "Framework :: Pytest",
-        "Intended Audience :: Developers",
-        "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
-        "Operating System :: POSIX",
-        "Operating System :: Microsoft :: Windows",
-        "Operating System :: MacOS :: MacOS X",
-        "Topic :: Software Development :: Quality Assurance",
-        "Topic :: Software Development :: Testing",
-        "Topic :: Utilities",
-        "Programming Language :: Python :: 3.5",
-        "Programming Language :: Python :: 3.6",
-        "Programming Language :: Python :: 3.7",
-        "Programming Language :: Python :: 3.8",
-        "Programming Language :: Python :: Implementation :: CPython",
-        "Programming Language :: Python :: Implementation :: PyPy",
-    ],
-)
+setup()
index 7fbcc2b39aa33c0b755d66f6c23d38d8fbd29ebe..c4f762303040eaf65030d6eca6a143ee819a091b 100644 (file)
@@ -2,45 +2,60 @@ import random
 import time
 from unittest import mock
 
-import pkg_resources
 import pytest
 
+from pytest_rerunfailures import HAS_PYTEST_HANDLECRASHITEM
 
 pytest_plugins = "pytester"
 
-PYTEST_GTE_61 = pkg_resources.parse_version(
-    pytest.__version__
-) >= pkg_resources.parse_version("6.1")
+has_xdist = HAS_PYTEST_HANDLECRASHITEM
 
 
 def temporary_failure(count=1):
-    return """
+    return f"""
             import py
             path = py.path.local(__file__).dirpath().ensure('test.res')
             count = path.read() or 1
-            if int(count) <= {0}:
+            if int(count) <= {count}:
                 path.write(int(count) + 1)
-                raise Exception('Failure: {{0}}'.format(count))""".format(
-        count
-    )
+                raise Exception('Failure: {{0}}'.format(count))"""
+
+
+def temporary_crash(count=1):
+    return f"""
+            import py
+            import os
+            path = py.path.local(__file__).dirpath().ensure('test.res')
+            count = path.read() or 1
+            if int(count) <= {count}:
+                path.write(int(count) + 1)
+                os._exit(1)"""
 
 
 def check_outcome_field(outcomes, field_name, expected_value):
     field_value = outcomes.get(field_name, 0)
-    assert (
-        field_value == expected_value
-    ), "outcomes.{} has unexpected value. Expected '{}' but got '{}'".format(
-        field_name, expected_value, field_value
+    assert field_value == expected_value, (
+        f"outcomes.{field_name} has unexpected value. "
+        f"Expected '{expected_value}' but got '{field_value}'"
     )
 
 
 def assert_outcomes(
-    result, passed=1, skipped=0, failed=0, error=0, xfailed=0, xpassed=0, rerun=0,
+    result,
+    passed=1,
+    skipped=0,
+    failed=0,
+    error=0,
+    xfailed=0,
+    xpassed=0,
+    rerun=0,
 ):
     outcomes = result.parseoutcomes()
     check_outcome_field(outcomes, "passed", passed)
     check_outcome_field(outcomes, "skipped", skipped)
     check_outcome_field(outcomes, "failed", failed)
+    field = "errors"
+    check_outcome_field(outcomes, field, error)
     check_outcome_field(outcomes, "xfailed", xfailed)
     check_outcome_field(outcomes, "xpassed", xpassed)
     check_outcome_field(outcomes, "rerun", rerun)
@@ -61,14 +76,12 @@ def test_no_rerun_on_pass(testdir):
 def test_no_rerun_on_skipif_mark(testdir):
     reason = str(random.random())
     testdir.makepyfile(
-        """
+        f"""
         import pytest
-        @pytest.mark.skipif(reason='{}')
+        @pytest.mark.skipif(reason='{reason}')
         def test_skip():
             pass
-    """.format(
-            reason
-        )
+    """
     )
     result = testdir.runpytest("--reruns", "1")
     assert_outcomes(result, passed=0, skipped=1)
@@ -77,13 +90,11 @@ def test_no_rerun_on_skipif_mark(testdir):
 def test_no_rerun_on_skip_call(testdir):
     reason = str(random.random())
     testdir.makepyfile(
-        """
+        f"""
         import pytest
         def test_skip():
-            pytest.skip('{}')
-    """.format(
-            reason
-        )
+            pytest.skip('{reason}')
+    """
     )
     result = testdir.runpytest("--reruns", "1")
     assert_outcomes(result, passed=0, skipped=1)
@@ -105,13 +116,11 @@ def test_no_rerun_on_xfail_mark(testdir):
 def test_no_rerun_on_xfail_call(testdir):
     reason = str(random.random())
     testdir.makepyfile(
-        """
+        f"""
         import pytest
         def test_xfail():
-            pytest.xfail('{}')
-    """.format(
-            reason
-        )
+            pytest.xfail('{reason}')
+    """
     )
     result = testdir.runpytest("--reruns", "1")
     assert_outcomes(result, passed=0, xfailed=1)
@@ -144,11 +153,9 @@ def test_rerun_fails_after_consistent_setup_failure(testdir):
 def test_rerun_passes_after_temporary_setup_failure(testdir):
     testdir.makepyfile("def test_pass(): pass")
     testdir.makeconftest(
-        """
+        f"""
         def pytest_runtest_setup(item):
-            {}""".format(
-            temporary_failure()
-        )
+            {temporary_failure()}"""
     )
     result = testdir.runpytest("--reruns", "1", "-r", "R")
     assert_outcomes(result, passed=1, rerun=1)
@@ -162,25 +169,38 @@ def test_rerun_fails_after_consistent_test_failure(testdir):
 
 def test_rerun_passes_after_temporary_test_failure(testdir):
     testdir.makepyfile(
-        """
+        f"""
         def test_pass():
-            {}""".format(
-            temporary_failure()
-        )
+            {temporary_failure()}"""
     )
     result = testdir.runpytest("--reruns", "1", "-r", "R")
     assert_outcomes(result, passed=1, rerun=1)
 
 
+@pytest.mark.skipif(not has_xdist, reason="requires xdist with crashitem")
+def test_rerun_passes_after_temporary_test_crash(testdir):
+    # note: we need two tests because there is a bug where xdist
+    # cannot rerun the last test if it crashes. the bug exists only
+    # in xdist; there is no error in this plugin that causes the bug.
+    testdir.makepyfile(
+        f"""
+        def test_crash():
+            {temporary_crash()}
+
+        def test_pass():
+            pass"""
+    )
+    result = testdir.runpytest("-p", "xdist", "-n", "1", "--reruns", "1", "-r", "R")
+    assert_outcomes(result, passed=2, rerun=1)
+
+
 def test_rerun_passes_after_temporary_test_failure_with_flaky_mark(testdir):
     testdir.makepyfile(
-        """
+        f"""
         import pytest
         @pytest.mark.flaky(reruns=2)
         def test_pass():
-            {}""".format(
-            temporary_failure(2)
-        )
+            {temporary_failure(2)}"""
     )
     result = testdir.runpytest("-r", "R")
     assert_outcomes(result, passed=1, rerun=2)
@@ -188,13 +208,11 @@ def test_rerun_passes_after_temporary_test_failure_with_flaky_mark(testdir):
 
 def test_reruns_if_flaky_mark_is_called_without_options(testdir):
     testdir.makepyfile(
-        """
+        f"""
         import pytest
         @pytest.mark.flaky()
         def test_pass():
-            {}""".format(
-            temporary_failure(1)
-        )
+            {temporary_failure(1)}"""
     )
     result = testdir.runpytest("-r", "R")
     assert_outcomes(result, passed=1, rerun=1)
@@ -202,13 +220,11 @@ def test_reruns_if_flaky_mark_is_called_without_options(testdir):
 
 def test_reruns_if_flaky_mark_is_called_with_positional_argument(testdir):
     testdir.makepyfile(
-        """
+        f"""
         import pytest
         @pytest.mark.flaky(2)
         def test_pass():
-            {}""".format(
-            temporary_failure(2)
-        )
+            {temporary_failure(2)}"""
     )
     result = testdir.runpytest("-r", "R")
     assert_outcomes(result, passed=1, rerun=2)
@@ -216,11 +232,9 @@ def test_reruns_if_flaky_mark_is_called_with_positional_argument(testdir):
 
 def test_no_extra_test_summary_for_reruns_by_default(testdir):
     testdir.makepyfile(
-        """
+        f"""
         def test_pass():
-            {}""".format(
-            temporary_failure()
-        )
+            {temporary_failure()}"""
     )
     result = testdir.runpytest("--reruns", "1")
     assert "RERUN" not in result.stdout.str()
@@ -229,11 +243,9 @@ def test_no_extra_test_summary_for_reruns_by_default(testdir):
 
 def test_extra_test_summary_for_reruns(testdir):
     testdir.makepyfile(
-        """
+        f"""
         def test_pass():
-            {}""".format(
-            temporary_failure()
-        )
+            {temporary_failure()}"""
     )
     result = testdir.runpytest("--reruns", "1", "-r", "R")
     result.stdout.fnmatch_lines_random(["RERUN test_*:*"])
@@ -242,11 +254,9 @@ def test_extra_test_summary_for_reruns(testdir):
 
 def test_verbose(testdir):
     testdir.makepyfile(
-        """
+        f"""
         def test_pass():
-            {}""".format(
-            temporary_failure()
-        )
+            {temporary_failure()}"""
     )
     result = testdir.runpytest("--reruns", "1", "-v")
     result.stdout.fnmatch_lines_random(["test_*:* RERUN*"])
@@ -283,19 +293,6 @@ def test_rerun_on_class_setup_error_with_reruns(testdir):
     assert_outcomes(result, passed=0, error=1, rerun=1)
 
 
-@pytest.mark.skipif(PYTEST_GTE_61, reason="--result-log removed in pytest>=6.1")
-def test_rerun_with_resultslog(testdir):
-    testdir.makepyfile(
-        """
-        def test_fail():
-            assert False"""
-    )
-
-    result = testdir.runpytest("--reruns", "2", "--result-log", "./pytest.log")
-
-    assert_outcomes(result, passed=0, failed=1, rerun=2)
-
-
 @pytest.mark.parametrize("delay_time", [-1, 0, 0.0, 1, 2.5])
 def test_reruns_with_delay(testdir, delay_time):
     testdir.makepyfile(
@@ -323,14 +320,12 @@ def test_reruns_with_delay(testdir, delay_time):
 @pytest.mark.parametrize("delay_time", [-1, 0, 0.0, 1, 2.5])
 def test_reruns_with_delay_marker(testdir, delay_time):
     testdir.makepyfile(
-        """
+        f"""
         import pytest
 
-        @pytest.mark.flaky(reruns=2, reruns_delay={})
+        @pytest.mark.flaky(reruns=2, reruns_delay={delay_time})
         def test_fail_two():
-            assert False""".format(
-            delay_time
-        )
+            assert False"""
     )
 
     time.sleep = mock.MagicMock()
@@ -351,7 +346,7 @@ def test_reruns_with_delay_marker(testdir, delay_time):
 
 def test_rerun_on_setup_class_with_error_with_reruns(testdir):
     """
-     Case: setup_class throwing error on the first execution for parametrized test
+    Case: setup_class throwing error on the first execution for parametrized test
     """
     testdir.makepyfile(
         """
@@ -498,12 +493,10 @@ def test_pytest_runtest_logfinish_is_called(testdir):
     hook_message = "Message from pytest_runtest_logfinish hook"
     testdir.makepyfile("def test_pass(): pass")
     testdir.makeconftest(
-        r"""
+        rf"""
         def pytest_runtest_logfinish(nodeid, location):
-            print("\n{}\n")
-    """.format(
-            hook_message
-        )
+            print("\n{hook_message}\n")
+    """
     )
     result = testdir.runpytest("--reruns", "1", "-s")
     result.stdout.fnmatch_lines(hook_message)
@@ -540,3 +533,553 @@ def test_only_rerun_flag(testdir, only_rerun_texts, should_rerun):
     assert_outcomes(
         result, passed=num_passed, failed=num_failed, rerun=num_reruns_actual
     )
+
+
+def test_no_rerun_on_strict_xfail_with_only_rerun_flag(testdir):
+    testdir.makepyfile(
+        """
+        import pytest
+        @pytest.mark.xfail(strict=True)
+        def test_xfail():
+            assert True
+    """
+    )
+    result = testdir.runpytest("--reruns", "1", "--only-rerun", "RuntimeError")
+    assert_outcomes(result, passed=0, failed=1, rerun=0)
+
+
+@pytest.mark.parametrize(
+    "rerun_except_texts, should_rerun",
+    [
+        (["AssertionError"], True),
+        (["Assertion*"], True),
+        (["Assertion"], True),
+        (["ValueError"], False),
+        (["AssertionError: "], True),
+        (["ERR"], False),
+        (["AssertionError", "OSError"], True),
+        (["ValueError", "OSError"], False),
+    ],
+)
+def test_rerun_except_flag(testdir, rerun_except_texts, should_rerun):
+    testdir.makepyfile('def test_rerun_except(): raise ValueError("ERR")')
+
+    num_failed = 1
+    num_passed = 0
+    num_reruns = 1
+    num_reruns_actual = num_reruns if should_rerun else 0
+
+    pytest_args = ["--reruns", str(num_reruns)]
+    for rerun_except_text in rerun_except_texts:
+        pytest_args.extend(["--rerun-except", rerun_except_text])
+    result = testdir.runpytest(*pytest_args)
+    assert_outcomes(
+        result, passed=num_passed, failed=num_failed, rerun=num_reruns_actual
+    )
+
+
+@pytest.mark.parametrize(
+    "only_rerun_texts, rerun_except_texts, should_rerun",
+    [
+        # Matches --only-rerun, but not --rerun-except (rerun)
+        (["ValueError"], ["Not a Match"], True),
+        (["ValueError", "AssertionError"], ["Not a match", "OSError"], True),
+        # Matches --only-rerun AND --rerun-except (no rerun)
+        (["ValueError"], ["ERR"], False),
+        (["OSError", "ValueError"], ["Not a match", "ERR"], False),
+        # Matches --rerun-except, but not --only-rerun (no rerun)
+        (["OSError", "AssertionError"], ["TypeError", "ValueError"], False),
+        # Matches neither --only-rerun nor --rerun-except (no rerun)
+        (["AssertionError"], ["OSError"], False),
+        # --rerun-except overrides --only-rerun for same arg (no rerun)
+        (["ValueError"], ["ValueError"], False),
+    ],
+)
+def test_rerun_except_and_only_rerun(
+    testdir, only_rerun_texts, rerun_except_texts, should_rerun
+):
+    testdir.makepyfile('def test_only_rerun_except(): raise ValueError("ERR")')
+
+    num_failed = 1
+    num_passed = 0
+    num_reruns = 1
+    num_reruns_actual = num_reruns if should_rerun else 0
+
+    pytest_args = ["--reruns", str(num_reruns)]
+    for only_rerun_text in only_rerun_texts:
+        pytest_args.extend(["--only-rerun", only_rerun_text])
+    for rerun_except_text in rerun_except_texts:
+        pytest_args.extend(["--rerun-except", rerun_except_text])
+    result = testdir.runpytest(*pytest_args)
+    assert_outcomes(
+        result, passed=num_passed, failed=num_failed, rerun=num_reruns_actual
+    )
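+
+
+# A minimal sketch (an assumption for illustration, not the plugin's actual
+# implementation) of the combined rule the parametrized tests above encode:
+# a failure is rerun only if it matches at least one --only-rerun pattern
+# (when any are given) and none of the --rerun-except patterns.
+def _failure_should_rerun(error_text, only_rerun, rerun_except):
+    import re
+
+    if only_rerun and not any(re.search(p, error_text) for p in only_rerun):
+        return False
+    if rerun_except and any(re.search(p, error_text) for p in rerun_except):
+        return False
+    return True
+
+
+# e.g. with the ValueError("ERR") raised above,
+# _failure_should_rerun("ValueError: ERR", ["ValueError"], ["Not a Match"])
+# is True, while
+# _failure_should_rerun("ValueError: ERR", ["ValueError"], ["ERR"]) is False.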
+
+
+def test_rerun_except_passes_setup_errors(testdir):
+    testdir.makepyfile(
+        """
+        import pytest
+
+        @pytest.fixture()
+        def fixture_setup_fails(non_existent_fixture):
+            return 1
+
+        def test_will_not_run(fixture_setup_fails):
+            assert fixture_setup_fails == 1"""
+    )
+
+    num_reruns = 1
+    pytest_args = ["--reruns", str(num_reruns), "--rerun-except", "ValueError"]
+    result = testdir.runpytest(*pytest_args)
+    assert result.ret != pytest.ExitCode.INTERNAL_ERROR
+    assert_outcomes(result, passed=0, error=1, rerun=num_reruns)
+
+
+@pytest.mark.parametrize(
+    "condition, expected_reruns",
+    [
+        (1 == 1, 2),
+        (1 == 2, 0),
+        (True, 2),
+        (False, 0),
+        (1, 2),
+        (0, 0),
+        (["list"], 2),
+        ([], 0),
+        ({"dict": 1}, 2),
+        ({}, 0),
+        (None, 0),
+    ],
+)
+def test_reruns_with_condition_marker(testdir, condition, expected_reruns):
+    testdir.makepyfile(
+        f"""
+        import pytest
+
+        @pytest.mark.flaky(reruns=2, condition={condition})
+        def test_fail_two():
+            assert False"""
+    )
+
+    result = testdir.runpytest()
+    assert_outcomes(result, passed=0, failed=1, rerun=expected_reruns)
+
+
+@pytest.mark.parametrize(
+    "condition, expected_reruns",
+    [('sys.platform.startswith("non-exists") == False', 2), ("os.getpid() != -1", 2)],
+)
+# the sys, os and platform modules are imported before the condition
+# expression is evaluated (a sketch of this rule follows these tests)
+def test_reruns_with_string_condition(testdir, condition, expected_reruns):
+    testdir.makepyfile(
+        f"""
+           import pytest
+
+           @pytest.mark.flaky(reruns=2, condition='{condition}')
+           def test_fail_two():
+               assert False"""
+    )
+    result = testdir.runpytest()
+    assert_outcomes(result, passed=0, failed=1, rerun=expected_reruns)
+
+
+def test_reruns_with_string_condition_with_global_var(testdir):
+    testdir.makepyfile(
+        """
+              import pytest
+
+              rerunBool = False
+              @pytest.mark.flaky(reruns=2, condition='rerunBool')
+              def test_fail_two():
+                  global rerunBool
+                  rerunBool = True
+                  assert False"""
+    )
+    result = testdir.runpytest()
+    assert_outcomes(result, passed=0, failed=1, rerun=2)
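+
+
+# A minimal sketch (an assumption for illustration, not the plugin's exact
+# implementation) of the rule the condition tests above rely on: non-string
+# conditions gate reruns by truthiness, while string conditions are eval'd
+# with sys, os, platform and the test module's globals in scope.
+def _condition_allows_rerun(condition, module_globals):
+    import os
+    import platform
+    import sys
+
+    if isinstance(condition, str):
+        namespace = {"os": os, "platform": platform, "sys": sys}
+        namespace.update(module_globals)
+        return bool(eval(condition, namespace))
+    return bool(condition)
+
+
+# e.g. _condition_allows_rerun("os.getpid() != -1", {}) is True (rerun),
+# while _condition_allows_rerun([], {}) is False (no rerun), matching the
+# parametrized expectations above.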
+
+
+@pytest.mark.parametrize(
+    "marker_only_rerun,cli_only_rerun,should_rerun",
+    [
+        ("AssertionError", None, True),
+        ("AssertionError: ERR", None, True),
+        (["AssertionError"], None, True),
+        (["AssertionError: ABC"], None, False),
+        ("ValueError", None, False),
+        (["ValueError"], None, False),
+        (["AssertionError", "ValueError"], None, True),
+        # CLI override behavior
+        ("AssertionError", "ValueError", True),
+        ("ValueError", "AssertionError", False),
+    ],
+)
+def test_only_rerun_flag_in_flaky_marker(
+    testdir, marker_only_rerun, cli_only_rerun, should_rerun
+):
+    testdir.makepyfile(
+        f"""
+        import pytest
+
+        @pytest.mark.flaky(reruns=1, only_rerun={marker_only_rerun!r})
+        def test_fail():
+            raise AssertionError("ERR")
+        """
+    )
+    args = []
+    if cli_only_rerun:
+        args.extend(["--only-rerun", cli_only_rerun])
+    result = testdir.runpytest(*args)
+    num_reruns = 1 if should_rerun else 0
+    assert_outcomes(result, passed=0, failed=1, rerun=num_reruns)
+
+
+@pytest.mark.parametrize(
+    "marker_rerun_except,cli_rerun_except,raised_error,should_rerun",
+    [
+        ("AssertionError", None, "AssertionError", False),
+        ("AssertionError: ERR", None, "AssertionError", False),
+        (["AssertionError"], None, "AssertionError", False),
+        (["AssertionError: ABC"], None, "AssertionError", True),
+        ("ValueError", None, "AssertionError", True),
+        (["ValueError"], None, "AssertionError", True),
+        (["OSError", "ValueError"], None, "AssertionError", True),
+        (["OSError", "AssertionError"], None, "AssertionError", False),
+        # CLI override behavior
+        ("AssertionError", "ValueError", "AssertionError", False),
+        ("ValueError", "AssertionError", "AssertionError", True),
+        ("CustomFailure", None, "CustomFailure", False),
+        ("CustomFailure", None, "AssertionError", True),
+    ],
+)
+def test_rerun_except_flag_in_flaky_marker(
+    testdir, marker_rerun_except, cli_rerun_except, raised_error, should_rerun
+):
+    testdir.makepyfile(
+        f"""
+        import pytest
+
+        class CustomFailure(Exception):
+            pass
+
+        @pytest.mark.flaky(reruns=1, rerun_except={marker_rerun_except!r})
+        def test_fail():
+            raise {raised_error}("ERR")
+        """
+    )
+    args = []
+    if cli_rerun_except:
+        args.extend(["--rerun-except", cli_rerun_except])
+    result = testdir.runpytest(*args)
+    num_reruns = 1 if should_rerun else 0
+    assert_outcomes(result, passed=0, failed=1, rerun=num_reruns)
+
+
+def test_ini_file_parameters(testdir):
+    testdir.makepyfile(
+        """
+        import time
+        def test_foo():
+            assert False
+    """
+    )
+    testdir.makeini(
+        """
+        [pytest]
+        reruns = 2
+        reruns_delay = 3
+    """
+    )
+    time.sleep = mock.MagicMock()
+    result = testdir.runpytest()
+
+    time.sleep.assert_called_with(3)
+    assert_outcomes(result, passed=0, failed=1, rerun=2)
+
+
+def test_ini_file_parameters_override(testdir):
+    testdir.makepyfile(
+        """
+        import time
+        def test_foo():
+            assert False
+    """
+    )
+    testdir.makeini(
+        """
+        [pytest]
+        reruns = 2
+        reruns_delay = 3
+    """
+    )
+    time.sleep = mock.MagicMock()
+    result = testdir.runpytest("--reruns", "4", "--reruns-delay", "5")
+
+    time.sleep.assert_called_with(5)
+    assert_outcomes(result, passed=0, failed=1, rerun=4)
+
+
+def test_run_session_teardown_once_after_reruns(testdir):
+    testdir.makepyfile(
+        """
+        import logging
+        import pytest
+
+        from unittest import TestCase
+
+        @pytest.fixture(scope='session', autouse=True)
+        def session_fixture():
+            logging.info('session setup')
+            yield
+            logging.info('session teardown')
+
+        @pytest.fixture(scope='class', autouse=True)
+        def class_fixture():
+            logging.info('class setup')
+            yield
+            logging.info('class teardown')
+
+        @pytest.fixture(scope='function', autouse=True)
+        def function_fixture():
+            logging.info('function setup')
+            yield
+            logging.info('function teardown')
+
+        @pytest.fixture(scope='function')
+        def function_skip_fixture():
+            logging.info('skip fixture setup')
+            pytest.skip('some reason')
+            yield
+            logging.info('skip fixture teardown')
+
+        @pytest.fixture(scope='function')
+        def function_setup_fail_fixture():
+            logging.info('fail fixture setup')
+            assert False
+            yield
+            logging.info('fail fixture teardown')
+
+        class TestFirstPassLastFail:
+
+            @staticmethod
+            def test_1():
+                logging.info("TestFirstPassLastFail 1")
+
+            @staticmethod
+            def test_2():
+                logging.info("TestFirstPassLastFail 2")
+                assert False
+
+        class TestFirstFailLastPass:
+
+            @staticmethod
+            def test_1():
+                logging.info("TestFirstFailLastPass 1")
+                assert False
+
+            @staticmethod
+            def test_2():
+                logging.info("TestFirstFailLastPass 2")
+
+        class TestSkipFirst:
+            @staticmethod
+            @pytest.mark.skipif(True, reason='Some reason')
+            def test_1():
+                logging.info("TestSkipFirst 1")
+                assert False
+
+            @staticmethod
+            def test_2():
+                logging.info("TestSkipFirst 2")
+                assert False
+
+        class TestSkipLast:
+            @staticmethod
+            def test_1():
+                logging.info("TestSkipLast 1")
+                assert False
+
+            @staticmethod
+            @pytest.mark.skipif(True, reason='Some reason')
+            def test_2():
+                logging.info("TestSkipLast 2")
+                assert False
+
+        class TestSkipFixture:
+            @staticmethod
+            def test_1(function_skip_fixture):
+                logging.info("TestSkipFixture 1")
+
+        class TestSetupFailed:
+            @staticmethod
+            def test_1(function_setup_fail_fixture):
+                logging.info("TestSetupFailed 1")
+
+        class TestTestCaseFailFirstFailLast(TestCase):
+
+            @staticmethod
+            def test_1():
+                logging.info("TestTestCaseFailFirstFailLast 1")
+                assert False
+
+            @staticmethod
+            def test_2():
+                logging.info("TestTestCaseFailFirstFailLast 2")
+                assert False
+
+        class TestTestCaseSkipFirst(TestCase):
+
+            @staticmethod
+            @pytest.mark.skipif(True, reason='Some reason')
+            def test_1():
+                logging.info("TestTestCaseSkipFirst 1")
+                assert False
+
+            @staticmethod
+            def test_2():
+                logging.info("TestTestCaseSkipFirst 2")
+                assert False
+
+        class TestTestCaseSkipLast(TestCase):
+
+            @staticmethod
+            def test_1():
+                logging.info("TestTestCaseSkipLast 1")
+                assert False
+
+            @staticmethod
+            @pytest.mark.skipif(True, reason="Some reason")
+            def test_2():
+                logging.info("TestTestCaseSkipLast 2")
+                assert False"""
+    )
+    import logging
+
+    logging.info = mock.MagicMock()
+
+    result = testdir.runpytest("--reruns", "2")
+    expected_calls = [
+        mock.call("session setup"),
+        # TestFirstPassLastFail
+        mock.call("class setup"),
+        mock.call("function setup"),
+        mock.call("TestFirstPassLastFail 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestFirstPassLastFail 2"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestFirstPassLastFail 2"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestFirstPassLastFail 2"),
+        mock.call("function teardown"),
+        mock.call("class teardown"),
+        # TestFirstFailLastPass
+        mock.call("class setup"),
+        mock.call("function setup"),
+        mock.call("TestFirstFailLastPass 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestFirstFailLastPass 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestFirstFailLastPass 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestFirstFailLastPass 2"),
+        mock.call("function teardown"),
+        mock.call("class teardown"),
+        # TestSkipFirst
+        mock.call("class setup"),
+        mock.call("function setup"),
+        mock.call("TestSkipFirst 2"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestSkipFirst 2"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestSkipFirst 2"),
+        mock.call("function teardown"),
+        mock.call("class teardown"),
+        # TestSkipLast
+        mock.call("class setup"),
+        mock.call("function setup"),
+        mock.call("TestSkipLast 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestSkipLast 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestSkipLast 1"),
+        mock.call("function teardown"),
+        mock.call("class teardown"),
+        # TestSkipFixture
+        mock.call("class setup"),
+        mock.call("function setup"),
+        mock.call("skip fixture setup"),
+        mock.call("function teardown"),
+        mock.call("class teardown"),
+        # TestSetupFailed
+        mock.call("class setup"),
+        mock.call("function setup"),
+        mock.call("fail fixture setup"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("fail fixture setup"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("fail fixture setup"),
+        mock.call("function teardown"),
+        mock.call("class teardown"),
+        # TestTestCaseFailFirstFailLast
+        mock.call("class setup"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseFailFirstFailLast 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseFailFirstFailLast 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseFailFirstFailLast 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseFailFirstFailLast 2"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseFailFirstFailLast 2"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseFailFirstFailLast 2"),
+        mock.call("function teardown"),
+        mock.call("class teardown"),
+        # TestTestCaseSkipFirst
+        mock.call("class setup"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseSkipFirst 2"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseSkipFirst 2"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseSkipFirst 2"),
+        mock.call("function teardown"),
+        mock.call("class teardown"),
+        # TestTestCaseSkipLast
+        mock.call("class setup"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseSkipLast 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseSkipLast 1"),
+        mock.call("function teardown"),
+        mock.call("function setup"),
+        mock.call("TestTestCaseSkipLast 1"),
+        mock.call("function teardown"),
+        mock.call("class teardown"),
+        mock.call("session teardown"),
+    ]
+
+    logging.info.assert_has_calls(expected_calls, any_order=False)
+    assert_outcomes(result, failed=8, passed=2, rerun=18, skipped=5, error=1)
diff --git a/tox.ini b/tox.ini
index 10645fe409856ba825c68630b2e3a23d07a68e0c..f87f006b815f55fbdd3dd291136be55c0dbfcb66 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -11,19 +11,20 @@ max-line-length = 88
 [tox]
 envlist =
     linting
-    py{35,36,37,38,py3}-pytest{50,51,52,53,54,60,61}
-minversion = 3.17.1
+    py{37,38,39,310,311,312,py3}-pytest{70,71,72,73,74}
+    py{38,39,310,311,312,py3}-pytest{main}
+minversion = 4.0
 
 [testenv]
 commands = pytest test_pytest_rerunfailures.py {posargs}
 deps =
-    pytest50: pytest==5.0.*
-    pytest51: pytest==5.1.*
-    pytest52: pytest==5.2.*
-    pytest53: pytest==5.3.*
-    pytest54: pytest==5.4.*
-    pytest60: pytest==6.0.*
-    pytest61: pytest==6.1.*
+    pytest-xdist
+    pytest70: pytest==7.0.*
+    pytest71: pytest==7.1.*
+    pytest72: pytest==7.2.*
+    pytest73: pytest==7.3.*
+    pytest74: pytest==7.4.*
+    pytestmain: git+https://github.com/pytest-dev/pytest.git@main#egg=pytest
 
 [testenv:linting]
 basepython = python3