--- /dev/null
+Changelog
+=========
+
+9.1.1 (2020-09-29)
+------------------
+
+Compatibility fix.
+++++++++++++++++++
+
+- Ignore the ``--result-log`` command line option on ``pytest >= 6.1.0``, as
+  it was removed there. This is a quick fix; use an older version of pytest if
+  you want to keep this feature for now.
+ (Thanks to `@ntessore`_ for the PR)
+
+- Support up to pytest 6.1.0.
+
+.. _@ntessore: https://github.com/ntessore
+
+
+9.1 (2020-08-26)
+----------------
+
+Features
+++++++++
+
+- Add a new flag ``--only-rerun`` to allow users to rerun only certain
+  errors.
+
+Other changes
++++++++++++++
+
+- Drop dependency on ``mock``.
+
+- Add support for pre-commit and add a linting tox target.
+ (`#117 <https://github.com/pytest-dev/pytest-rerunfailures/pull/117>`_)
+ (PR from `@gnikonorov`_)
+
+.. _@gnikonorov: https://github.com/gnikonorov
+
+
+9.0 (2020-03-18)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest versions 4.4, 4.5 and 4.6.
+
+- Drop support for Python 2.7.
+
+
+Features
+++++++++
+
+- Add support for pytest 5.4.
+
+- Add support for Python 3.8.
+
+
+8.0 (2019-11-18)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest versions 3.10, 4.0, 4.1, 4.2 and 4.3.
+
+- Drop support for Python 3.4.
+
+Features
+++++++++
+
+- Add support for pytest versions 4.4, 4.5, 4.6, 5.0, 5.1 and 5.2.
+
+Bug fixes
++++++++++
+
+- Explicitly depend on setuptools to ensure installation when working in
+ environments without it.
+ (`#98 <https://github.com/pytest-dev/pytest-rerunfailures/pull/98>`_)
+ (PR from `@Eric-Arellano`_)
+
+.. _@Eric-Arellano: https://github.com/Eric-Arellano
+
+
+7.0 (2019-03-28)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest versions 3.8 and 3.9.
+
+Features
+++++++++
+
+- Add support for pytest versions 4.2 and 4.3.
+
+Bug fixes
++++++++++
+
+- Fixed issue #83 about ignored ``pytest_runtest_logfinish`` hooks.
+ (`#83 <https://github.com/pytest-dev/pytest-rerunfailures/issues/83>`_)
+ (PR from `@KillAChicken`_)
+
+.. _@KillAChicken: https://github.com/KillAChicken
+
+
+6.0 (2019-01-08)
+----------------
+
+Backwards incompatible changes
+++++++++++++++++++++++++++++++
+
+- Drop support for pytest versions 3.6 and 3.7.
+
+Features
+++++++++
+
+- Add support for pytest versions 4.0 and 4.1.
+
+Bug fixes
++++++++++
+
+- Fixed regression issue #77, introduced in 4.2, related to the ``rerun``
+ attribute on the test report.
+ (`#77 <https://github.com/pytest-dev/pytest-rerunfailures/issues/77>`_)
+ (Thanks to `@RibeiroAna`_ for the PR).
+
+.. _@RibeiroAna: https://github.com/RibeiroAna
+
+
+5.0 (2018-11-06)
+----------------
+
+- Drop support for pytest versions < 3.6 to reduce the maintenance burden.
+
+- Add support up to pytest version 3.10, thus supporting the newest 5 pytest
+  releases.
+
+- Add support for Python 3.7.
+
+- Fix an issue that can occur when used together with ``pytest-flake8``
+ (`#73 <https://github.com/pytest-dev/pytest-rerunfailures/issues/73>`_)
+
+
+4.2 (2018-10-04)
+----------------
+
+- Fixed issue #64 related to ``setup_class`` and ``fixture`` executions on
+  rerun (Thanks to `@OlegKuzovkov`_ for the PR).
+
+- Added a new ``execution_count`` attribute to reflect the number of test
+  case executions (issue #67).
+ (Thanks to `@OlegKuzovkov`_ for the PR).
+
+.. _@OlegKuzovkov: https://github.com/OlegKuzovkov
+
+
+4.1 (2018-05-23)
+----------------
+
+- Add support for pytest 3.6 by using ``Node.get_closest_marker()`` (Thanks to
+ `@The-Compiler`_ for the PR).
+
+.. _@The-Compiler: https://github.com/The-Compiler
+
+4.0 (2017-12-23)
+----------------
+
+- Added option to add a delay time between test re-runs (Thanks to `@Kanguros`_
+ for the PR).
+
+- Added support for pytest >= 3.3.
+
+- Drop support for pytest < 2.8.7.
+
+.. _@Kanguros: https://github.com/Kanguros
+
+
+3.1 (2017-08-29)
+----------------
+
+- Restored compatibility with pytest-xdist. (Thanks to `@davehunt`_ for the PR)
+
+.. _@davehunt: https://github.com/davehunt
+
+
+3.0 (2017-08-17)
+----------------
+
+- Add support for Python 3.6.
+
+- Add support for pytest 2.9 up to 3.2.
+
+- Drop support for Python 2.6 and 3.3.
+
+- Drop support for pytest < 2.7.
+
+
+2.2 (2017-06-23)
+----------------
+
+- Ensure that other plugins can run after this one, in case of a global setting
+  ``--reruns=0``. (Thanks to `@sublee`_ for the PR)
+
+.. _@sublee: https://github.com/sublee
+
+2.1.0 (2016-11-01)
+------------------
+
+- Add default value of ``reruns=1`` if ``pytest.mark.flaky()`` is called
+ without arguments.
+
+- Also offer a distribution as a universal wheel. (Thanks to `@tltx`_ for the PR)
+
+.. _@tltx: https://github.com/tltx
+
+
+2.0.1 (2016-08-10)
+------------------
+
+- Prepare CLI options for pytest 3.0, to avoid a deprecation warning.
+
+- Fix error due to missing CHANGES.rst when creating the source distribution
+ by adding a MANIFEST.in.
+
+
+2.0.0 (2016-04-06)
+------------------
+
+- Drop support for Python 3.2, since supporting it became too much of a hassle.
+ (Reason: Virtualenv 14+ / PIP 8+ do not support Python 3.2 anymore.)
+
+
+1.0.2 (2016-03-29)
+------------------
+
+- Add support for the ``--resultlog`` option by parsing reruns accordingly. (#28)
+
+
+1.0.1 (2016-02-02)
+------------------
+
+- Improve package description and include the CHANGELOG in the description.
+
+
+1.0.0 (2016-02-02)
+------------------
+
+- Rewrite to use the newer API of pytest >= 2.3.0.
+
+- Improve support for pytest-xdist by only logging the final result.
+  (Logging intermediate results will finish the test rather than rerunning it.)
--- /dev/null
+============================
+Contribution getting started
+============================
+
+Contributions are highly welcomed and appreciated. Every little bit of help counts,
+so do not hesitate!
+
+.. contents::
+ :depth: 2
+ :backlinks: none
+
+
+Preparing Pull Requests
+-----------------------
+
+#. Fork the repository.
+
+#. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style guides and code checks are followed::
+
+ $ pip install --user pre-commit
+ $ pre-commit install
+
+ Afterwards ``pre-commit`` will run whenever you commit.
+
+ Note that this is automatically done when running ``tox -e linting``.
+
+   https://pre-commit.com/ is a framework for managing and maintaining multi-language pre-commit hooks
+   to ensure code style and code formatting are consistent.
+
+#. Install `tox <https://tox.readthedocs.io/en/latest/>`_:
+
+   Tox is used to run all the tests and will automatically set up virtualenvs
+   to run the tests in. Implicitly https://virtualenv.pypa.io/ is used::
+
+ $ pip install tox
+ $ tox -e linting,py37
+
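+   To run the tests against a single environment from the matrix in
+   ``tox.ini``, name it explicitly, for example::
+
+   $ tox -e py37-pytest54
+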
+#. Follow **PEP-8** for naming and `black <https://github.com/psf/black>`_ for formatting.
+
+#. Add a line item to the current **unreleased** version in ``CHANGES.rst``, unless the change is trivial.
--- /dev/null
+This Source Code Form is subject to the terms of the Mozilla Public
+License, v. 2.0. If a copy of the MPL was not distributed with this
+file, You can obtain one at https://www.mozilla.org/MPL/2.0/.
--- /dev/null
+include *.py
+include *.rst
+include LICENSE
+include tox.ini
--- /dev/null
+Metadata-Version: 1.2
+Name: pytest-rerunfailures
+Version: 9.1.1
+Summary: pytest plugin to re-run tests to eliminate flaky failures
+Home-page: https://github.com/pytest-dev/pytest-rerunfailures
+Author: Leah Klearman
+Author-email: lklrmn@gmail.com
+License: Mozilla Public License 2.0 (MPL 2.0)
+Description: .. contents::
+
+ pytest-rerunfailures
+ ====================
+
+ pytest-rerunfailures is a plugin for `pytest <https://pytest.org>`_ that
+ re-runs tests to eliminate intermittent failures.
+
+ .. image:: https://img.shields.io/badge/license-MPL%202.0-blue.svg
+ :target: https://github.com/pytest-dev/pytest-rerunfailures/blob/master/LICENSE
+ :alt: License
+ .. image:: https://img.shields.io/pypi/v/pytest-rerunfailures.svg
+ :target: https://pypi.python.org/pypi/pytest-rerunfailures/
+ :alt: PyPI
+ .. image:: https://img.shields.io/travis/pytest-dev/pytest-rerunfailures.svg
+ :target: https://travis-ci.org/pytest-dev/pytest-rerunfailures/
+ :alt: Travis
+
+ Requirements
+ ------------
+
+ You will need the following prerequisites in order to use pytest-rerunfailures:
+
+ - Python 3.5, up to 3.8, or PyPy3
+ - pytest 5.0 or newer
+
+        This package is currently tested against the last 5 minor pytest releases. In
+        case you work with an older version of pytest, you should consider updating or
+        using one of the earlier versions of this package.
+
+ Installation
+ ------------
+
+ To install pytest-rerunfailures:
+
+ .. code-block:: bash
+
+ $ pip install pytest-rerunfailures
+
+ Re-run all failures
+ -------------------
+
+ To re-run all test failures, use the ``--reruns`` command line option with the
+ maximum number of times you'd like the tests to run:
+
+ .. code-block:: bash
+
+ $ pytest --reruns 5
+
+        Failed fixtures and ``setup_class`` methods will also be re-executed.
+
+        To add a delay time between re-runs use the ``--reruns-delay`` command line
+        option with the number of seconds that you would like to wait before the next
+        test re-run is launched:
+
+ .. code-block:: bash
+
+ $ pytest --reruns 5 --reruns-delay 1
+
+ Re-run all failures matching certain expressions
+ ------------------------------------------------
+
+ To re-run only those failures that match a certain list of expressions, use the
+ ``--only-rerun`` flag and pass it a regular expression. For example, the following would
+ only rerun those errors that match ``AssertionError``:
+
+ .. code-block:: bash
+
+ $ pytest --reruns 5 --only-rerun AssertionError
+
+ Passing the flag multiple times accumulates the arguments, so the following would only rerun
+ those errors that match ``AssertionError`` or ``ValueError``:
+
+ .. code-block:: bash
+
+ $ pytest --reruns 5 --only-rerun AssertionError --only-rerun ValueError
+
+ Re-run individual failures
+ --------------------------
+
+ To mark individual tests as flaky, and have them automatically re-run when they
+ fail, add the ``flaky`` mark with the maximum number of times you'd like the
+ test to run:
+
+ .. code-block:: python
+
+ @pytest.mark.flaky(reruns=5)
+ def test_example():
+ import random
+ assert random.choice([True, False])
+
+ Note that when teardown fails, two reports are generated for the case, one for
+ the test case and the other for the teardown error.
+
+ You can also specify the re-run delay time in the marker:
+
+ .. code-block:: python
+
+ @pytest.mark.flaky(reruns=5, reruns_delay=2)
+ def test_example():
+ import random
+ assert random.choice([True, False])
+
+ Output
+ ------
+
+ Here's an example of the output provided by the plugin when run with
+ ``--reruns 2`` and ``-r aR``::
+
+ test_report.py RRF
+
+ ================================== FAILURES ==================================
+ __________________________________ test_fail _________________________________
+
+ def test_fail():
+ > assert False
+ E assert False
+
+ test_report.py:9: AssertionError
+ ============================ rerun test summary info =========================
+ RERUN test_report.py::test_fail
+ RERUN test_report.py::test_fail
+ ============================ short test summary info =========================
+ FAIL test_report.py::test_fail
+ ======================= 1 failed, 2 rerun in 0.02 seconds ====================
+
+ Note that output will show all re-runs. Tests that fail on all the re-runs will
+ be marked as failed.
+
+ Compatibility
+ -------------
+
+ * This plugin may *not* be used with class, module, and package level fixtures.
+        * This plugin is *not* compatible with pytest-xdist's ``--looponfail`` flag.
+        * This plugin is *not* compatible with the core ``--pdb`` flag.
+
+ Resources
+ ---------
+
+ - `Issue Tracker <https://github.com/pytest-dev/pytest-rerunfailures/issues>`_
+ - `Code <https://github.com/pytest-dev/pytest-rerunfailures/>`_
+
+ Development
+ -----------
+
+        * Test execution count can be retrieved from the ``execution_count`` attribute of the test ``item`` object. Example:
+
+ .. code-block:: python
+
+            from pytest import hookimpl
+
+            @hookimpl(tryfirst=True)
+            def pytest_runtest_makereport(item, call):
+                print(item.execution_count)
+
+
+ Changelog
+ =========
+
+ 9.1.1 (2020-09-29)
+ ------------------
+
+ Compatibility fix.
+ ++++++++++++++++++
+
+        - Ignore the ``--result-log`` command line option on ``pytest >= 6.1.0``, as
+          it was removed there. This is a quick fix; use an older version of pytest if
+          you want to keep this feature for now.
+ (Thanks to `@ntessore`_ for the PR)
+
+ - Support up to pytest 6.1.0.
+
+ .. _@ntessore: https://github.com/ntessore
+
+
+ 9.1 (2020-08-26)
+ ----------------
+
+ Features
+ ++++++++
+
+        - Add a new flag ``--only-rerun`` to allow users to rerun only certain
+          errors.
+
+ Other changes
+ +++++++++++++
+
+ - Drop dependency on ``mock``.
+
+ - Add support for pre-commit and add a linting tox target.
+ (`#117 <https://github.com/pytest-dev/pytest-rerunfailures/pull/117>`_)
+ (PR from `@gnikonorov`_)
+
+ .. _@gnikonorov: https://github.com/gnikonorov
+
+
+ 9.0 (2020-03-18)
+ ----------------
+
+ Backwards incompatible changes
+ ++++++++++++++++++++++++++++++
+
+        - Drop support for pytest versions 4.4, 4.5 and 4.6.
+
+ - Drop support for Python 2.7.
+
+
+ Features
+ ++++++++
+
+ - Add support for pytest 5.4.
+
+ - Add support for Python 3.8.
+
+
+ 8.0 (2019-11-18)
+ ----------------
+
+ Backwards incompatible changes
+ ++++++++++++++++++++++++++++++
+
+        - Drop support for pytest versions 3.10, 4.0, 4.1, 4.2 and 4.3.
+
+        - Drop support for Python 3.4.
+
+        Features
+        ++++++++
+
+        - Add support for pytest versions 4.4, 4.5, 4.6, 5.0, 5.1 and 5.2.
+
+ Bug fixes
+ +++++++++
+
+ - Explicitly depend on setuptools to ensure installation when working in
+ environments without it.
+ (`#98 <https://github.com/pytest-dev/pytest-rerunfailures/pull/98>`_)
+ (PR from `@Eric-Arellano`_)
+
+ .. _@Eric-Arellano: https://github.com/Eric-Arellano
+
+
+ 7.0 (2019-03-28)
+ ----------------
+
+ Backwards incompatible changes
+ ++++++++++++++++++++++++++++++
+
+        - Drop support for pytest versions 3.8 and 3.9.
+
+        Features
+        ++++++++
+
+        - Add support for pytest versions 4.2 and 4.3.
+
+ Bug fixes
+ +++++++++
+
+        - Fixed issue #83 about ignored ``pytest_runtest_logfinish`` hooks.
+ (`#83 <https://github.com/pytest-dev/pytest-rerunfailures/issues/83>`_)
+ (PR from `@KillAChicken`_)
+
+ .. _@KillAChicken: https://github.com/KillAChicken
+
+
+ 6.0 (2019-01-08)
+ ----------------
+
+ Backwards incompatible changes
+ ++++++++++++++++++++++++++++++
+
+        - Drop support for pytest versions 3.6 and 3.7.
+
+        Features
+        ++++++++
+
+        - Add support for pytest versions 4.0 and 4.1.
+
+ Bug fixes
+ +++++++++
+
+        - Fixed regression issue #77, introduced in 4.2, related to the ``rerun``
+ attribute on the test report.
+ (`#77 <https://github.com/pytest-dev/pytest-rerunfailures/issues/77>`_)
+ (Thanks to `@RibeiroAna`_ for the PR).
+
+ .. _@RibeiroAna: https://github.com/RibeiroAna
+
+
+ 5.0 (2018-11-06)
+ ----------------
+
+ - Drop support for pytest versions < 3.6 to reduce the maintenance burden.
+
+        - Add support up to pytest version 3.10, thus supporting the newest 5 pytest
+          releases.
+
+ - Add support for Python 3.7.
+
+        - Fix an issue that can occur when used together with ``pytest-flake8``
+ (`#73 <https://github.com/pytest-dev/pytest-rerunfailures/issues/73>`_)
+
+
+ 4.2 (2018-10-04)
+ ----------------
+
+        - Fixed issue #64 related to ``setup_class`` and ``fixture`` executions on
+          rerun (Thanks to `@OlegKuzovkov`_ for the PR).
+
+        - Added a new ``execution_count`` attribute to reflect the number of test
+          case executions (issue #67).
+ (Thanks to `@OlegKuzovkov`_ for the PR).
+
+ .. _@OlegKuzovkov: https://github.com/OlegKuzovkov
+
+
+ 4.1 (2018-05-23)
+ ----------------
+
+ - Add support for pytest 3.6 by using ``Node.get_closest_marker()`` (Thanks to
+ `@The-Compiler`_ for the PR).
+
+ .. _@The-Compiler: https://github.com/The-Compiler
+
+ 4.0 (2017-12-23)
+ ----------------
+
+ - Added option to add a delay time between test re-runs (Thanks to `@Kanguros`_
+ for the PR).
+
+ - Added support for pytest >= 3.3.
+
+ - Drop support for pytest < 2.8.7.
+
+ .. _@Kanguros: https://github.com/Kanguros
+
+
+ 3.1 (2017-08-29)
+ ----------------
+
+ - Restored compatibility with pytest-xdist. (Thanks to `@davehunt`_ for the PR)
+
+ .. _@davehunt: https://github.com/davehunt
+
+
+ 3.0 (2017-08-17)
+ ----------------
+
+ - Add support for Python 3.6.
+
+        - Add support for pytest 2.9 up to 3.2.
+
+ - Drop support for Python 2.6 and 3.3.
+
+ - Drop support for pytest < 2.7.
+
+
+ 2.2 (2017-06-23)
+ ----------------
+
+ - Ensure that other plugins can run after this one, in case of a global setting
+          ``--reruns=0``. (Thanks to `@sublee`_ for the PR)
+
+ .. _@sublee: https://github.com/sublee
+
+ 2.1.0 (2016-11-01)
+ ------------------
+
+ - Add default value of ``reruns=1`` if ``pytest.mark.flaky()`` is called
+ without arguments.
+
+        - Also offer a distribution as a universal wheel. (Thanks to `@tltx`_ for the PR)
+
+ .. _@tltx: https://github.com/tltx
+
+
+ 2.0.1 (2016-08-10)
+        ------------------
+
+        - Prepare CLI options for pytest 3.0, to avoid a deprecation warning.
+
+ - Fix error due to missing CHANGES.rst when creating the source distribution
+ by adding a MANIFEST.in.
+
+
+ 2.0.0 (2016-04-06)
+ ------------------
+
+ - Drop support for Python 3.2, since supporting it became too much of a hassle.
+ (Reason: Virtualenv 14+ / PIP 8+ do not support Python 3.2 anymore.)
+
+
+ 1.0.2 (2016-03-29)
+ ------------------
+
+        - Add support for the ``--resultlog`` option by parsing reruns accordingly. (#28)
+
+
+ 1.0.1 (2016-02-02)
+ ------------------
+
+        - Improve package description and include the CHANGELOG in the description.
+
+
+ 1.0.0 (2016-02-02)
+ ------------------
+
+        - Rewrite to use the newer API of pytest >= 2.3.0.
+
+ - Improve support for pytest-xdist by only logging the final result.
+          (Logging intermediate results will finish the test rather than rerunning it.)
+
+Keywords: py.test pytest rerun failures flaky
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Framework :: Pytest
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.5
--- /dev/null
+pytest-rerunfailures
+====================
+
+pytest-rerunfailures is a plugin for `pytest <https://pytest.org>`_ that
+re-runs tests to eliminate intermittent failures.
+
+.. image:: https://img.shields.io/badge/license-MPL%202.0-blue.svg
+ :target: https://github.com/pytest-dev/pytest-rerunfailures/blob/master/LICENSE
+ :alt: License
+.. image:: https://img.shields.io/pypi/v/pytest-rerunfailures.svg
+ :target: https://pypi.python.org/pypi/pytest-rerunfailures/
+ :alt: PyPI
+.. image:: https://img.shields.io/travis/pytest-dev/pytest-rerunfailures.svg
+ :target: https://travis-ci.org/pytest-dev/pytest-rerunfailures/
+ :alt: Travis
+
+Requirements
+------------
+
+You will need the following prerequisites in order to use pytest-rerunfailures:
+
+- Python 3.5, up to 3.8, or PyPy3
+- pytest 5.0 or newer
+
+This package is currently tested against the last 5 minor pytest releases. In
+case you work with an older version of pytest, you should consider updating or
+using one of the earlier versions of this package.
+
+Installation
+------------
+
+To install pytest-rerunfailures:
+
+.. code-block:: bash
+
+ $ pip install pytest-rerunfailures
+
+Re-run all failures
+-------------------
+
+To re-run all test failures, use the ``--reruns`` command line option with the
+maximum number of times you'd like the tests to run:
+
+.. code-block:: bash
+
+ $ pytest --reruns 5
+
+Failed fixtures and ``setup_class`` methods will also be re-executed.
+
+To add a delay time between re-runs use the ``--reruns-delay`` command line
+option with the number of seconds that you would like to wait before the next
+test re-run is launched:
+
+.. code-block:: bash
+
+ $ pytest --reruns 5 --reruns-delay 1
+
+Re-run all failures matching certain expressions
+------------------------------------------------
+
+To re-run only those failures that match a certain list of expressions, use the
+``--only-rerun`` flag and pass it a regular expression. For example, the following would
+only rerun those errors that match ``AssertionError``:
+
+.. code-block:: bash
+
+ $ pytest --reruns 5 --only-rerun AssertionError
+
+Passing the flag multiple times accumulates the arguments, so the following would only rerun
+those errors that match ``AssertionError`` or ``ValueError``:
+
+.. code-block:: bash
+
+ $ pytest --reruns 5 --only-rerun AssertionError --only-rerun ValueError
+
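+Internally, each pattern is applied with ``re.search`` against the crash
+message of the failure report (see ``_should_hard_fail_on_error`` in
+``pytest_rerunfailures.py``). A minimal sketch of that decision logic:
+
+.. code-block:: python
+
+    import re
+
+    def should_rerun(only_rerun_patterns, crash_message):
+        # No patterns given: every failure is eligible for a rerun.
+        if not only_rerun_patterns:
+            return True
+        # Any matching pattern keeps the failure eligible for a rerun.
+        return any(re.search(p, crash_message) for p in only_rerun_patterns)
+
+    should_rerun(["AssertionError"], "AssertionError: ERR")  # True
+    should_rerun(["ValueError"], "AssertionError: ERR")      # False
+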
+Re-run individual failures
+--------------------------
+
+To mark individual tests as flaky, and have them automatically re-run when they
+fail, add the ``flaky`` mark with the maximum number of times you'd like the
+test to run:
+
+.. code-block:: python
+
+ @pytest.mark.flaky(reruns=5)
+ def test_example():
+ import random
+ assert random.choice([True, False])
+
+Note that when teardown fails, two reports are generated for the case, one for
+the test case and the other for the teardown error.
+
+You can also specify the re-run delay time in the marker:
+
+.. code-block:: python
+
+ @pytest.mark.flaky(reruns=5, reruns_delay=2)
+ def test_example():
+ import random
+ assert random.choice([True, False])
+
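+When both the marker and the command line options are given, the marker takes
+priority for that test (see ``get_reruns_count`` and ``get_reruns_delay`` in
+``pytest_rerunfailures.py``):
+
+.. code-block:: python
+
+    # Even under ``pytest --reruns 5 --reruns-delay 1`` this test is rerun
+    # at most 2 times, with a 2 second delay between runs.
+    @pytest.mark.flaky(reruns=2, reruns_delay=2)
+    def test_example():
+        import random
+        assert random.choice([True, False])
+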
+Output
+------
+
+Here's an example of the output provided by the plugin when run with
+``--reruns 2`` and ``-r aR``::
+
+ test_report.py RRF
+
+ ================================== FAILURES ==================================
+ __________________________________ test_fail _________________________________
+
+ def test_fail():
+ > assert False
+ E assert False
+
+ test_report.py:9: AssertionError
+ ============================ rerun test summary info =========================
+ RERUN test_report.py::test_fail
+ RERUN test_report.py::test_fail
+ ============================ short test summary info =========================
+ FAIL test_report.py::test_fail
+ ======================= 1 failed, 2 rerun in 0.02 seconds ====================
+
+Note that output will show all re-runs. Tests that fail on all the re-runs will
+be marked as failed.
+
+Compatibility
+-------------
+
+* This plugin may *not* be used with class, module, and package level fixtures.
+* This plugin is *not* compatible with pytest-xdist's ``--looponfail`` flag.
+* This plugin is *not* compatible with the core ``--pdb`` flag.
+
+Resources
+---------
+
+- `Issue Tracker <https://github.com/pytest-dev/pytest-rerunfailures/issues>`_
+- `Code <https://github.com/pytest-dev/pytest-rerunfailures/>`_
+
+Development
+-----------
+
+* Test execution count can be retrieved from the ``execution_count`` attribute of the test ``item`` object. Example:
+
+ .. code-block:: python
+
+      from pytest import hookimpl
+
+      @hookimpl(tryfirst=True)
+      def pytest_runtest_makereport(item, call):
+          print(item.execution_count)
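+
+  For example, the attribute can be read from a hook in ``conftest.py``; the
+  plugin sets it only when reruns are enabled for the item (a minimal sketch):
+
+  .. code-block:: python
+
+      # conftest.py
+      def pytest_runtest_teardown(item):
+          # ``execution_count`` is set by the plugin only when reruns
+          # are enabled for this item.
+          print(getattr(item, "execution_count", 1))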
--- /dev/null
+Metadata-Version: 1.2
+Name: pytest-rerunfailures
+Version: 9.1.1
+Summary: pytest plugin to re-run tests to eliminate flaky failures
+Home-page: https://github.com/pytest-dev/pytest-rerunfailures
+Author: Leah Klearman
+Author-email: lklrmn@gmail.com
+License: Mozilla Public License 2.0 (MPL 2.0)
+Description: .. contents::
+
+ pytest-rerunfailures
+ ====================
+
+ pytest-rerunfailures is a plugin for `pytest <https://pytest.org>`_ that
+ re-runs tests to eliminate intermittent failures.
+
+ .. image:: https://img.shields.io/badge/license-MPL%202.0-blue.svg
+ :target: https://github.com/pytest-dev/pytest-rerunfailures/blob/master/LICENSE
+ :alt: License
+ .. image:: https://img.shields.io/pypi/v/pytest-rerunfailures.svg
+ :target: https://pypi.python.org/pypi/pytest-rerunfailures/
+ :alt: PyPI
+ .. image:: https://img.shields.io/travis/pytest-dev/pytest-rerunfailures.svg
+ :target: https://travis-ci.org/pytest-dev/pytest-rerunfailures/
+ :alt: Travis
+
+ Requirements
+ ------------
+
+ You will need the following prerequisites in order to use pytest-rerunfailures:
+
+ - Python 3.5, up to 3.8, or PyPy3
+ - pytest 5.0 or newer
+
+        This package is currently tested against the last 5 minor pytest releases. In
+        case you work with an older version of pytest, you should consider updating or
+        using one of the earlier versions of this package.
+
+ Installation
+ ------------
+
+ To install pytest-rerunfailures:
+
+ .. code-block:: bash
+
+ $ pip install pytest-rerunfailures
+
+ Re-run all failures
+ -------------------
+
+ To re-run all test failures, use the ``--reruns`` command line option with the
+ maximum number of times you'd like the tests to run:
+
+ .. code-block:: bash
+
+ $ pytest --reruns 5
+
+        Failed fixtures and ``setup_class`` methods will also be re-executed.
+
+        To add a delay time between re-runs use the ``--reruns-delay`` command line
+        option with the number of seconds that you would like to wait before the next
+        test re-run is launched:
+
+ .. code-block:: bash
+
+ $ pytest --reruns 5 --reruns-delay 1
+
+ Re-run all failures matching certain expressions
+ ------------------------------------------------
+
+ To re-run only those failures that match a certain list of expressions, use the
+ ``--only-rerun`` flag and pass it a regular expression. For example, the following would
+ only rerun those errors that match ``AssertionError``:
+
+ .. code-block:: bash
+
+ $ pytest --reruns 5 --only-rerun AssertionError
+
+ Passing the flag multiple times accumulates the arguments, so the following would only rerun
+ those errors that match ``AssertionError`` or ``ValueError``:
+
+ .. code-block:: bash
+
+ $ pytest --reruns 5 --only-rerun AssertionError --only-rerun ValueError
+
+ Re-run individual failures
+ --------------------------
+
+ To mark individual tests as flaky, and have them automatically re-run when they
+ fail, add the ``flaky`` mark with the maximum number of times you'd like the
+ test to run:
+
+ .. code-block:: python
+
+ @pytest.mark.flaky(reruns=5)
+ def test_example():
+ import random
+ assert random.choice([True, False])
+
+ Note that when teardown fails, two reports are generated for the case, one for
+ the test case and the other for the teardown error.
+
+ You can also specify the re-run delay time in the marker:
+
+ .. code-block:: python
+
+ @pytest.mark.flaky(reruns=5, reruns_delay=2)
+ def test_example():
+ import random
+ assert random.choice([True, False])
+
+ Output
+ ------
+
+ Here's an example of the output provided by the plugin when run with
+ ``--reruns 2`` and ``-r aR``::
+
+ test_report.py RRF
+
+ ================================== FAILURES ==================================
+ __________________________________ test_fail _________________________________
+
+ def test_fail():
+ > assert False
+ E assert False
+
+ test_report.py:9: AssertionError
+ ============================ rerun test summary info =========================
+ RERUN test_report.py::test_fail
+ RERUN test_report.py::test_fail
+ ============================ short test summary info =========================
+ FAIL test_report.py::test_fail
+ ======================= 1 failed, 2 rerun in 0.02 seconds ====================
+
+ Note that output will show all re-runs. Tests that fail on all the re-runs will
+ be marked as failed.
+
+ Compatibility
+ -------------
+
+ * This plugin may *not* be used with class, module, and package level fixtures.
+        * This plugin is *not* compatible with pytest-xdist's ``--looponfail`` flag.
+        * This plugin is *not* compatible with the core ``--pdb`` flag.
+
+ Resources
+ ---------
+
+ - `Issue Tracker <https://github.com/pytest-dev/pytest-rerunfailures/issues>`_
+ - `Code <https://github.com/pytest-dev/pytest-rerunfailures/>`_
+
+ Development
+ -----------
+
+        * Test execution count can be retrieved from the ``execution_count`` attribute of the test ``item`` object. Example:
+
+ .. code-block:: python
+
+            from pytest import hookimpl
+
+            @hookimpl(tryfirst=True)
+            def pytest_runtest_makereport(item, call):
+                print(item.execution_count)
+
+
+ Changelog
+ =========
+
+ 9.1.1 (2020-09-29)
+ ------------------
+
+ Compatibility fix.
+ ++++++++++++++++++
+
+        - Ignore the ``--result-log`` command line option on ``pytest >= 6.1.0``, as
+          it was removed there. This is a quick fix; use an older version of pytest if
+          you want to keep this feature for now.
+ (Thanks to `@ntessore`_ for the PR)
+
+ - Support up to pytest 6.1.0.
+
+ .. _@ntessore: https://github.com/ntessore
+
+
+ 9.1 (2020-08-26)
+ ----------------
+
+ Features
+ ++++++++
+
+        - Add a new flag ``--only-rerun`` to allow users to rerun only certain
+          errors.
+
+ Other changes
+ +++++++++++++
+
+ - Drop dependency on ``mock``.
+
+ - Add support for pre-commit and add a linting tox target.
+ (`#117 <https://github.com/pytest-dev/pytest-rerunfailures/pull/117>`_)
+ (PR from `@gnikonorov`_)
+
+ .. _@gnikonorov: https://github.com/gnikonorov
+
+
+ 9.0 (2020-03-18)
+ ----------------
+
+ Backwards incompatible changes
+ ++++++++++++++++++++++++++++++
+
+        - Drop support for pytest versions 4.4, 4.5 and 4.6.
+
+ - Drop support for Python 2.7.
+
+
+ Features
+ ++++++++
+
+ - Add support for pytest 5.4.
+
+ - Add support for Python 3.8.
+
+
+ 8.0 (2019-11-18)
+ ----------------
+
+ Backwards incompatible changes
+ ++++++++++++++++++++++++++++++
+
+        - Drop support for pytest versions 3.10, 4.0, 4.1, 4.2 and 4.3.
+
+        - Drop support for Python 3.4.
+
+        Features
+        ++++++++
+
+        - Add support for pytest versions 4.4, 4.5, 4.6, 5.0, 5.1 and 5.2.
+
+ Bug fixes
+ +++++++++
+
+ - Explicitly depend on setuptools to ensure installation when working in
+ environments without it.
+ (`#98 <https://github.com/pytest-dev/pytest-rerunfailures/pull/98>`_)
+ (PR from `@Eric-Arellano`_)
+
+ .. _@Eric-Arellano: https://github.com/Eric-Arellano
+
+
+ 7.0 (2019-03-28)
+ ----------------
+
+ Backwards incompatible changes
+ ++++++++++++++++++++++++++++++
+
+        - Drop support for pytest versions 3.8 and 3.9.
+
+        Features
+        ++++++++
+
+        - Add support for pytest versions 4.2 and 4.3.
+
+ Bug fixes
+ +++++++++
+
+        - Fixed issue #83 about ignored ``pytest_runtest_logfinish`` hooks.
+ (`#83 <https://github.com/pytest-dev/pytest-rerunfailures/issues/83>`_)
+ (PR from `@KillAChicken`_)
+
+ .. _@KillAChicken: https://github.com/KillAChicken
+
+
+ 6.0 (2019-01-08)
+ ----------------
+
+ Backwards incompatible changes
+ ++++++++++++++++++++++++++++++
+
+        - Drop support for pytest versions 3.6 and 3.7.
+
+        Features
+        ++++++++
+
+        - Add support for pytest versions 4.0 and 4.1.
+
+ Bug fixes
+ +++++++++
+
+        - Fixed regression issue #77, introduced in 4.2, related to the ``rerun``
+ attribute on the test report.
+ (`#77 <https://github.com/pytest-dev/pytest-rerunfailures/issues/77>`_)
+ (Thanks to `@RibeiroAna`_ for the PR).
+
+ .. _@RibeiroAna: https://github.com/RibeiroAna
+
+
+ 5.0 (2018-11-06)
+ ----------------
+
+ - Drop support for pytest versions < 3.6 to reduce the maintenance burden.
+
+        - Add support up to pytest version 3.10, thus supporting the newest 5 pytest
+          releases.
+
+ - Add support for Python 3.7.
+
+        - Fix an issue that can occur when used together with ``pytest-flake8``
+ (`#73 <https://github.com/pytest-dev/pytest-rerunfailures/issues/73>`_)
+
+
+ 4.2 (2018-10-04)
+ ----------------
+
+        - Fixed issue #64 related to ``setup_class`` and ``fixture`` executions on
+          rerun (Thanks to `@OlegKuzovkov`_ for the PR).
+
+        - Added a new ``execution_count`` attribute to reflect the number of test
+          case executions (issue #67).
+ (Thanks to `@OlegKuzovkov`_ for the PR).
+
+ .. _@OlegKuzovkov: https://github.com/OlegKuzovkov
+
+
+ 4.1 (2018-05-23)
+ ----------------
+
+ - Add support for pytest 3.6 by using ``Node.get_closest_marker()`` (Thanks to
+ `@The-Compiler`_ for the PR).
+
+ .. _@The-Compiler: https://github.com/The-Compiler
+
+ 4.0 (2017-12-23)
+ ----------------
+
+ - Added option to add a delay time between test re-runs (Thanks to `@Kanguros`_
+ for the PR).
+
+ - Added support for pytest >= 3.3.
+
+ - Drop support for pytest < 2.8.7.
+
+ .. _@Kanguros: https://github.com/Kanguros
+
+
+ 3.1 (2017-08-29)
+ ----------------
+
+ - Restored compatibility with pytest-xdist. (Thanks to `@davehunt`_ for the PR)
+
+ .. _@davehunt: https://github.com/davehunt
+
+
+ 3.0 (2017-08-17)
+ ----------------
+
+ - Add support for Python 3.6.
+
+        - Add support for pytest 2.9 up to 3.2.
+
+ - Drop support for Python 2.6 and 3.3.
+
+ - Drop support for pytest < 2.7.
+
+
+ 2.2 (2017-06-23)
+ ----------------
+
+ - Ensure that other plugins can run after this one, in case of a global setting
+          ``--reruns=0``. (Thanks to `@sublee`_ for the PR)
+
+ .. _@sublee: https://github.com/sublee
+
+ 2.1.0 (2016-11-01)
+ ------------------
+
+ - Add default value of ``reruns=1`` if ``pytest.mark.flaky()`` is called
+ without arguments.
+
+        - Also offer a distribution as a universal wheel. (Thanks to `@tltx`_ for the PR)
+
+ .. _@tltx: https://github.com/tltx
+
+
+ 2.0.1 (2016-08-10)
+        ------------------
+
+        - Prepare CLI options for pytest 3.0, to avoid a deprecation warning.
+
+ - Fix error due to missing CHANGES.rst when creating the source distribution
+ by adding a MANIFEST.in.
+
+
+ 2.0.0 (2016-04-06)
+ ------------------
+
+ - Drop support for Python 3.2, since supporting it became too much of a hassle.
+ (Reason: Virtualenv 14+ / PIP 8+ do not support Python 3.2 anymore.)
+
+
+ 1.0.2 (2016-03-29)
+ ------------------
+
+        - Add support for the ``--resultlog`` option by parsing reruns accordingly. (#28)
+
+
+ 1.0.1 (2016-02-02)
+ ------------------
+
+        - Improve package description and include the CHANGELOG in the description.
+
+
+ 1.0.0 (2016-02-02)
+ ------------------
+
+        - Rewrite to use the newer API of pytest >= 2.3.0.
+
+ - Improve support for pytest-xdist by only logging the final result.
+          (Logging intermediate results will finish the test rather than rerunning it.)
+
+Keywords: py.test pytest rerun failures flaky
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Framework :: Pytest
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.5
--- /dev/null
+CHANGES.rst
+CONTRIBUTING.rst
+LICENSE
+MANIFEST.in
+README.rst
+pytest_rerunfailures.py
+setup.cfg
+setup.py
+test_pytest_rerunfailures.py
+tox.ini
+pytest_rerunfailures.egg-info/PKG-INFO
+pytest_rerunfailures.egg-info/SOURCES.txt
+pytest_rerunfailures.egg-info/dependency_links.txt
+pytest_rerunfailures.egg-info/entry_points.txt
+pytest_rerunfailures.egg-info/not-zip-safe
+pytest_rerunfailures.egg-info/requires.txt
+pytest_rerunfailures.egg-info/top_level.txt
\ No newline at end of file
--- /dev/null
+[pytest11]
+rerunfailures = pytest_rerunfailures
+
--- /dev/null
+setuptools>=40.0
+pytest>=5.0
--- /dev/null
+pytest_rerunfailures
--- /dev/null
+import re
+import time
+import warnings
+
+import pkg_resources
+import pytest
+from _pytest.runner import runtestprotocol
+
+PYTEST_GTE_54 = pkg_resources.parse_version(
+ pytest.__version__
+) >= pkg_resources.parse_version("5.4")
+
+PYTEST_GTE_61 = pkg_resources.parse_version(
+ pytest.__version__
+) >= pkg_resources.parse_version("6.1")
+
+
+def works_with_current_xdist():
+ """Returns compatibility with installed pytest-xdist version.
+
+    When running tests in parallel using pytest-xdist < 1.20.0, the first
+    report that is logged will finish and terminate the current node rather
+    than rerunning the test. Thus we must skip logging of intermediate results
+    under these circumstances, otherwise no test is rerun.
+
+ """
+ try:
+ d = pkg_resources.get_distribution("pytest-xdist")
+ return d.parsed_version >= pkg_resources.parse_version("1.20")
+ except pkg_resources.DistributionNotFound:
+ return None
+
+
+# command line options
+def pytest_addoption(parser):
+ group = parser.getgroup(
+ "rerunfailures", "re-run failing tests to eliminate flaky failures"
+ )
+ group._addoption(
+ "--only-rerun",
+ action="append",
+ dest="only_rerun",
+ type=str,
+ default=None,
+ help="If passed, only rerun errors matching the regex provided. "
+ "Pass this flag multiple times to accumulate a list of regexes "
+ "to match",
+ )
+ group._addoption(
+ "--reruns",
+ action="store",
+ dest="reruns",
+ type=int,
+ default=0,
+ help="number of times to re-run failed tests. defaults to 0.",
+ )
+ group._addoption(
+ "--reruns-delay",
+ action="store",
+ dest="reruns_delay",
+ type=float,
+ default=0,
+ help="add time (seconds) delay between reruns.",
+ )
+
+
+def pytest_configure(config):
+ # add flaky marker
+ config.addinivalue_line(
+ "markers",
+ "flaky(reruns=1, reruns_delay=0): mark test to re-run up "
+ "to 'reruns' times. Add a delay of 'reruns_delay' seconds "
+ "between re-runs.",
+ )
+
+
+def _get_resultlog(config):
+ if PYTEST_GTE_61:
+ return None
+ elif PYTEST_GTE_54:
+ # hack
+ from _pytest.resultlog import resultlog_key
+
+ return config._store.get(resultlog_key, default=None)
+ else:
+ return getattr(config, "_resultlog", None)
+
+
+def _set_resultlog(config, resultlog):
+ if PYTEST_GTE_61:
+ pass
+ elif PYTEST_GTE_54:
+ # hack
+ from _pytest.resultlog import resultlog_key
+
+ config._store[resultlog_key] = resultlog
+ else:
+ config._resultlog = resultlog
+
+
+# making sure the options make sense
+# should run before / at the beginning of pytest_cmdline_main
+def check_options(config):
+ val = config.getvalue
+ if not val("collectonly"):
+ if config.option.reruns != 0:
+ if config.option.usepdb: # a core option
+ raise pytest.UsageError("--reruns incompatible with --pdb")
+
+ resultlog = _get_resultlog(config)
+ if resultlog:
+ logfile = resultlog.logfile
+ config.pluginmanager.unregister(resultlog)
+ new_resultlog = RerunResultLog(config, logfile)
+ _set_resultlog(config, new_resultlog)
+ config.pluginmanager.register(new_resultlog)
+
+
+def _get_marker(item):
+ try:
+ return item.get_closest_marker("flaky")
+ except AttributeError:
+ # pytest < 3.6
+ return item.get_marker("flaky")
+
+
+def get_reruns_count(item):
+ rerun_marker = _get_marker(item)
+ reruns = None
+
+ # use the marker as a priority over the global setting.
+ if rerun_marker is not None:
+ if "reruns" in rerun_marker.kwargs:
+ # check for keyword arguments
+ reruns = rerun_marker.kwargs["reruns"]
+ elif len(rerun_marker.args) > 0:
+ # check for arguments
+ reruns = rerun_marker.args[0]
+ else:
+ reruns = 1
+ elif item.session.config.option.reruns:
+ # default to the global setting
+ reruns = item.session.config.option.reruns
+
+ return reruns
+
+
+def get_reruns_delay(item):
+ rerun_marker = _get_marker(item)
+
+ if rerun_marker is not None:
+ if "reruns_delay" in rerun_marker.kwargs:
+ delay = rerun_marker.kwargs["reruns_delay"]
+ elif len(rerun_marker.args) > 1:
+ # check for arguments
+ delay = rerun_marker.args[1]
+ else:
+ delay = 0
+ else:
+ delay = item.session.config.option.reruns_delay
+
+ if delay < 0:
+ delay = 0
+ warnings.warn(
+ "Delay time between re-runs cannot be < 0. Using default value: 0"
+ )
+
+ return delay
+
+
+def _remove_cached_results_from_failed_fixtures(item):
+ """
+    Note: clear the cached_result attribute of every fixture whose setup failed
+ """
+ cached_result = "cached_result"
+ fixture_info = getattr(item, "_fixtureinfo", None)
+ for fixture_def_str in getattr(fixture_info, "name2fixturedefs", ()):
+ fixture_defs = fixture_info.name2fixturedefs[fixture_def_str]
+ for fixture_def in fixture_defs:
+ if getattr(fixture_def, cached_result, None) is not None:
+ result, cache_key, err = getattr(fixture_def, cached_result)
+ if err: # Deleting cached results for only failed fixtures
+ if PYTEST_GTE_54:
+ setattr(fixture_def, cached_result, None)
+ else:
+ delattr(fixture_def, cached_result)
+
+
+def _remove_failed_setup_state_from_session(item):
+ """
+    Note: remove the _prepare_exc attribute from every collector in the
+    _setupstate stack and clear the stack itself
+ """
+ prepare_exc = "_prepare_exc"
+ setup_state = getattr(item.session, "_setupstate")
+ for col in setup_state.stack:
+ if hasattr(col, prepare_exc):
+ delattr(col, prepare_exc)
+ setup_state.stack = list()
+
+
+def _should_hard_fail_on_error(session_config, report):
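+    """Return True if a failed report must fail outright instead of rerunning.
+
+    This is the case when ``--only-rerun`` regexes were given and none of them
+    matches the crash message of the failure.
+    """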
+ if report.outcome != "failed":
+ return False
+
+ rerun_errors = session_config.option.only_rerun
+ if not rerun_errors:
+ return False
+
+ for rerun_regex in rerun_errors:
+ if re.search(rerun_regex, report.longrepr.reprcrash.message):
+ return False
+
+ return True
+
+
+def pytest_runtest_protocol(item, nextitem):
+ """
+ Note: when teardown fails, two reports are generated for the case, one for
+ the test case and the other for the teardown error.
+ """
+
+ reruns = get_reruns_count(item)
+ if reruns is None:
+ # global setting is not specified, and this test is not marked with
+ # flaky
+ return
+
+ # while this doesn't need to be run with every item, it will fail on the
+ # first item if necessary
+ check_options(item.session.config)
+ delay = get_reruns_delay(item)
+ parallel = hasattr(item.config, "slaveinput")
+ item.execution_count = 0
+
+ need_to_run = True
+ while need_to_run:
+ item.execution_count += 1
+ item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
+ reports = runtestprotocol(item, nextitem=nextitem, log=False)
+
+ for report in reports: # 3 reports: setup, call, teardown
+ is_terminal_error = _should_hard_fail_on_error(item.session.config, report)
+ report.rerun = item.execution_count - 1
+ xfail = hasattr(report, "wasxfail")
+ if (
+ item.execution_count > reruns
+ or not report.failed
+ or xfail
+ or is_terminal_error
+ ):
+ # last run or no failure detected, log normally
+ item.ihook.pytest_runtest_logreport(report=report)
+ else:
+                # failure detected and reruns not exhausted, since
+                # execution_count <= reruns
+ report.outcome = "rerun"
+ time.sleep(delay)
+
+ if not parallel or works_with_current_xdist():
+ # will rerun test, log intermediate result
+ item.ihook.pytest_runtest_logreport(report=report)
+
+            # cleaning item's cached results from any level of setups
+ _remove_cached_results_from_failed_fixtures(item)
+ _remove_failed_setup_state_from_session(item)
+
+ break # trigger rerun
+ else:
+ need_to_run = False
+
+ item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
+
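+    # Returning True tells pytest that this hook handled the runtest protocol,
+    # so the default protocol does not run this item again.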
+ return True
+
+
+def pytest_report_teststatus(report):
+ """Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
+ """
+ if report.outcome == "rerun":
+ return "rerun", "R", ("RERUN", {"yellow": True})
+
+
+def pytest_terminal_summary(terminalreporter):
+ """Adapted from https://pytest.org/latest/_modules/_pytest/skipping.html
+ """
+ tr = terminalreporter
+ if not tr.reportchars:
+ return
+
+ lines = []
+ for char in tr.reportchars:
+ if char in "rR":
+ show_rerun(terminalreporter, lines)
+
+ if lines:
+ tr._tw.sep("=", "rerun test summary info")
+ for line in lines:
+ tr._tw.line(line)
+
+
+def show_rerun(terminalreporter, lines):
+ rerun = terminalreporter.stats.get("rerun")
+ if rerun:
+ for rep in rerun:
+ pos = rep.nodeid
+ lines.append("RERUN {}".format(pos))
+
+
+if not PYTEST_GTE_61:
+ from _pytest.resultlog import ResultLog
+
+ class RerunResultLog(ResultLog):
+ def __init__(self, config, logfile):
+ ResultLog.__init__(self, config, logfile)
+
+ def pytest_runtest_logreport(self, report):
+ """
+ Adds support for rerun report fix for issue:
+ https://github.com/pytest-dev/pytest-rerunfailures/issues/28
+ """
+ if report.when != "call" and report.passed:
+ return
+ res = self.config.hook.pytest_report_teststatus(report=report)
+ code = res[1]
+ if code == "x":
+ longrepr = str(report.longrepr)
+ elif code == "X":
+ longrepr = ""
+ elif report.passed:
+ longrepr = ""
+ elif report.failed:
+ longrepr = str(report.longrepr)
+ elif report.skipped:
+ longrepr = str(report.longrepr[2])
+ elif report.outcome == "rerun":
+ longrepr = str(report.longrepr)
+ else:
+ longrepr = str(report.longrepr)
+
+ self.log_outcome(report, code, longrepr)
--- /dev/null
+[bdist_wheel]
+universal = 0
+
+[check-manifest]
+ignore =
+ .pre-commit-config.yaml
+
+[egg_info]
+tag_build =
+tag_date = 0
+
--- /dev/null
+from setuptools import setup
+
+with open("README.rst") as readme, open("CHANGES.rst") as changelog:
+ long_description = ".. contents::\n\n" + readme.read() + "\n\n" + changelog.read()
+
+setup(
+ name="pytest-rerunfailures",
+ version="9.1.1",
+ description="pytest plugin to re-run tests to eliminate flaky failures",
+ long_description=long_description,
+ author="Leah Klearman",
+ author_email="lklrmn@gmail.com",
+ url="https://github.com/pytest-dev/pytest-rerunfailures",
+ py_modules=["pytest_rerunfailures"],
+ entry_points={"pytest11": ["rerunfailures = pytest_rerunfailures"]},
+ install_requires=["setuptools>=40.0", "pytest >= 5.0"],
+ python_requires=">=3.5",
+ license="Mozilla Public License 2.0 (MPL 2.0)",
+ keywords="py.test pytest rerun failures flaky",
+ zip_safe=False,
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Framework :: Pytest",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
+ "Operating System :: POSIX",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: MacOS :: MacOS X",
+ "Topic :: Software Development :: Quality Assurance",
+ "Topic :: Software Development :: Testing",
+ "Topic :: Utilities",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: Implementation :: CPython",
+ "Programming Language :: Python :: Implementation :: PyPy",
+ ],
+)
--- /dev/null
+import random
+import time
+from unittest import mock
+
+import pkg_resources
+import pytest
+
+
+pytest_plugins = "pytester"
+
+PYTEST_GTE_61 = pkg_resources.parse_version(
+ pytest.__version__
+) >= pkg_resources.parse_version("6.1")
+
+
+def temporary_failure(count=1):
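+    """Return a test body that fails the first ``count`` executions.
+
+    The execution counter is persisted in a ``test.res`` file next to the
+    test module, so the failure disappears after ``count`` runs.
+    """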
+ return """
+ import py
+ path = py.path.local(__file__).dirpath().ensure('test.res')
+ count = path.read() or 1
+ if int(count) <= {0}:
+ path.write(int(count) + 1)
+ raise Exception('Failure: {{0}}'.format(count))""".format(
+ count
+ )
+
+
+def check_outcome_field(outcomes, field_name, expected_value):
+ field_value = outcomes.get(field_name, 0)
+ assert (
+ field_value == expected_value
+ ), "outcomes.{} has unexpected value. Expected '{}' but got '{}'".format(
+ field_name, expected_value, field_value
+ )
+
+
+def assert_outcomes(
+ result, passed=1, skipped=0, failed=0, error=0, xfailed=0, xpassed=0, rerun=0,
+):
+ outcomes = result.parseoutcomes()
+ check_outcome_field(outcomes, "passed", passed)
+ check_outcome_field(outcomes, "skipped", skipped)
+ check_outcome_field(outcomes, "failed", failed)
+ check_outcome_field(outcomes, "xfailed", xfailed)
+ check_outcome_field(outcomes, "xpassed", xpassed)
+ check_outcome_field(outcomes, "rerun", rerun)
+
+
+def test_error_when_run_with_pdb(testdir):
+ testdir.makepyfile("def test_pass(): pass")
+ result = testdir.runpytest("--reruns", "1", "--pdb")
+ result.stderr.fnmatch_lines_random("ERROR: --reruns incompatible with --pdb")
+
+
+def test_no_rerun_on_pass(testdir):
+ testdir.makepyfile("def test_pass(): pass")
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result)
+
+
+def test_no_rerun_on_skipif_mark(testdir):
+ reason = str(random.random())
+ testdir.makepyfile(
+ """
+ import pytest
+ @pytest.mark.skipif(reason='{}')
+ def test_skip():
+ pass
+ """.format(
+ reason
+ )
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=0, skipped=1)
+
+
+def test_no_rerun_on_skip_call(testdir):
+ reason = str(random.random())
+ testdir.makepyfile(
+ """
+ import pytest
+ def test_skip():
+ pytest.skip('{}')
+ """.format(
+ reason
+ )
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=0, skipped=1)
+
+
+def test_no_rerun_on_xfail_mark(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ @pytest.mark.xfail()
+ def test_xfail():
+ assert False
+ """
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=0, xfailed=1)
+
+
+def test_no_rerun_on_xfail_call(testdir):
+ reason = str(random.random())
+ testdir.makepyfile(
+ """
+ import pytest
+ def test_xfail():
+ pytest.xfail('{}')
+ """.format(
+ reason
+ )
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=0, xfailed=1)
+
+
+def test_no_rerun_on_xpass(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ @pytest.mark.xfail()
+ def test_xpass():
+ pass
+ """
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=0, xpassed=1)
+
+
+def test_rerun_fails_after_consistent_setup_failure(testdir):
+ testdir.makepyfile("def test_pass(): pass")
+ testdir.makeconftest(
+ """
+ def pytest_runtest_setup(item):
+ raise Exception('Setup failure')"""
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=0, error=1, rerun=1)
+
+
+def test_rerun_passes_after_temporary_setup_failure(testdir):
+ testdir.makepyfile("def test_pass(): pass")
+ testdir.makeconftest(
+ """
+ def pytest_runtest_setup(item):
+ {}""".format(
+ temporary_failure()
+ )
+ )
+ result = testdir.runpytest("--reruns", "1", "-r", "R")
+ assert_outcomes(result, passed=1, rerun=1)
+
+
+def test_rerun_fails_after_consistent_test_failure(testdir):
+ testdir.makepyfile("def test_fail(): assert False")
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=0, failed=1, rerun=1)
+
+
+def test_rerun_passes_after_temporary_test_failure(testdir):
+ testdir.makepyfile(
+ """
+ def test_pass():
+ {}""".format(
+ temporary_failure()
+ )
+ )
+ result = testdir.runpytest("--reruns", "1", "-r", "R")
+ assert_outcomes(result, passed=1, rerun=1)
+
+
+def test_rerun_passes_after_temporary_test_failure_with_flaky_mark(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ @pytest.mark.flaky(reruns=2)
+ def test_pass():
+ {}""".format(
+ temporary_failure(2)
+ )
+ )
+ result = testdir.runpytest("-r", "R")
+ assert_outcomes(result, passed=1, rerun=2)
+
+
+def test_reruns_if_flaky_mark_is_called_without_options(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ @pytest.mark.flaky()
+ def test_pass():
+ {}""".format(
+ temporary_failure(1)
+ )
+ )
+ result = testdir.runpytest("-r", "R")
+ assert_outcomes(result, passed=1, rerun=1)
+
+
+def test_reruns_if_flaky_mark_is_called_with_positional_argument(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ @pytest.mark.flaky(2)
+ def test_pass():
+ {}""".format(
+ temporary_failure(2)
+ )
+ )
+ result = testdir.runpytest("-r", "R")
+ assert_outcomes(result, passed=1, rerun=2)
+
+
+def test_no_extra_test_summary_for_reruns_by_default(testdir):
+ testdir.makepyfile(
+ """
+ def test_pass():
+ {}""".format(
+ temporary_failure()
+ )
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert "RERUN" not in result.stdout.str()
+ assert "1 rerun" in result.stdout.str()
+
+
+def test_extra_test_summary_for_reruns(testdir):
+ testdir.makepyfile(
+ """
+ def test_pass():
+ {}""".format(
+ temporary_failure()
+ )
+ )
+ result = testdir.runpytest("--reruns", "1", "-r", "R")
+ result.stdout.fnmatch_lines_random(["RERUN test_*:*"])
+ assert "1 rerun" in result.stdout.str()
+
+
+def test_verbose(testdir):
+ testdir.makepyfile(
+ """
+ def test_pass():
+ {}""".format(
+ temporary_failure()
+ )
+ )
+ result = testdir.runpytest("--reruns", "1", "-v")
+ result.stdout.fnmatch_lines_random(["test_*:* RERUN*"])
+ assert "1 rerun" in result.stdout.str()
+
+
+def test_no_rerun_on_class_setup_error_without_reruns(testdir):
+ testdir.makepyfile(
+ """
+ class TestFoo(object):
+ @classmethod
+ def setup_class(cls):
+ assert False
+
+ def test_pass():
+ pass"""
+ )
+ result = testdir.runpytest("--reruns", "0")
+ assert_outcomes(result, passed=0, error=1, rerun=0)
+
+
+def test_rerun_on_class_setup_error_with_reruns(testdir):
+ testdir.makepyfile(
+ """
+ class TestFoo(object):
+ @classmethod
+ def setup_class(cls):
+ assert False
+
+ def test_pass():
+ pass"""
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=0, error=1, rerun=1)
+
+
+@pytest.mark.skipif(PYTEST_GTE_61, reason="--result-log removed in pytest>=6.1")
+def test_rerun_with_resultslog(testdir):
+ testdir.makepyfile(
+ """
+ def test_fail():
+ assert False"""
+ )
+
+ result = testdir.runpytest("--reruns", "2", "--result-log", "./pytest.log")
+
+ assert_outcomes(result, passed=0, failed=1, rerun=2)
+
+
+@pytest.mark.parametrize("delay_time", [-1, 0, 0.0, 1, 2.5])
+def test_reruns_with_delay(testdir, delay_time):
+ testdir.makepyfile(
+ """
+ def test_fail():
+ assert False"""
+ )
+
+ time.sleep = mock.MagicMock()
+
+ result = testdir.runpytest("--reruns", "3", "--reruns-delay", str(delay_time))
+
+ if delay_time < 0:
+ result.stdout.fnmatch_lines(
+ "*UserWarning: Delay time between re-runs cannot be < 0. "
+ "Using default value: 0"
+ )
+ delay_time = 0
+
+ time.sleep.assert_called_with(delay_time)
+
+ assert_outcomes(result, passed=0, failed=1, rerun=3)
+
+
+@pytest.mark.parametrize("delay_time", [-1, 0, 0.0, 1, 2.5])
+def test_reruns_with_delay_marker(testdir, delay_time):
+ testdir.makepyfile(
+ """
+ import pytest
+
+ @pytest.mark.flaky(reruns=2, reruns_delay={})
+ def test_fail_two():
+ assert False""".format(
+ delay_time
+ )
+ )
+
+ time.sleep = mock.MagicMock()
+
+ result = testdir.runpytest()
+
+ if delay_time < 0:
+ result.stdout.fnmatch_lines(
+ "*UserWarning: Delay time between re-runs cannot be < 0. "
+ "Using default value: 0"
+ )
+ delay_time = 0
+
+ time.sleep.assert_called_with(delay_time)
+
+ assert_outcomes(result, passed=0, failed=1, rerun=2)
+
+
+def test_rerun_on_setup_class_with_error_with_reruns(testdir):
+ """
+ Case: setup_class throwing error on the first execution for parametrized test
+ """
+ testdir.makepyfile(
+ """
+ import pytest
+
+ pass_fixture = False
+
+ class TestFoo(object):
+ @classmethod
+ def setup_class(cls):
+ global pass_fixture
+ if not pass_fixture:
+ pass_fixture = True
+ assert False
+ assert True
+ @pytest.mark.parametrize('param', [1, 2, 3])
+ def test_pass(self, param):
+ assert param"""
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=3, rerun=1)
+
+
+def test_rerun_on_class_scope_fixture_with_error_with_reruns(testdir):
+ """
+ Case: Class scope fixture throwing error on the first execution
+ for parametrized test
+ """
+ testdir.makepyfile(
+ """
+ import pytest
+
+ pass_fixture = False
+
+ class TestFoo(object):
+
+ @pytest.fixture(scope="class")
+ def setup_fixture(self):
+ global pass_fixture
+ if not pass_fixture:
+ pass_fixture = True
+ assert False
+ assert True
+ @pytest.mark.parametrize('param', [1, 2, 3])
+ def test_pass(self, setup_fixture, param):
+ assert param"""
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=3, rerun=1)
+
+
+def test_rerun_on_module_fixture_with_reruns(testdir):
+ """
+ Case: Module scope fixture is not re-executed when class scope fixture throwing
+ error on the first execution for parametrized test
+ """
+ testdir.makepyfile(
+ """
+ import pytest
+
+ pass_fixture = False
+
+ @pytest.fixture(scope='module')
+ def module_fixture():
+ assert not pass_fixture
+
+ class TestFoo(object):
+ @pytest.fixture(scope="class")
+ def setup_fixture(self):
+ global pass_fixture
+ if not pass_fixture:
+ pass_fixture = True
+ assert False
+ assert True
+ def test_pass_1(self, module_fixture, setup_fixture):
+ assert True
+
+ def test_pass_2(self, module_fixture, setup_fixture):
+ assert True"""
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=2, rerun=1)
+
+
+def test_rerun_on_session_fixture_with_reruns(testdir):
+ """
+ Case: Module scope fixture is not re-executed when class scope fixture
+ throwing error on the first execution for parametrized test
+ """
+ testdir.makepyfile(
+ """
+ import pytest
+
+ pass_fixture = False
+
+ @pytest.fixture(scope='session')
+ def session_fixture():
+ assert not pass_fixture
+
+ class TestFoo(object):
+ @pytest.fixture(scope="class")
+ def setup_fixture(self):
+ global pass_fixture
+ if not pass_fixture:
+ pass_fixture = True
+ assert False
+ assert True
+
+ def test_pass_1(self, session_fixture, setup_fixture):
+ assert True
+ def test_pass_2(self, session_fixture, setup_fixture):
+ assert True"""
+ )
+ result = testdir.runpytest("--reruns", "1")
+ assert_outcomes(result, passed=2, rerun=1)
+
+
+def test_execution_count_exposed(testdir):
+ testdir.makepyfile("def test_pass(): assert True")
+ testdir.makeconftest(
+ """
+ def pytest_runtest_teardown(item):
+ assert item.execution_count == 3"""
+ )
+ result = testdir.runpytest("--reruns", "2")
+ assert_outcomes(result, passed=3, rerun=2)
+
+
+def test_rerun_report(testdir):
+ testdir.makepyfile("def test_pass(): assert False")
+ testdir.makeconftest(
+ """
+ def pytest_runtest_logreport(report):
+ assert hasattr(report, 'rerun')
+ assert isinstance(report.rerun, int)
+ assert report.rerun <= 2
+ """
+ )
+ result = testdir.runpytest("--reruns", "2")
+ assert_outcomes(result, failed=1, rerun=2, passed=0)
+
+
+def test_pytest_runtest_logfinish_is_called(testdir):
+ hook_message = "Message from pytest_runtest_logfinish hook"
+ testdir.makepyfile("def test_pass(): pass")
+ testdir.makeconftest(
+ r"""
+ def pytest_runtest_logfinish(nodeid, location):
+ print("\n{}\n")
+ """.format(
+ hook_message
+ )
+ )
+ result = testdir.runpytest("--reruns", "1", "-s")
+ result.stdout.fnmatch_lines(hook_message)
+
+
+@pytest.mark.parametrize(
+ "only_rerun_texts, should_rerun",
+ [
+ (["AssertionError"], True),
+ (["Assertion*"], True),
+ (["Assertion"], True),
+ (["ValueError"], False),
+ ([""], True),
+ (["AssertionError: "], True),
+ (["AssertionError: ERR"], True),
+ (["ERR"], True),
+ (["AssertionError,ValueError"], False),
+ (["AssertionError ValueError"], False),
+ (["AssertionError", "ValueError"], True),
+ ],
+)
+def test_only_rerun_flag(testdir, only_rerun_texts, should_rerun):
+ testdir.makepyfile('def test_only_rerun(): raise AssertionError("ERR")')
+
+ num_failed = 1
+ num_passed = 0
+ num_reruns = 1
+ num_reruns_actual = num_reruns if should_rerun else 0
+
+ pytest_args = ["--reruns", str(num_reruns)]
+ for only_rerun_text in only_rerun_texts:
+ pytest_args.extend(["--only-rerun", only_rerun_text])
+ result = testdir.runpytest(*pytest_args)
+ assert_outcomes(
+ result, passed=num_passed, failed=num_failed, rerun=num_reruns_actual
+ )
--- /dev/null
+# Tox (https://tox.readthedocs.io/en/latest/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported Python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[flake8]
+# NOTE: This is kept in line with Black
+# See: https://black.readthedocs.io/en/stable/the_black_code_style.html#line-length
+max-line-length = 88
+
+[tox]
+envlist =
+ linting
+ py{35,36,37,38,py3}-pytest{50,51,52,53,54,60,61}
+minversion = 3.17.1
+
+[testenv]
+commands = pytest test_pytest_rerunfailures.py {posargs}
+deps =
+ pytest50: pytest==5.0.*
+ pytest51: pytest==5.1.*
+ pytest52: pytest==5.2.*
+ pytest53: pytest==5.3.*
+ pytest54: pytest==5.4.*
+ pytest60: pytest==6.0.*
+ pytest61: pytest==6.1.*
+
+[testenv:linting]
+basepython = python3
+commands = pre-commit run --all-files --show-diff-on-failure {posargs:}
+deps = pre-commit>=1.11.0
+skip_install = True