about: Ideas for new features and improvements
---
+
+<!--
+Thanks for suggesting a feature!
+
+Quick check-list while suggesting features:
+-->
+
+#### What's the problem this feature will solve?
+<!-- What are you trying to do, that you are unable to achieve with pytest as it currently stands? -->
+
+#### Describe the solution you'd like
+<!-- A clear and concise description of what you want to happen. -->
+
+<!-- Provide examples of real-world use cases that this would enable and how it solves the problem described above. -->
+
+#### Alternative Solutions
+<!-- Have you tried to work around the problem using a pytest plugin or other tools? Or a different approach to solving this issue? Please elaborate here. -->
+
+#### Additional context
+<!-- Add any other context, links, etc. about the feature here. -->
--- /dev/null
+version: 2
+updates:
+- package-ecosystem: pip
+ directory: "/testing/plugins_integration"
+ schedule:
+ interval: weekly
+ time: "03:00"
+ open-pull-requests-limit: 10
+ allow:
+ - dependency-type: direct
+ - dependency-type: indirect
- master
- "[0-9]+.[0-9]+.x"
tags:
- - "*"
+ - "[0-9]+.[0-9]+.[0-9]+"
+ - "[0-9]+.[0-9]+.[0-9]+rc[0-9]+"
pull_request:
branches:
fail-fast: false
matrix:
name: [
- "windows-py35",
"windows-py36",
"windows-py37",
"windows-py37-pluggy",
"windows-py38",
- "ubuntu-py35",
"ubuntu-py36",
"ubuntu-py37",
"ubuntu-py37-pluggy",
]
include:
- - name: "windows-py35"
- python: "3.5"
- os: windows-latest
- tox_env: "py35-xdist"
- use_coverage: true
- name: "windows-py36"
python: "3.6"
os: windows-latest
tox_env: "py38-unittestextras"
use_coverage: true
- - name: "ubuntu-py35"
- python: "3.5"
- os: ubuntu-latest
- tox_env: "py35-xdist"
- name: "ubuntu-py36"
python: "3.6"
os: ubuntu-latest
- name: "ubuntu-py37"
python: "3.7"
os: ubuntu-latest
- tox_env: "py37-lsof-numpy-oldattrs-pexpect"
+ tox_env: "py37-lsof-numpy-pexpect"
use_coverage: true
- name: "ubuntu-py37-pluggy"
python: "3.7"
os: ubuntu-latest
tox_env: "py38-xdist"
- name: "ubuntu-py39"
- python: "3.9-dev"
+ python: "3.9"
os: ubuntu-latest
tox_env: "py39-xdist"
- name: "ubuntu-pypy3"
fetch-depth: 0
- name: Set up Python ${{ matrix.python }}
uses: actions/setup-python@v2
- if: matrix.python != '3.9-dev'
- with:
- python-version: ${{ matrix.python }}
- - name: Set up Python ${{ matrix.python }} (deadsnakes)
- uses: deadsnakes/action@v2.0.0
- if: matrix.python == '3.9-dev'
with:
python-version: ${{ matrix.python }}
- name: Install dependencies
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- name: set PY
- run: echo "::set-env name=PY::$(python -c 'import hashlib, sys;print(hashlib.sha256(sys.version.encode()+sys.executable.encode()).hexdigest())')"
- - uses: actions/cache@v1
+ run: echo "name=PY::$(python -c 'import hashlib, sys;print(hashlib.sha256(sys.version.encode()+sys.executable.encode()).hexdigest())')" >> $GITHUB_ENV
+ - uses: actions/cache@v2
with:
path: ~/.cache/pre-commit
key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }}
- - run: pip install tox
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install tox
- run: tox -e linting
deploy:
env/
.env/
.venv/
+/pythonenv*/
3rdparty/
.tox
.cache
- id: black
args: [--safe, --quiet]
- repo: https://github.com/asottile/blacken-docs
- rev: v1.7.0
+ rev: v1.8.0
hooks:
- id: blacken-docs
additional_dependencies: [black==19.10b0]
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v3.1.0
+ rev: v3.2.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
exclude: _pytest/(debugging|hookspec).py
language_version: python3
- repo: https://gitlab.com/pycqa/flake8
- rev: 3.8.2
+ rev: 3.8.3
hooks:
- id: flake8
language_version: python3
- flake8-typing-imports==1.9.0
- flake8-docstrings==1.5.0
- repo: https://github.com/asottile/reorder_python_imports
- rev: v2.3.0
+ rev: v2.3.5
hooks:
- id: reorder-python-imports
- args: ['--application-directories=.:src', --py3-plus]
+ args: ['--application-directories=.:src', --py36-plus]
- repo: https://github.com/asottile/pyupgrade
- rev: v2.4.4
+ rev: v2.7.2
hooks:
- id: pyupgrade
- args: [--py3-plus]
+ args: [--py36-plus]
- repo: https://github.com/asottile/setup-cfg-fmt
- rev: v1.9.0
+ rev: v1.11.0
hooks:
- id: setup-cfg-fmt
# TODO: when upgrading setup-cfg-fmt this can be removed
args: [--max-py-version=3.9]
+- repo: https://github.com/pre-commit/pygrep-hooks
+ rev: v1.6.0
+ hooks:
+ - id: python-use-type-annotations
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v0.780 # NOTE: keep this in sync with setup.cfg.
+ rev: v0.790
hooks:
- id: mypy
files: ^(src/|testing/)
args: []
+ additional_dependencies:
+ - iniconfig>=1.1.0
+ - py>=1.8.2
+ - attrs>=19.2.0
+ - packaging
- repo: local
hooks:
- id: rst
+++ /dev/null
-language: python
-dist: trusty
-python: '3.5.1'
-cache: false
-
-env:
- global:
- - PYTEST_ADDOPTS=-vv
-
-# setuptools-scm needs all tags in order to obtain a proper version
-git:
- depth: false
-
-install:
- - python -m pip install --upgrade --pre tox
-
-jobs:
- include:
- # Coverage for Python 3.5.{0,1} specific code, mostly typing related.
- - env: TOXENV=py35 PYTEST_COVERAGE=1 PYTEST_ADDOPTS="-k test_raises_cyclic_reference"
- before_install:
- # Work around https://github.com/jaraco/zipp/issues/40.
- - python -m pip install -U 'setuptools>=34.4.0' virtualenv==16.7.9
-
-before_script:
- - |
- # Do not (re-)upload coverage with cron runs.
- if [[ "$TRAVIS_EVENT_TYPE" = cron ]]; then
- PYTEST_COVERAGE=0
- fi
- - |
- if [[ "$PYTEST_COVERAGE" = 1 ]]; then
- export COVERAGE_FILE="$PWD/.coverage"
- export COVERAGE_PROCESS_START="$PWD/.coveragerc"
- export _PYTEST_TOX_COVERAGE_RUN="coverage run -m"
- export _PYTEST_TOX_EXTRA_DEP=coverage-enable-subprocess
- fi
-
-script: tox
-
-after_success:
- - |
- if [[ "$PYTEST_COVERAGE" = 1 ]]; then
- env CODECOV_NAME="$TOXENV-$TRAVIS_OS_NAME" scripts/report-coverage.sh -F Travis
- fi
-
-notifications:
- irc:
- channels:
- - "chat.freenode.net#pytest"
- on_success: change
- on_failure: change
- skip_join: true
- email:
- - pytest-commit@python.org
-
-branches:
- only:
- - master
- - /^\d+\.\d+\.x$/
Anton Lodder
Antony Lee
Arel Cordero
+Ariel Pillemer
Armin Rigo
Aron Coyle
Aron Curzon
Christian Neumüller
Christian Theunert
Christian Tismer
+Christine Mecklenborg
Christoph Buelter
Christopher Dignam
Christopher Gilling
Diego Russo
Dmitry Dygalo
Dmitry Pribysh
+Dominic Mortlock
Duncan Betts
Edison Gustavo Muenz
Edoardo Batini
Florian Dahlitz
Floris Bruynooghe
Gabriel Reis
+Garvit Shubham
Gene Wood
George Kussumoto
Georgy Dyuldin
Ionuț Turturică
Iwan Briquemont
Jaap Broekhuizen
+Jakob van Santen
Jakub Mitoraj
Jan Balster
Janne Vanhala
Kale Kundert
Kamran Ahmad
Karl O. Pinc
+Karthikeyan Singaravelan
Katarzyna Jachim
Katarzyna Król
Katerina Koukiou
Maxim Filipenko
Maximilian Cosmo Sitter
mbyt
+Mickey Pashov
Michael Aquilina
Michael Birtwell
Michael Droettboom
Pavel Karateev
Paweł Adamczak
Pedro Algarvio
+Petter Strandmark
Philipp Loose
Pieter Mulder
Piotr Banaszkiewicz
Piotr Helm
+Prakhar Gurunani
Prashant Anand
+Prashant Sharma
Pulkit Goyal
Punyashloka Biswal
Quentin Pradet
Samuel Dion-Girardeau
Samuel Searles-Bryant
Samuele Pedroni
+Sanket Duthade
Sankt Petersbug
Segev Finer
Serhii Mozghovyi
Seth Junot
+Shubham Adep
Simon Gomizelj
Simon Kerr
Skylar Downes
Sylvain Marié
Tadek Teleżyński
Takafumi Arakaki
+Tanvi Mehta
Tarcisio Fischer
Tareq Alayan
Ted Xiao
Yoav Caspi
Zac Hatfield-Dodds
Zoltán Máté
+Zsolt Cserna
$ pytest testing/test_config.py
-
-#. Commit and push once your tests pass and you are happy with your change(s)::
-
- $ git commit -a -m "<commit message>"
- $ git push -u
-
#. Create a new changelog entry in ``changelog``. The file should be named ``<issueid>.<type>.rst``,
where *issueid* is the number of the issue related to the change and *type* is one of
``feature``, ``improvement``, ``bugfix``, ``doc``, ``deprecation``, ``breaking``, ``vendor``
#. Add yourself to ``AUTHORS`` file if not there yet, in alphabetical order.
+#. Commit and push once your tests pass and you are happy with your change(s)::
+
+ $ git commit -a -m "<commit message>"
+ $ git push -u
+
#. Finally, submit a pull request through the GitHub website using this data::
head-fork: YOUR_GITHUB_USERNAME/pytest
- Can run `unittest <https://docs.pytest.org/en/stable/unittest.html>`_ (or trial),
`nose <https://docs.pytest.org/en/stable/nose.html>`_ test suites out of the box
-- Python 3.5+ and PyPy3
+- Python 3.6+ and PyPy3
- Rich plugin architecture, with over 850 `external plugins <http://plugincompat.herokuapp.com>`_ and a thriving community
#. Open a PR for ``cherry-pick-release`` and merge it once CI passes. No need to wait for approvals if there were no conflicts on the previous step.
+#. For major and minor releases, tag the release cherry-pick merge commit in master with
+ a dev tag for the next feature release::
+
+ git checkout master
+ git pull
+ git tag MAJOR.{MINOR+1}.0.dev0
+ git push git@github.com:pytest-dev/pytest.git MAJOR.{MINOR+1}.0.dev0
+
#. Send an email announcement with the contents from::
doc/en/announce/release-<VERSION>.rst
--- /dev/null
+from unittest import TestCase # noqa: F401
+
+for i in range(15000):
+ exec(
+ f"""
+class Test{i}(TestCase):
+ @classmethod
+ def setUpClass(cls): pass
+ def test_1(self): pass
+ def test_2(self): pass
+ def test_3(self): pass
+"""
+ )
--- /dev/null
+for i in range(5000):
+ exec(
+ f"""
+class Test{i}:
+ @classmethod
+ def setup_class(cls): pass
+ def test_1(self): pass
+ def test_2(self): pass
+ def test_3(self): pass
+"""
+ )
:maxdepth: 2
+ release-6.2.0
release-6.1.2
release-6.1.1
release-6.1.0
--- /dev/null
+pytest-6.2.0
+=======================================
+
+The pytest team is proud to announce the 6.2.0 release!
+
+This release contains new features, improvements, bug fixes, and breaking changes, so users
+are encouraged to take a look at the CHANGELOG carefully:
+
+ https://docs.pytest.org/en/stable/changelog.html
+
+For complete documentation, please visit:
+
+ https://docs.pytest.org/en/stable/
+
+As usual, you can upgrade from PyPI via:
+
+ pip install -U pytest
+
+Thanks to all of the contributors to this release:
+
+* Adam Johnson
+* Albert Villanova del Moral
+* Anthony Sottile
+* Anton
+* Ariel Pillemer
+* Bruno Oliveira
+* Charles Aracil
+* Christine M
+* Christine Mecklenborg
+* Cserna Zsolt
+* Dominic Mortlock
+* Emiel van de Laar
+* Florian Bruhin
+* Garvit Shubham
+* Gustavo Camargo
+* Hugo Martins
+* Hugo van Kemenade
+* Jakob van Santen
+* Josias Aurel
+* Jürgen Gmach
+* Karthikeyan Singaravelan
+* Katarzyna
+* Kyle Altendorf
+* Manuel Mariñez
+* Matthew Hughes
+* Matthias Gabriel
+* Max Voitko
+* Maximilian Cosmo Sitter
+* Mikhail Fesenko
+* Nimesh Vashistha
+* Pedro Algarvio
+* Petter Strandmark
+* Prakhar Gurunani
+* Prashant Sharma
+* Ran Benita
+* Ronny Pfannschmidt
+* Sanket Duthade
+* Shubham Adep
+* Simon K
+* Tanvi Mehta
+* Thomas Grainger
+* Tim Hoffmann
+* Vasilis Gerakaris
+* William Jamir Silva
+* Zac Hatfield-Dodds
+* crricks
+* dependabot[bot]
+* duthades
+* frankgerhardt
+* kwgchi
+* mickeypash
+* symonk
+
+
+Happy testing,
+The pytest Development Team
------------------------------------------
In order to write assertions about raised exceptions, you can use
-``pytest.raises`` as a context manager like this:
+:func:`pytest.raises` as a context manager like this:
.. code-block:: python
function, so in the above example ``match='123'`` would have worked as
well.
-There's an alternate form of the ``pytest.raises`` function where you pass
+There's an alternate form of the :func:`pytest.raises` function where you pass
a function that will be executed with the given ``*args`` and ``**kwargs`` and
assert that the given exception is raised:
def test_f():
f()
-Using ``pytest.raises`` is likely to be better for cases where you are testing
-exceptions your own code is deliberately raising, whereas using
+Using :func:`pytest.raises` is likely to be better for cases where you are
+testing exceptions your own code is deliberately raising, whereas using
``@pytest.mark.xfail`` with a check function is probably better for something
like documenting unfixed bugs (where the test describes what "should" happen)
or bugs in dependencies.
While we implement those modifications we try to ensure an easy transition and don't want to impose unnecessary churn on our users and community/plugin authors.
-As of now, pytest considers multipe types of backward compatibility transitions:
+As of now, pytest considers multiple types of backward compatibility transitions:
a) trivial: APIs which trivially translate to the new mechanism,
and do not cause problematic changes.
When the deprecation expires (e.g. 4.0 is released), we won't remove the deprecated functionality immediately, but will use the standard warning filters to turn them into **errors** by default. This approach makes it explicit that removal is imminent, and still gives you time to turn the deprecated feature into a warning instead of an error so it can be dealt with in your own time. In the next minor release (e.g. 4.1), the feature will be effectively removed.
-c) true breakage: should only to be considered when normal transition is unreasonably unsustainable and would offset important development/features by years.
+c) true breakage: should only be considered when normal transition is unreasonably unsustainable and would offset important development/features by years.
In addition, they should be limited to APIs where the number of actual users is very small (for example only impacting some plugins), and can be coordinated with the community in advance.
Examples for such upcoming changes:
After there's no hard *-1* on the issue it should be followed up by an initial proof-of-concept Pull Request.
- This POC serves as both a coordination point to assess impact and potential inspriation to come up with a transitional solution after all.
+ This POC serves as both a coordination point to assess impact and potential inspiration to come up with a transitional solution after all.
After a reasonable amount of time the PR can be merged to base a new major release.
function invocation, created as a sub directory of the base temporary
directory.
+ By default, a new base temporary directory is created each test session,
+ and old bases are removed after 3 sessions, to aid in debugging. If
+ ``--basetemp`` is used then it is cleared each session. See :ref:`base
+ temporary directory`.
+
The returned object is a `py.path.local`_ path object.
.. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
function invocation, created as a sub directory of the base temporary
directory.
- The returned object is a :class:`pathlib.Path` object.
-
- .. note::
+ By default, a new base temporary directory is created each test session,
+ and old bases are removed after 3 sessions, to aid in debugging. If
+ ``--basetemp`` is used then it is cleared each session. See :ref:`base
+ temporary directory`.
- In python < 3.6 this is a pathlib2.Path.
+ The returned object is a :class:`pathlib.Path` object.
no tests ran in 0.12s
.. towncrier release notes start
+pytest 6.2.0 (2020-12-12)
+=========================
+
+Breaking Changes
+----------------
+
+- `#7808 <https://github.com/pytest-dev/pytest/issues/7808>`_: pytest now supports python3.6+ only.
+
+
+
+Deprecations
+------------
+
+- `#7469 <https://github.com/pytest-dev/pytest/issues/7469>`_: Directly constructing/calling the following classes/functions is now deprecated:
+
+ - ``_pytest.cacheprovider.Cache``
+ - ``_pytest.cacheprovider.Cache.for_config()``
+ - ``_pytest.cacheprovider.Cache.clear_cache()``
+ - ``_pytest.cacheprovider.Cache.cache_dir_from_config()``
+ - ``_pytest.capture.CaptureFixture``
+ - ``_pytest.fixtures.FixtureRequest``
+ - ``_pytest.fixtures.SubRequest``
+ - ``_pytest.logging.LogCaptureFixture``
+ - ``_pytest.pytester.Pytester``
+ - ``_pytest.pytester.Testdir``
+ - ``_pytest.recwarn.WarningsRecorder``
+ - ``_pytest.recwarn.WarningsChecker``
+ - ``_pytest.tmpdir.TempPathFactory``
+ - ``_pytest.tmpdir.TempdirFactory``
+
+ These have always been considered private, but now issue a deprecation warning, which may become a hard error in pytest 7.0.0.
+
+
+- `#7530 <https://github.com/pytest-dev/pytest/issues/7530>`_: The ``--strict`` command-line option has been deprecated, use ``--strict-markers`` instead.
+
+  We may reintroduce ``--strict`` in the future as an encompassing flag for all strictness-related
+  options (``--strict-markers`` and ``--strict-config`` at the moment; more might be introduced in the future).
+
+
+- `#7988 <https://github.com/pytest-dev/pytest/issues/7988>`_: The ``@pytest.yield_fixture`` decorator/function is now deprecated. Use :func:`pytest.fixture` instead.
+
+  ``yield_fixture`` has been an alias for ``fixture`` for a very long time, so it can be searched and replaced safely.
+
+
+
+Features
+--------
+
+- `#5299 <https://github.com/pytest-dev/pytest/issues/5299>`_: pytest now warns about unraisable exceptions and unhandled thread exceptions that occur in tests on Python>=3.8.
+ See :ref:`unraisable` for more information.
+
+
+- `#7425 <https://github.com/pytest-dev/pytest/issues/7425>`_: New :fixture:`pytester` fixture, which is identical to :fixture:`testdir` but its methods return :class:`pathlib.Path` when appropriate instead of ``py.path.local``.
+
+  This is part of the movement to use :class:`pathlib.Path` objects internally, in order to remove the dependency on ``py`` in the future.
+
+ Internally, the old :class:`Testdir <_pytest.pytester.Testdir>` is now a thin wrapper around :class:`Pytester <_pytest.pytester.Pytester>`, preserving the old interface.
+
+
+- `#7695 <https://github.com/pytest-dev/pytest/issues/7695>`_: A new hook was added, ``pytest_markeval_namespace``, which should return a dictionary.
+ This dictionary will be used to augment the "global" variables available to evaluate skipif/xfail/xpass markers.
+
+  Pseudo example:
+
+ ``conftest.py``:
+
+ .. code-block:: python
+
+ def pytest_markeval_namespace():
+ return {"color": "red"}
+
+ ``test_func.py``:
+
+ .. code-block:: python
+
+ @pytest.mark.skipif("color == 'blue'", reason="Color is not red")
+ def test_func():
+ assert False
+
+
+- `#8006 <https://github.com/pytest-dev/pytest/issues/8006>`_: It is now possible to construct a :class:`~pytest.MonkeyPatch` object directly as ``pytest.MonkeyPatch()``,
+ in cases when the :fixture:`monkeypatch` fixture cannot be used. Previously some users imported it
+  from the private ``_pytest.monkeypatch.MonkeyPatch`` namespace.
+
+ Additionally, :meth:`MonkeyPatch.context <pytest.MonkeyPatch.context>` is now a classmethod,
+ and can be used as ``with MonkeyPatch.context() as mp: ...``. This is the recommended way to use
+ ``MonkeyPatch`` directly, since unlike the ``monkeypatch`` fixture, an instance created directly
+ is not ``undo()``-ed automatically.
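+
+  A minimal sketch of direct use (the patched environment variable is illustrative):
+
+  .. code-block:: python
+
+      from pytest import MonkeyPatch
+
+      with MonkeyPatch.context() as mp:
+          mp.setenv("EXAMPLE_VAR", "1")
+      # the patch has been undone here, when the context exited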
+
+
+
+Improvements
+------------
+
+- `#1265 <https://github.com/pytest-dev/pytest/issues/1265>`_: Added an ``__str__`` implementation to the :class:`~_pytest.pytester.LineMatcher` class which is returned from ``pytester.runpytest().stdout`` and similar. It returns the entire output, like the existing ``str()`` method.
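+
+  A minimal sketch (inside a test using the :fixture:`pytester` fixture):
+
+  .. code-block:: python
+
+      def test_str_of_stdout(pytester):
+          result = pytester.runpytest()
+          print(str(result.stdout))  # the entire captured output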
+
+
+- `#2044 <https://github.com/pytest-dev/pytest/issues/2044>`_: Verbose mode now shows the reason that a test was skipped in the test's terminal line after the "SKIPPED", "XFAIL" or "XPASS".
+
+
+- `#7469 <https://github.com/pytest-dev/pytest/issues/7469>`_: The types of builtin pytest fixtures are now exported so they may be used in type annotations of test functions.
+ The newly-exported types are:
+
+ - ``pytest.FixtureRequest`` for the :fixture:`request` fixture.
+ - ``pytest.Cache`` for the :fixture:`cache` fixture.
+ - ``pytest.CaptureFixture[str]`` for the :fixture:`capfd` and :fixture:`capsys` fixtures.
+ - ``pytest.CaptureFixture[bytes]`` for the :fixture:`capfdbinary` and :fixture:`capsysbinary` fixtures.
+ - ``pytest.LogCaptureFixture`` for the :fixture:`caplog` fixture.
+ - ``pytest.Pytester`` for the :fixture:`pytester` fixture.
+ - ``pytest.Testdir`` for the :fixture:`testdir` fixture.
+ - ``pytest.TempdirFactory`` for the :fixture:`tmpdir_factory` fixture.
+ - ``pytest.TempPathFactory`` for the :fixture:`tmp_path_factory` fixture.
+ - ``pytest.MonkeyPatch`` for the :fixture:`monkeypatch` fixture.
+ - ``pytest.WarningsRecorder`` for the :fixture:`recwarn` fixture.
+
+  Constructing them is not supported (except for ``MonkeyPatch``); they are only meant for use in type annotations.
+  Doing so will emit a deprecation warning, and may become a hard error in pytest 7.0.
+
+ Subclassing them is also not supported. This is not currently enforced at runtime, but is detected by type-checkers such as mypy.
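+
+  For example, a sketch of annotating a test with one of the new types:
+
+  .. code-block:: python
+
+      import pytest
+
+      def test_output(capsys: pytest.CaptureFixture[str]) -> None:
+          print("hello")
+          assert capsys.readouterr().out == "hello\n"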
+
+
+- `#7527 <https://github.com/pytest-dev/pytest/issues/7527>`_: When a comparison between :func:`namedtuple <collections.namedtuple>` instances of the same type fails, pytest now shows the differing field names (possibly nested) instead of their indexes.
+
+
+- `#7615 <https://github.com/pytest-dev/pytest/issues/7615>`_: :meth:`Node.warn <_pytest.nodes.Node.warn>` now permits any subclass of :class:`Warning`, not just :class:`PytestWarning <pytest.PytestWarning>`.
+
+
+- `#7701 <https://github.com/pytest-dev/pytest/issues/7701>`_: Improved reporting when using ``--collect-only``. It will now show the number of collected tests in the summary stats.
+
+
+- `#7710 <https://github.com/pytest-dev/pytest/issues/7710>`_: Use strict equality comparison for non-numeric types in :func:`pytest.approx` instead of
+ raising :class:`TypeError`.
+
+ This was the undocumented behavior before 3.7, but is now officially a supported feature.
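+
+  A minimal sketch of the new behavior:
+
+  .. code-block:: python
+
+      import pytest
+
+      def test_mixed_dict():
+          # "units" is compared with ==, "value" approximately
+          expected = pytest.approx({"units": "m", "value": 0.3}, rel=1e-3)
+          assert {"units": "m", "value": 0.30001} == expected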
+
+
+- `#7938 <https://github.com/pytest-dev/pytest/issues/7938>`_: New ``--sw-skip`` argument which is a shorthand for ``--stepwise-skip``.
+
+
+- `#8023 <https://github.com/pytest-dev/pytest/issues/8023>`_: Added ``'node_modules'`` to default value for :confval:`norecursedirs`.
+
+
+- `#8032 <https://github.com/pytest-dev/pytest/issues/8032>`_: :meth:`doClassCleanups <unittest.TestCase.doClassCleanups>` (introduced in :mod:`unittest` in Python 3.8) is now called appropriately.
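+
+  A minimal sketch (``addClassCleanup`` is part of :mod:`unittest` on Python 3.8+):
+
+  .. code-block:: python
+
+      import unittest
+
+      class MyTest(unittest.TestCase):
+          @classmethod
+          def setUpClass(cls):
+              cls.addClassCleanup(print, "class cleanup ran")
+
+          def test_ok(self):
+              pass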
+
+
+
+Bug Fixes
+---------
+
+- `#4824 <https://github.com/pytest-dev/pytest/issues/4824>`_: Fixed quadratic behavior and improved performance of collection of items using autouse fixtures and xunit fixtures.
+
+
+- `#7758 <https://github.com/pytest-dev/pytest/issues/7758>`_: Fixed an issue where some files in packages are getting lost from ``--lf`` even though they contain tests that failed. Regressed in pytest 5.4.0.
+
+
+- `#7911 <https://github.com/pytest-dev/pytest/issues/7911>`_: Directories created by :fixture:`tmp_path` and :fixture:`tmpdir` are now considered stale after 3 days without modification (previous value was 3 hours) to avoid deleting directories still in use in long-running test suites.
+
+
+- `#7913 <https://github.com/pytest-dev/pytest/issues/7913>`_: Fixed a crash or hang in :meth:`pytester.spawn <_pytest.pytester.Pytester.spawn>` when the :mod:`readline` module is involved.
+
+
+- `#7951 <https://github.com/pytest-dev/pytest/issues/7951>`_: Fixed handling of recursive symlinks when collecting tests.
+
+
+- `#7981 <https://github.com/pytest-dev/pytest/issues/7981>`_: Fixed symlinked directories not being followed during collection. Regressed in pytest 6.1.0.
+
+
+- `#8016 <https://github.com/pytest-dev/pytest/issues/8016>`_: Fixed only one doctest being collected when using ``pytest --doctest-modules path/to/an/__init__.py``.
+
+
+
+Improved Documentation
+----------------------
+
+- `#7429 <https://github.com/pytest-dev/pytest/issues/7429>`_: Add more information and use cases about skipping doctests.
+
+
+- `#7780 <https://github.com/pytest-dev/pytest/issues/7780>`_: Classes which should not be inherited from are now marked ``final class`` in the API reference.
+
+
+- `#7872 <https://github.com/pytest-dev/pytest/issues/7872>`_: ``_pytest.config.argparsing.Parser.addini()`` accepts explicit ``None`` and ``"string"``.
+
+
+- `#7878 <https://github.com/pytest-dev/pytest/issues/7878>`_: In the pull request section, ask to commit after editing the changelog and the authors file.
+
+
+
+Trivial/Internal Changes
+------------------------
+
+- `#7802 <https://github.com/pytest-dev/pytest/issues/7802>`_: The ``attrs`` dependency requirement is now >=19.2.0 instead of >=17.4.0.
+
+
+- `#8014 <https://github.com/pytest-dev/pytest/issues/8014>`_: `.pyc` files created by pytest's assertion rewriting now conform to the newer PEP-552 format on Python>=3.7.
+ (These files are internal and only interpreted by pytest itself.)
+
+
pytest 6.1.2 (2020-10-28)
=========================
- `#5416 <https://github.com/pytest-dev/pytest/issues/5416>`_: Fix PytestUnknownMarkWarning in run/skip example.
+pytest 4.6.11 (2020-06-04)
+==========================
+
+Bug Fixes
+---------
+
+- `#6334 <https://github.com/pytest-dev/pytest/issues/6334>`_: Fix summary entries appearing twice when ``f/F`` and ``s/S`` report chars were used at the same time in the ``-r`` command-line option (for example ``-rFf``).
+
+ The upper case variants were never documented and the preferred form should be the lower case.
+
+
+- `#7310 <https://github.com/pytest-dev/pytest/issues/7310>`_: Fix
+  ``UnboundLocalError: local variable 'letter' referenced before assignment``
+  in ``_pytest.terminal.pytest_report_teststatus()``
+  when plugins return report objects in an unconventional state.
+
+  This was making ``pytest_report_teststatus()`` skip
+  entering if-block branches that declare the ``letter`` variable.
+
+  The fix was to set the initial value of ``letter`` before
+  the if-block cascade so that it always has a value.
+
+
+pytest 4.6.10 (2020-05-08)
+==========================
+
+Features
+--------
+
+- `#6870 <https://github.com/pytest-dev/pytest/issues/6870>`_: New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``.
+
+  Remark: while this is technically a new feature and according to our `policy <https://docs.pytest.org/en/latest/py27-py34-deprecation.html#what-goes-into-4-6-x-releases>`_ it should not have been backported, we have made an exception in this particular case because it fixes a serious interaction with ``pytest-xdist``, so it can also be considered a bugfix.
+
+Trivial/Internal Changes
+------------------------
+
+- `#6404 <https://github.com/pytest-dev/pytest/issues/6404>`_: Remove usage of ``parser`` module, deprecated in Python 3.9.
+
+
pytest 4.6.9 (2020-01-04)
=========================
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
+import ast
import os
import sys
+from typing import List
+from typing import TYPE_CHECKING
from _pytest import __version__ as version
-from _pytest.compat import TYPE_CHECKING
if TYPE_CHECKING:
import sphinx.application
)
configure_logging(app)
+
+ # Make Sphinx mark classes with "final" when decorated with @final.
+ # We need this because we import final from pytest._compat, not from
+ # typing (for Python < 3.8 compat), so Sphinx doesn't detect it.
+ # To keep things simple we accept any `@final` decorator.
+ # Ref: https://github.com/pytest-dev/pytest/pull/7780
+ import sphinx.pycode.ast
+ import sphinx.pycode.parser
+
+ original_is_final = sphinx.pycode.parser.VariableCommentPicker.is_final
+
+ def patched_is_final(self, decorators: List[ast.expr]) -> bool:
+ if original_is_final(self, decorators):
+ return True
+ return any(
+ sphinx.pycode.ast.unparse(decorator) == "final" for decorator in decorators
+ )
+
+ sphinx.pycode.parser.VariableCommentPicker.is_final = patched_is_final
Below is a complete list of all pytest features which are considered deprecated. Using those features will issue
:class:`PytestWarning` or subclasses, which can be filtered using :ref:`standard warning filters <warnings>`.
+The ``--strict`` command-line option
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. deprecated:: 6.2
+
+The ``--strict`` command-line option has been deprecated in favor of ``--strict-markers``, which
+better conveys what the option does.
+
+We may reintroduce ``--strict`` in the future as an encompassing
+flag for all strictness-related options (``--strict-markers`` and ``--strict-config``
+at the moment; more might be introduced in the future).
+
+
+The ``yield_fixture`` function/decorator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. deprecated:: 6.2
+
+``pytest.yield_fixture`` is a deprecated alias for :func:`pytest.fixture`.
+
+It has been an alias for a very long time, so it can be searched and replaced safely.
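+
+A sketch of the mechanical replacement (``resource`` is illustrative):
+
+.. code-block:: python
+
+    # before
+    @pytest.yield_fixture
+    def resource():
+        yield object()
+
+    # after
+    @pytest.fixture
+    def resource():
+        yield object()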
+
The ``pytest_warning_captured`` hook
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Doctest integration for modules and test files
=========================================================
-By default all files matching the ``test*.txt`` pattern will
+By default, all files matching the ``test*.txt`` pattern will
be run through the python standard ``doctest`` module. You
can change the pattern by issuing:
[pytest]
addopts = --doctest-modules
-.. note::
-
- The builtin pytest doctest supports only ``doctest`` blocks, but if you are looking
- for more advanced checking over *all* your documentation,
- including doctests, ``.. codeblock:: python`` Sphinx directive support,
- and any other examples your documentation may include, you may wish to
- consider `Sybil <https://sybil.readthedocs.io/en/latest/index.html>`__.
- It provides pytest integration out of the box.
-
Encoding
--------
.. code-block:: ini
[pytest]
- doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
+ doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
Alternatively, options can be enabled by an inline comment in the doc test
itself:
>>> ...
>>>
-Note that the fixture needs to be defined in a place visible by pytest, for example a `conftest.py`
+Note that the fixture needs to be defined in a place visible to pytest, for example, a ``conftest.py``
file or plugin; normal python files containing docstrings are not normally scanned for fixtures
unless explicitly configured by :confval:`python_files`.
Meaning that if you put your doctest with your source code, the relevant conftest.py needs to be in the same directory tree.
Fixtures will not be discovered in a sibling directory tree!
-Skipping tests dynamically
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+Skipping tests
+^^^^^^^^^^^^^^
+
+For the same reasons one might want to skip normal tests, it is also possible to skip
+tests inside doctests.
+
+To skip a single check inside a doctest you can use the standard
+`doctest.SKIP <https://docs.python.org/3/library/doctest.html#doctest.SKIP>`__ directive:
+
+.. code-block:: python
+
+ def test_random(y):
+ """
+ >>> random.random() # doctest: +SKIP
+ 0.156231223
+
+ >>> 1 + 1
+ 2
+ """
+
+This will skip the first check, but not the second.
-.. versionadded:: 4.4
+pytest also allows using the standard pytest functions :func:`pytest.skip` and
+:func:`pytest.xfail` inside doctests, which might be useful because you can
+then skip/xfail tests based on external conditions:
-You can use ``pytest.skip`` to dynamically skip doctests. For example:
.. code-block:: text
>>> if sys.platform.startswith('win'):
... pytest.skip('this doctest does not work on Windows')
...
+ >>> import fcntl
+ >>> ...
+
+However, using those functions is discouraged because it reduces the readability of the
+docstring.
+
+.. note::
+
+ :func:`pytest.skip` and :func:`pytest.xfail` behave differently depending
+ if the doctests are in a Python file (in docstrings) or a text file containing
+ doctests intermingled with text:
+
+ * Python modules (docstrings): the functions only act in that specific docstring,
+ letting the other docstrings in the same module execute as normal.
+
+ * Text files: the functions will skip/xfail the checks for the rest of the entire
+ file.
+
+
+Alternatives
+------------
+
+While the built-in pytest support provides a good set of functionalities for using
+doctests, if you use them extensively you might be interested in these external packages,
+which add many more features and include pytest integration:
+
+* `pytest-doctestplus <https://github.com/astropy/pytest-doctestplus>`__: provides
+ advanced doctest support and enables the testing of reStructuredText (".rst") files.
+
+* `Sybil <https://sybil.readthedocs.io>`__: provides a way to test examples in
+ your documentation by parsing them from the documentation source and evaluating
+ the parsed examples as part of your normal test run.
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
items = [1, 2, 3]
- print("items is {!r}".format(items))
+ print(f"items is {items!r}")
a, b = items.pop()
def test_some_error(self):
return
mod = item.getparent(pytest.Module).obj
if hasattr(mod, "hello"):
- print("mod.hello {!r}".format(mod.hello))
+ print(f"mod.hello {mod.hello!r}")
[pytest]
markers =
webtest: mark a test as a webtest.
+ slow: mark test as slow.
-You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers:
+Multiple custom markers can be registered by defining each one on its own line, as shown in the example above.
+
+You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` and ``slow`` markers:
.. code-block:: pytest
$ pytest --markers
@pytest.mark.webtest: mark a test as a webtest.
+ @pytest.mark.slow: mark test as slow.
+
@pytest.mark.filterwarnings(warning): add a warning filter to the given test. see https://docs.pytest.org/en/stable/warnings.html#pytest-mark-filterwarnings
@pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test.
.
1 passed in 0.12s
-marking platform specific tests with pytest
+Marking platform specific tests with pytest
--------------------------------------------------------------
.. regendoc:wipe
def __init__(self, version, picklefile):
self.pythonpath = shutil.which(version)
if not self.pythonpath:
- pytest.skip("{!r} not found".format(version))
+ pytest.skip(f"{version!r} not found")
self.picklefile = picklefile
def dumps(self, obj):
@pytest.mark.parametrize("obj", [42, {}, {1: 3}])
def test_basic_objects(python1, python2, obj):
python1.dumps(obj)
- python2.load_and_is_true("obj == {}".format(obj))
+ python2.load_and_is_true(f"obj == {obj}")
<YamlItem hello>
<YamlItem ok>
- ========================== no tests ran in 0.12s ===========================
+ ======================== 2 tests collected in 0.12s ========================
)
def reportinfo(self):
- return self.fspath, 0, "usecase: {}".format(self.name)
+ return self.fspath, 0, f"usecase: {self.name}"
class YamlException(Exception):
<Function test_timedistance_v3[forward]>
<Function test_timedistance_v3[backward]>
- ========================== no tests ran in 0.12s ===========================
+ ======================== 8 tests collected in 0.12s ========================
In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
together with the actual data, instead of listing them separately.
<Function test_demo1[advanced]>
<Function test_demo2[advanced]>
- ========================== no tests ran in 0.12s ===========================
+ ======================== 4 tests collected in 0.12s ========================
Note that we told ``metafunc.parametrize()`` that your scenario values
should be considered class-scoped. With pytest-2.3 this leads to a
<Function test_db_initialized[d1]>
<Function test_db_initialized[d2]>
- ========================== no tests ran in 0.12s ===========================
+ ======================== 2 tests collected in 0.12s ========================
And then when we run the test:
<Function simple_check>
<Function complex_check>
- ========================== no tests ran in 0.12s ===========================
+ ======================== 2 tests collected in 0.12s ========================
You can check for multiple glob patterns by adding a space between the patterns:
<Function test_method>
<Function test_anothermethod>
- ========================== no tests ran in 0.12s ===========================
+ ======================== 3 tests collected in 0.12s ========================
.. _customizing-test-collection:
<Module 'pkg/module_py2.py'>
<Function 'test_only_on_python2'>
- ====== no tests ran in 0.04 seconds ======
+    ====== 1 test found in 0.04 seconds ======
If you run with a Python 3 interpreter both the one test and the ``setup.py``
file will be left out:
rootdir: $REGENDOC_TMPDIR, configfile: pytest.ini
collected 0 items
- ========================== no tests ran in 0.12s ===========================
+ ======================= no tests collected in 0.12s ========================
It's also possible to ignore files based on Unix shell-style wildcards by adding
patterns to :globalvar:`collect_ignore_glob`.
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
items = [1, 2, 3]
- print("items is {!r}".format(items))
+ print(f"items is {items!r}")
> a, b = items.pop()
E TypeError: cannot unpack non-iterable int object
========================== no tests ran in 0.12s ===========================
-profiling test duration
+Profiling test duration
--------------------------
.. regendoc:wipe
0.10s call test_some_are_slow.py::test_funcfast
============================ 3 passed in 0.12s =============================
-incremental testing - test steps
+Incremental testing - test steps
---------------------------------------------------
.. regendoc:wipe
executing).
-post-process test reports / failures
+Post-process test reports / failures
---------------------------------------
If you want to postprocess test reports and need access to the executing
<Function test_ehlo[mail.python.org]>
<Function test_noop[mail.python.org]>
- ========================== no tests ran in 0.12s ===========================
+ ======================= 10 tests collected in 0.12s ========================
.. _`fixture-parametrize-marks`:
Running this test will *skip* the invocation of ``data_set`` with value ``2``:
-.. code-block:: pytest
+.. code-block::
$ pytest test_fixture_marks.py -v
=========================== test session starts ============================
test_fixture_marks.py::test_data[0] PASSED [ 33%]
test_fixture_marks.py::test_data[1] PASSED [ 66%]
- test_fixture_marks.py::test_data[2] SKIPPED [100%]
+ test_fixture_marks.py::test_data[2] SKIPPED (unconditional skip) [100%]
======================= 2 passed, 1 skipped in 0.12s =======================
Installation and Getting Started
===================================
-**Pythons**: Python 3.5, 3.6, 3.7, 3.8, 3.9, PyPy3
+**Pythons**: Python 3.6, 3.7, 3.8, 3.9, PyPy3
**Platforms**: Linux and Windows
.. code-block:: bash
$ pytest --version
- pytest 6.1.2
+ pytest 6.2.0
.. _`simpletest`:
- Can run :ref:`unittest <unittest>` (including trial) and :ref:`nose <noseintegration>` test suites out of the box
-- Python 3.5+ and PyPy 3
+- Python 3.6+ and PyPy 3
- Rich plugin architecture, with over 315 `external plugins <http://plugincompat.herokuapp.com>`_ and a thriving community
fundamentally incompatible with pytest because they don't support fixtures
properly since collection and test execution are separated.
+Migrating from nose to pytest
+------------------------------
+
+`nose2pytest <https://github.com/pytest-dev/nose2pytest>`_ is a Python script
+and pytest plugin to help convert Nose-based tests into pytest-based tests.
+Specifically, the script transforms ``nose.tools.assert_*`` function calls into
+raw assert statements, while preserving the format of the original arguments
+as much as possible.
+
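+For illustration, a sketch of the kind of rewrite nose2pytest performs
+(``add`` is a hypothetical function under test):
+
+.. code-block:: python
+
+    # nose style
+    from nose.tools import assert_equal
+
+    assert_equal(add(2, 3), 5)
+
+    # pytest style, after conversion
+    assert add(2, 3) == 5
+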
.. _nose: https://nose.readthedocs.io/en/latest/
When using `usefixtures` in hooks, it can only load fixtures when applied to a test function before test setup
(for example in the `pytest_collection_modifyitems` hook).
- Also not that his mark has no effect when applied to **fixtures**.
+ Also note that this mark has no effect when applied to **fixtures**.
mark.args == (10, "slow")
mark.kwargs == {"method": "thread"}
+Example for using multiple custom markers:
+
+.. code-block:: python
+
+ @pytest.mark.timeout(10, "slow", method="thread")
+ @pytest.mark.slow
+ def test_function():
+ ...
+
+When :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>` or :meth:`Node.iter_markers_with_node <_pytest.nodes.Node.iter_markers_with_node>` is used with multiple markers, the marker closest to the function will be iterated over first. The above example will result in ``@pytest.mark.slow`` followed by ``@pytest.mark.timeout(...)``.
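+
+A minimal sketch of observing that order (the autouse fixture is only for
+illustration):
+
+.. code-block:: python
+
+    import pytest
+
+    @pytest.fixture(autouse=True)
+    def show_marks(request):
+        # for test_function above this prints "slow" first, then "timeout"
+        for mark in request.node.iter_markers():
+            print(mark.name)
+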
.. _`fixtures-api`:
Under the hood, the cache plugin uses the simple
``dumps``/``loads`` API of the :py:mod:`json` stdlib module.
-.. currentmodule:: _pytest.cacheprovider
+``config.cache`` is an instance of :class:`pytest.Cache`:
-.. automethod:: Cache.get
-.. automethod:: Cache.set
-.. automethod:: Cache.makedir
+.. autoclass:: pytest.Cache()
+ :members:
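+
+For example, a minimal sketch of caching an expensive value across runs (the
+key name and the computation are illustrative):
+
+.. code-block:: python
+
+    import pytest
+
+    @pytest.fixture
+    def expensive_value(request):
+        value = request.config.cache.get("example/value", None)
+        if value is None:
+            value = 42  # stand-in for an expensive computation
+            request.config.cache.set("example/value", value)
+        return value
+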
.. fixture:: capsys
**Tutorial**: :doc:`capture`.
-.. currentmodule:: _pytest.capture
-
-.. autofunction:: capsys()
+.. autofunction:: _pytest.capture.capsys()
:no-auto-options:
- Returns an instance of :py:class:`CaptureFixture`.
+ Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
Example:
captured = capsys.readouterr()
assert captured.out == "hello\n"
-.. autoclass:: CaptureFixture()
+.. autoclass:: pytest.CaptureFixture()
:members:
**Tutorial**: :doc:`capture`.
-.. autofunction:: capsysbinary()
+.. autofunction:: _pytest.capture.capsysbinary()
:no-auto-options:
- Returns an instance of :py:class:`CaptureFixture`.
+ Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
Example:
**Tutorial**: :doc:`capture`.
-.. autofunction:: capfd()
+.. autofunction:: _pytest.capture.capfd()
:no-auto-options:
- Returns an instance of :py:class:`CaptureFixture`.
+ Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
Example:
**Tutorial**: :doc:`capture`.
-.. autofunction:: capfdbinary()
+.. autofunction:: _pytest.capture.capfdbinary()
:no-auto-options:
- Returns an instance of :py:class:`CaptureFixture`.
+ Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
Example:
The ``request`` fixture is a special fixture providing information about the requesting test function.
-.. autoclass:: _pytest.fixtures.FixtureRequest()
+.. autoclass:: pytest.FixtureRequest()
:members:
.. autofunction:: _pytest.logging.caplog()
:no-auto-options:
- Returns a :class:`_pytest.logging.LogCaptureFixture` instance.
+ Returns a :class:`pytest.LogCaptureFixture` instance.
-.. autoclass:: _pytest.logging.LogCaptureFixture
+.. autoclass:: pytest.LogCaptureFixture()
:members:
monkeypatch
~~~~~~~~~~~
-.. currentmodule:: _pytest.monkeypatch
-
**Tutorial**: :doc:`monkeypatch`.
.. autofunction:: _pytest.monkeypatch.monkeypatch()
:no-auto-options:
- Returns a :class:`MonkeyPatch` instance.
+ Returns a :class:`~pytest.MonkeyPatch` instance.
-.. autoclass:: _pytest.monkeypatch.MonkeyPatch
+.. autoclass:: pytest.MonkeyPatch
:members:
-.. fixture:: testdir
+.. fixture:: pytester
-testdir
-~~~~~~~
+pytester
+~~~~~~~~
+
+.. versionadded:: 6.2
-.. currentmodule:: _pytest.pytester
+Provides a :class:`~pytest.Pytester` instance that can be used to run and test pytest itself.
-This fixture provides a :class:`Testdir` instance useful for black-box testing of test files, making it ideal to
-test plugins.
+It provides an empty directory where pytest can be executed in isolation, along with facilities
+for writing tests and configuration files and for matching against expected output.
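+
+For illustration, a minimal sketch of such a test (the fixture must first be
+enabled as shown below):
+
+.. code-block:: python
+
+    def test_true_assertion(pytester):
+        pytester.makepyfile(
+            """
+            def test_ok():
+                assert True
+            """
+        )
+        result = pytester.runpytest()
+        result.assert_outcomes(passed=1)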
-To use it, include in your top-most ``conftest.py`` file:
+To use it, include in your topmost ``conftest.py`` file:
.. code-block:: python
-.. autoclass:: Testdir()
+.. autoclass:: pytest.Pytester()
+ :members:
+
+.. autoclass:: _pytest.pytester.RunResult()
:members:
-.. autoclass:: RunResult()
+.. autoclass:: _pytest.pytester.LineMatcher()
:members:
+ :special-members: __str__
-.. autoclass:: LineMatcher()
+.. autoclass:: _pytest.pytester.HookRecorder()
+ :members:
+
+.. fixture:: testdir
+
+testdir
+~~~~~~~
+
+Identical to :fixture:`pytester`, but provides an instance whose methods return
+legacy ``py.path.local`` objects instead when applicable.
+
+New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`.
+
+.. autoclass:: pytest.Testdir()
:members:
**Tutorial**: :ref:`assertwarnings`
-.. currentmodule:: _pytest.recwarn
-
-.. autofunction:: recwarn()
+.. autofunction:: _pytest.recwarn.recwarn()
:no-auto-options:
-.. autoclass:: WarningsRecorder()
+.. autoclass:: pytest.WarningsRecorder()
:members:
Each recorded warning is an instance of :class:`warnings.WarningMessage`.
**Tutorial**: :doc:`tmpdir`
-.. currentmodule:: _pytest.tmpdir
-
-.. autofunction:: tmp_path()
+.. autofunction:: _pytest.tmpdir.tmp_path()
:no-auto-options:
.. fixture:: tmp_path_factory
tmp_path_factory
~~~~~~~~~~~~~~~~
.. _`tmp_path_factory factory api`:
-``tmp_path_factory`` instances have the following methods:
-
-.. currentmodule:: _pytest.tmpdir
+``tmp_path_factory`` is an instance of :class:`~pytest.TempPathFactory`:
-.. automethod:: TempPathFactory.mktemp
-.. automethod:: TempPathFactory.getbasetemp
+.. autoclass:: pytest.TempPathFactory()
.. fixture:: tmpdir
**Tutorial**: :doc:`tmpdir`
-.. currentmodule:: _pytest.tmpdir
-
-.. autofunction:: tmpdir()
+.. autofunction:: _pytest.tmpdir.tmpdir()
:no-auto-options:
.. _`tmpdir factory api`:
-``tmpdir_factory`` instances have the following methods:
+``tmpdir_factory`` is an instance of :class:`~pytest.TempdirFactory`:
-.. currentmodule:: _pytest.tmpdir
-
-.. automethod:: TempdirFactory.mktemp
-.. automethod:: TempdirFactory.getbasetemp
+.. autoclass:: pytest.TempdirFactory()
.. _`hook-reference`:
.. autofunction:: pytest_collection_modifyitems
+.. note::
+ If this hook is implemented in ``conftest.py`` files, it always receives all collected items, not only those
+ under the ``conftest.py`` where it is implemented.
+
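+A minimal sketch of a root ``conftest.py`` implementation (the reordering is
+illustrative):
+
+.. code-block:: python
+
+    def pytest_collection_modifyitems(items):
+        # receives every collected item, even when other conftest.py
+        # files exist further down the tree
+        items.sort(key=lambda item: item.name)
+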
.. autofunction:: pytest_collection_finish
Test running (runtest) hooks
:members:
:show-inheritance:
+FunctionDefinition
+~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: _pytest.python.FunctionDefinition()
+ :members:
+ :show-inheritance:
+
Item
~~~~
.. autoclass:: pytest.PytestUnknownMarkWarning
:show-inheritance:
+.. autoclass:: pytest.PytestUnraisableExceptionWarning
+ :show-inheritance:
+
+.. autoclass:: pytest.PytestUnhandledThreadExceptionWarning
+ :show-inheritance:
+
Consult the :ref:`internal-warnings` section in the documentation for more information.
[seq] matches any character in seq
[!seq] matches any char not in seq
- Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'``.
+ Default patterns are ``'*.egg'``, ``'.*'``, ``'_darcs'``, ``'build'``,
+ ``'CVS'``, ``'dist'``, ``'node_modules'``, ``'venv'``, ``'{arch}'``.
Setting a ``norecursedirs`` replaces the default. Here is an example of
how to avoid certain directories:
failures.
--sw, --stepwise exit on test failure and continue from last failing
test next time
- --stepwise-skip ignore the first failing test but stop on the next
+ --sw-skip, --stepwise-skip
+ ignore the first failing test but stop on the next
failing test
reporting:
--maxfail=num exit after first num failures or errors.
--strict-config any warnings encountered while parsing the `pytest`
section of the configuration file raise errors.
- --strict-markers, --strict
- markers not registered in the `markers` section of
+ --strict-markers markers not registered in the `markers` section of
the configuration file raise errors.
+ --strict (deprecated) alias to --strict-markers.
-c file load configuration from `file` instead of trying to
locate one of the implicit configuration files.
--continue-on-collection-errors
import sys
- @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
+ @pytest.mark.skipif(sys.version_info < (3, 7), reason="requires python3.7 or higher")
def test_function():
...
at the module level, which is when a condition would otherwise be evaluated for marks.
This will make ``test_function`` ``XFAIL``. Note that no other code is executed after
-the ``pytest.xfail`` call, differently from the marker. That's because it is implemented
+the :func:`pytest.xfail` call, unlike the marker. That's because it is implemented
internally by raising a known exception.
**Reference**: :ref:`pytest.mark.xfail ref`
pytest --runxfail
you can force the running and reporting of an ``xfail`` marked test
-as if it weren't marked at all. This also causes ``pytest.xfail`` to produce no effect.
+as if it weren't marked at all. This also causes :func:`pytest.xfail` to produce no effect.
Examples
~~~~~~~~
provide a temporary directory unique to the test invocation,
created in the `base temporary directory`_.
-``tmp_path`` is a ``pathlib/pathlib2.Path`` object. Here is an example test usage:
+``tmp_path`` is a ``pathlib.Path`` object. Here is an example test usage:
.. code-block:: python
# content of test_tmp_path.py
- import os
-
CONTENT = "content"
> assert 0
E assert 0
- test_tmp_path.py:13: AssertionError
+ test_tmp_path.py:11: AssertionError
========================= short test summary info ==========================
FAILED test_tmp_path.py::test_create_file - assert 0
============================ 1 failed in 0.12s =============================
.. code-block:: python
# content of test_tmpdir.py
- import os
-
-
def test_create_file(tmpdir):
p = tmpdir.mkdir("sub").join("hello.txt")
p.write("content")
> assert 0
E assert 0
- test_tmpdir.py:9: AssertionError
+ test_tmpdir.py:6: AssertionError
========================= short test summary info ==========================
FAILED test_tmpdir.py::test_create_file - assert 0
============================ 1 failed in 0.12s =============================
the command-line using ``-o faulthandler_timeout=X``.
+.. _unraisable:
+
+Warning about unraisable exceptions and unhandled thread exceptions
+-------------------------------------------------------------------
+
+.. versionadded:: 6.2
+
+.. note::
+
+ These features only work on Python>=3.8.
+
+Unhandled exceptions are exceptions that are raised in a situation in which
+they cannot propagate to a caller. The most common case is an exception raised
+in a :meth:`__del__ <object.__del__>` implementation.
+
+Unhandled thread exceptions are exceptions raised in a :class:`~threading.Thread`
+but not handled, causing the thread to terminate uncleanly.
+
+Both types of exceptions are normally considered bugs, but may go unnoticed
+because they don't cause the program itself to crash. Pytest detects these
+conditions and issues a warning that is visible in the test run summary.
+
+The plugins are automatically enabled for pytest runs, unless the
+``-p no:unraisableexception`` (for unraisable exceptions) and
+``-p no:threadexception`` (for thread exceptions) options are given on the
+command-line.
+
+The warnings may be silenced selectively using the :ref:`pytest.mark.filterwarnings ref`
+mark. The warning categories are :class:`pytest.PytestUnraisableExceptionWarning` and
+:class:`pytest.PytestUnhandledThreadExceptionWarning`.
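+
+A minimal sketch of a test that triggers the unraisable-exception warning:
+
+.. code-block:: python
+
+    class BrokenDel:
+        def __del__(self):
+            # this exception cannot propagate to a caller,
+            # so it is reported as "unraisable"
+            raise ValueError("del is broken")
+
+    def test_unraisable():
+        obj = BrokenDel()
+        del obj  # CPython runs __del__ here; pytest warns about it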
+
+
Creating JUnitXML format files
----------------------------------------------------
-You can check that code raises a particular warning using ``pytest.warns``,
+You can check that code raises a particular warning using :func:`pytest.warns`,
which works in a similar manner to :ref:`raises <assertraises>`:
.. code-block:: python
...
Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted...
-You can also call ``pytest.warns`` on a function or code string:
+You can also call :func:`pytest.warns` on a function or code string:
.. code-block:: python
Recording warnings
------------------
-You can record raised warnings either using ``pytest.warns`` or with
+You can record raised warnings either using :func:`pytest.warns` or with
the ``recwarn`` fixture.
-To record with ``pytest.warns`` without asserting anything about the warnings,
+To record with :func:`pytest.warns` without asserting anything about the warnings,
pass ``None`` as the expected warning type:
.. code-block:: python
assert w.filename
assert w.lineno
-Both ``recwarn`` and ``pytest.warns`` return the same interface for recorded
+Both ``recwarn`` and :func:`pytest.warns` return the same interface for recorded
warnings: a WarningsRecorder instance. To view the recorded warnings, you can
iterate over this instance, call ``len`` on it to get the number of recorded
warnings, or index into it to get a particular recorded warning.
pytest.fail("Expected a warning!")
If no warnings are issued when calling ``f``, then ``not record`` will
-evaluate to ``True``. You can then call ``pytest.fail`` with a
+evaluate to ``True``. You can then call :func:`pytest.fail` with a
custom error message.
.. _internal-warnings:
See also: :ref:`pythonpath`.
+.. note::
+ Some hooks should be implemented only in plugins or conftest.py files situated at the
+    tests root directory due to how pytest discovers plugins during startup;
+ see the documentation of each hook for details.
Writing your own plugin
-----------------------
test_example.py .. [100%]
- ============================= warnings summary =============================
- test_example.py::test_plugin
- $REGENDOC_TMPDIR/test_example.py:4: PytestExperimentalApiWarning: testdir.copy_example is an experimental api that may change over time
- testdir.copy_example("test_example.py")
-
- -- Docs: https://docs.pytest.org/en/stable/warnings.html
- ======================= 2 passed, 1 warning in 0.12s =======================
+ ============================ 2 passed in 0.12s =============================
For more information about the result object that ``runpytest()`` returns, and
the methods that it provides, please check out the :py:class:`RunResult
filterwarnings = [
"error",
"default:Using or importing the ABCs:DeprecationWarning:unittest2.*",
+ # produced by older pyparsing<=2.2.0.
+ "default:Using or importing the ABCs:DeprecationWarning:pyparsing.*",
"default:the imp module is deprecated in favour of importlib:DeprecationWarning:nose.*",
- "ignore:Module already imported so cannot be rewritten:pytest.PytestWarning",
# produced by python3.6/site.py itself (3.6.7 on Travis, could not trigger it with 3.6.8)."
"ignore:.*U.*mode is deprecated:DeprecationWarning:(?!(pytest|_pytest))",
# produced by pytest-xdist
"ignore:.*type argument to addoption.*:DeprecationWarning",
- # produced by python >=3.5 on execnet (pytest-xdist)
+    # produced by execnet (pytest-xdist)
"ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning",
# pytest's own futurewarnings
"ignore::pytest.PytestExperimentalApiWarning",
showcontent = true
[tool.black]
-target-version = ['py35']
+target-version = ['py36']
Once all builds pass and it has been **approved** by one or more maintainers, the build
can be released by pushing a tag `{version}` to this repository.
+
+Closes #{issue_number}.
"""
print(f"Branch {Fore.CYAN}{release_branch}{Fore.RESET} pushed.")
body = PR_BODY.format(
- comment_url=get_comment_data(payload)["html_url"], version=version
+ comment_url=get_comment_data(payload)["html_url"],
+ version=version,
+ issue_number=issue_number,
)
pr = repo.create_pull(
f"Prepare release {version}",
msg = dedent(
f"""
Found features or breaking changes in `{base_branch}`, and feature releases can only be
- created from `master`.":
+ created from `master`:
"""
)
msg += "\n".join(f"* `{x.name}`" for x in sorted(features + breaking))
stdout = stdout.decode("utf-8")
last_version = stdout.strip()
- stdout = check_output(
- ["git", "log", "{}..HEAD".format(last_version), "--format=%aN"]
- )
+ stdout = check_output(["git", "log", f"{last_version}..HEAD", "--format=%aN"])
stdout = stdout.decode("utf-8")
contributors = set(stdout.splitlines())
Path(__file__).parent.joinpath(template_name).read_text(encoding="UTF-8")
)
- contributors_text = (
- "\n".join("* {}".format(name) for name in sorted(contributors)) + "\n"
- )
+ contributors_text = "\n".join(f"* {name}" for name in sorted(contributors)) + "\n"
text = template_text.format(version=version, contributors=contributors_text)
- target = Path(__file__).parent.joinpath(
- "../doc/en/announce/release-{}.rst".format(version)
- )
+ target = Path(__file__).parent.joinpath(f"../doc/en/announce/release-{version}.rst")
target.write_text(text, encoding="UTF-8")
print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}")
lines = index_path.read_text(encoding="UTF-8").splitlines()
indent = " "
for index, line in enumerate(lines):
- if line.startswith("{}release-".format(indent)):
+ if line.startswith(f"{indent}release-"):
new_line = indent + target.stem
if line != new_line:
lines.insert(index, new_line)
if not skip_check_links:
check_links()
- msg = "Prepare release version {}".format(version)
+ msg = f"Prepare release version {version}"
check_call(["git", "commit", "-a", "-m", msg])
print()
Operating System :: POSIX
Programming Language :: Python :: 3
Programming Language :: Python :: 3 :: Only
- Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
_pytest.mark
pytest
install_requires =
- attrs>=17.4.0
+ attrs>=19.2.0
iniconfig
packaging
- pluggy>=0.12,<1.0
+ pluggy>=0.12,<1.0.0a1
py>=1.8.2
toml
atomicwrites>=1.0;sys_platform=="win32"
colorama;sys_platform=="win32"
importlib-metadata>=0.12;python_version<"3.8"
- pathlib2>=2.2.0;python_version<"3.6"
-python_requires = >=3.5
+python_requires = >=3.6
package_dir =
=src
setup_requires =
- setuptools>=40.0
- setuptools-scm
+ setuptools>=42.0
+ setuptools-scm>=3.4
zip_safe = no
[options.entry_points]
py.test=pytest:console_main
[options.extras_require]
-checkqa-mypy =
- mypy==0.780
testing =
argcomplete
hypothesis>=3.56
uses a python program to determine the startup script generated by pip.
You can speed up completion somewhat by changing this script to include
# PYTHON_ARGCOMPLETE_OK
-so the the python-argcomplete-check-easy-install-script does not
+so the python-argcomplete-check-easy-install-script does not
need to be called to find the entry point of the code and see if that is
marked with PYTHON_ARGCOMPLETE_OK.
import argcomplete.completers
except ImportError:
sys.exit(-1)
- filescompleter = FastFilesCompleter() # type: Optional[FastFilesCompleter]
+ filescompleter: Optional[FastFilesCompleter] = FastFilesCompleter()
def try_argcomplete(parser: argparse.ArgumentParser) -> None:
argcomplete.autocomplete(parser, always_complete_options=False)
from inspect import CO_VARARGS
from inspect import CO_VARKEYWORDS
from io import StringIO
+from pathlib import Path
from traceback import format_exception_only
from types import CodeType
from types import FrameType
from typing import List
from typing import Mapping
from typing import Optional
+from typing import overload
from typing import Pattern
from typing import Sequence
from typing import Set
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from weakref import ref
from _pytest._io import TerminalWriter
from _pytest._io.saferepr import safeformat
from _pytest._io.saferepr import saferepr
-from _pytest.compat import ATTRS_EQ_FIELD
from _pytest.compat import final
from _pytest.compat import get_real_func
-from _pytest.compat import overload
-from _pytest.compat import TYPE_CHECKING
-from _pytest.pathlib import Path
if TYPE_CHECKING:
- from typing import Type
from typing_extensions import Literal
from weakref import ReferenceType
class Code:
"""Wrapper around Python code objects."""
- def __init__(self, rawcode) -> None:
- if not hasattr(rawcode, "co_filename"):
- rawcode = getrawcode(rawcode)
- if not isinstance(rawcode, CodeType):
- raise TypeError("not a code object: {!r}".format(rawcode))
- self.filename = rawcode.co_filename
- self.firstlineno = rawcode.co_firstlineno - 1
- self.name = rawcode.co_name
- self.raw = rawcode
+ __slots__ = ("raw",)
+
+ def __init__(self, obj: CodeType) -> None:
+ self.raw = obj
+
+ @classmethod
+ def from_function(cls, obj: object) -> "Code":
+ return cls(getrawcode(obj))
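To make the split explicit, a small usage sketch of the refactored wrapper (assuming `from _pytest._code.code import Code`): the constructor now takes a real code object, while `from_function` keeps the old coercion via `getrawcode`.

def f():
    pass

code = Code.from_function(f)  # coerces f to f.__code__ via getrawcode()
assert code.name == "f"
assert code.firstlineno == f.__code__.co_firstlineno - 1  # 0-based

Code(f.__code__)  # fine: already a code object
# Code(f) is now only a static typing error; the runtime coercion
# lives exclusively in from_function().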
def __eq__(self, other):
return self.raw == other.raw
# Ignore type because of https://github.com/python/mypy/issues/4266.
__hash__ = None # type: ignore
+ @property
+ def firstlineno(self) -> int:
+ return self.raw.co_firstlineno - 1
+
+ @property
+ def name(self) -> str:
+ return self.raw.co_name
+
@property
def path(self) -> Union[py.path.local, str]:
"""Return a path object pointing to source code, or an ``str`` in
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
+ __slots__ = ("raw",)
+
def __init__(self, frame: FrameType) -> None:
- self.lineno = frame.f_lineno - 1
- self.f_globals = frame.f_globals
- self.f_locals = frame.f_locals
self.raw = frame
- self.code = Code(frame.f_code)
+
+ @property
+ def lineno(self) -> int:
+ return self.raw.f_lineno - 1
+
+ @property
+ def f_globals(self) -> Dict[str, Any]:
+ return self.raw.f_globals
+
+ @property
+ def f_locals(self) -> Dict[str, Any]:
+ return self.raw.f_locals
+
+ @property
+ def code(self) -> Code:
+ return Code(self.raw.f_code)
@property
def statement(self) -> "Source":
class TracebackEntry:
"""A single entry in a Traceback."""
- _repr_style = None # type: Optional[Literal["short", "long"]]
- exprinfo = None
+ __slots__ = ("_rawentry", "_excinfo", "_repr_style")
def __init__(
self,
rawentry: TracebackType,
excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None,
) -> None:
- self._excinfo = excinfo
self._rawentry = rawentry
- self.lineno = rawentry.tb_lineno - 1
+ self._excinfo = excinfo
+ self._repr_style: Optional['Literal["short", "long"]'] = None
+
+ @property
+ def lineno(self) -> int:
+ return self._rawentry.tb_lineno - 1
def set_repr_style(self, mode: "Literal['short', 'long']") -> None:
assert mode in ("short", "long")
Mostly for internal use.
"""
- tbh = (
+ tbh: Union[bool, Callable[[Optional[ExceptionInfo[BaseException]]], bool]] = (
False
- ) # type: Union[bool, Callable[[Optional[ExceptionInfo[BaseException]]], bool]]
+ )
for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals):
# in normal cases, f_locals and f_globals are dictionaries
# however via `exec(...)` / `eval(...)` they can be other types
if isinstance(tb, TracebackType):
def f(cur: TracebackType) -> Iterable[TracebackEntry]:
- cur_ = cur # type: Optional[TracebackType]
+ cur_: Optional[TracebackType] = cur
while cur_ is not None:
yield TracebackEntry(cur_, excinfo=excinfo)
cur_ = cur_.tb_next
def __getitem__(self, key: int) -> TracebackEntry:
...
- @overload # noqa: F811
- def __getitem__(self, key: slice) -> "Traceback": # noqa: F811
+ @overload
+ def __getitem__(self, key: slice) -> "Traceback":
...
- def __getitem__( # noqa: F811
- self, key: Union[int, slice]
- ) -> Union[TracebackEntry, "Traceback"]:
+ def __getitem__(self, key: Union[int, slice]) -> Union[TracebackEntry, "Traceback"]:
if isinstance(key, slice):
return self.__class__(super().__getitem__(key))
else:
def recursionindex(self) -> Optional[int]:
"""Return the index of the frame/TracebackEntry where recursion originates if
appropriate, None if no recursion occurred."""
- cache = {} # type: Dict[Tuple[Any, int, int], List[Dict[str, Any]]]
+ cache: Dict[Tuple[Any, int, int], List[Dict[str, Any]]] = {}
for i, entry in enumerate(self):
# id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi
_assert_start_repr = "AssertionError('assert "
- _excinfo = attr.ib(type=Optional[Tuple["Type[_E]", "_E", TracebackType]])
+ _excinfo = attr.ib(type=Optional[Tuple[Type["_E"], "_E", TracebackType]])
_striptext = attr.ib(type=str, default="")
_traceback = attr.ib(type=Optional[Traceback], default=None)
@classmethod
def from_exc_info(
cls,
- exc_info: Tuple["Type[_E]", "_E", TracebackType],
+ exc_info: Tuple[Type[_E], _E, TracebackType],
exprinfo: Optional[str] = None,
) -> "ExceptionInfo[_E]":
"""Return an ExceptionInfo for an existing exc_info tuple.
"""Return an unfilled ExceptionInfo."""
return cls(None)
- def fill_unfilled(self, exc_info: Tuple["Type[_E]", _E, TracebackType]) -> None:
+ def fill_unfilled(self, exc_info: Tuple[Type[_E], _E, TracebackType]) -> None:
"""Fill an unfilled ExceptionInfo created with ``for_later()``."""
assert self._excinfo is None, "ExceptionInfo was already filled"
self._excinfo = exc_info
@property
- def type(self) -> "Type[_E]":
+ def type(self) -> Type[_E]:
"""The exception class."""
assert (
self._excinfo is not None
return text
def errisinstance(
- self, exc: Union["Type[BaseException]", Tuple["Type[BaseException]", ...]]
+ self, exc: Union[Type[BaseException], Tuple[Type[BaseException], ...]]
) -> bool:
"""Return True if the exception is an instance of exc.
)
return fmt.repr_excinfo(self)
- def match(self, regexp: "Union[str, Pattern[str]]") -> "Literal[True]":
+ def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]":
"""Check whether the regular expression `regexp` matches the string
representation of the exception using :func:`python:re.search`.
else:
str_repr = safeformat(value)
# if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)):
- lines.append("{:<10} = {}".format(name, str_repr))
+ lines.append(f"{name:<10} = {str_repr}")
# else:
# self._line("%-10s =\\" % (name,))
# # XXX
entry: TracebackEntry,
excinfo: Optional[ExceptionInfo[BaseException]] = None,
) -> "ReprEntry":
- lines = [] # type: List[str]
+ lines: List[str] = []
style = entry._repr_style if entry._repr_style is not None else self.style
if style in ("short", "long"):
source = self._getentrysource(entry)
recursionindex = traceback.recursionindex()
except Exception as e:
max_frames = 10
- extraline = (
+ extraline: Optional[str] = (
"!!! Recursion error detected, but an error occurred locating the origin of recursion.\n"
" The following exception happened when comparing locals in the stack frame:\n"
" {exc_type}: {exc_msg}\n"
exc_msg=str(e),
max_frames=max_frames,
total=len(traceback),
- ) # type: Optional[str]
+ )
# Type ignored because adding two instances of a List subtype
# currently incorrectly has type List instead of the subtype.
traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore
def repr_excinfo(
self, excinfo: ExceptionInfo[BaseException]
) -> "ExceptionChainRepr":
- repr_chain = (
- []
- ) # type: List[Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]]]
- e = excinfo.value # type: Optional[BaseException]
- excinfo_ = excinfo # type: Optional[ExceptionInfo[BaseException]]
+ repr_chain: List[
+ Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]]
+ ] = []
+ e: Optional[BaseException] = excinfo.value
+ excinfo_: Optional[ExceptionInfo[BaseException]] = excinfo
descr = None
- seen = set() # type: Set[int]
+ seen: Set[int] = set()
while e is not None and id(e) not in seen:
seen.add(id(e))
if excinfo_:
reprtraceback = self.repr_traceback(excinfo_)
- reprcrash = (
+ reprcrash: Optional[ReprFileLocation] = (
excinfo_._getreprcrash() if self.style != "value" else None
- ) # type: Optional[ReprFileLocation]
+ )
else:
# Fallback to native repr if the exception doesn't have a traceback:
# ExceptionInfo objects require a full traceback to work.
return ExceptionChainRepr(repr_chain)
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class TerminalRepr:
def __str__(self) -> str:
# FYI this is called from pytest-xdist's serialization of exception
# This class is abstract -- only subclasses are instantiated.
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class ExceptionRepr(TerminalRepr):
# Provided by subclasses.
- reprcrash = None # type: Optional[ReprFileLocation]
- reprtraceback = None # type: ReprTraceback
+ reprcrash: Optional["ReprFileLocation"]
+ reprtraceback: "ReprTraceback"
def __attrs_post_init__(self) -> None:
- self.sections = [] # type: List[Tuple[str, str, str]]
+ self.sections: List[Tuple[str, str, str]] = []
def addsection(self, name: str, content: str, sep: str = "-") -> None:
self.sections.append((name, content, sep))
tw.line(content)
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class ExceptionChainRepr(ExceptionRepr):
chain = attr.ib(
type=Sequence[
super().toterminal(tw)
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class ReprExceptionInfo(ExceptionRepr):
reprtraceback = attr.ib(type="ReprTraceback")
reprcrash = attr.ib(type="ReprFileLocation")
super().toterminal(tw)
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class ReprTraceback(TerminalRepr):
reprentries = attr.ib(type=Sequence[Union["ReprEntry", "ReprEntryNative"]])
extraline = attr.ib(type=Optional[str])
self.extraline = None
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class ReprEntryNative(TerminalRepr):
lines = attr.ib(type=Sequence[str])
- style = "native" # type: _TracebackStyle
+ style: "_TracebackStyle" = "native"
def toterminal(self, tw: TerminalWriter) -> None:
tw.write("".join(self.lines))
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class ReprEntry(TerminalRepr):
lines = attr.ib(type=Sequence[str])
reprfuncargs = attr.ib(type=Optional["ReprFuncArgs"])
# separate indents and source lines that are not failures: we want to
# highlight the code but not the indentation, which may contain markers
# such as "> assert 0"
- fail_marker = "{} ".format(FormattedExcinfo.fail_marker)
+ fail_marker = f"{FormattedExcinfo.fail_marker} "
indent_size = len(fail_marker)
- indents = [] # type: List[str]
- source_lines = [] # type: List[str]
- failure_lines = [] # type: List[str]
+ indents: List[str] = []
+ source_lines: List[str] = []
+ failure_lines: List[str] = []
for index, line in enumerate(self.lines):
is_failure_line = line.startswith(fail_marker)
if is_failure_line:
)
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class ReprFileLocation(TerminalRepr):
path = attr.ib(type=str, converter=str)
lineno = attr.ib(type=int)
if i != -1:
msg = msg[:i]
tw.write(self.path, bold=True, red=True)
- tw.line(":{}: {}".format(self.lineno, msg))
+ tw.line(f":{self.lineno}: {msg}")
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class ReprLocals(TerminalRepr):
lines = attr.ib(type=Sequence[str])
tw.line(indent + line)
-@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
+@attr.s(eq=False)
class ReprFuncArgs(TerminalRepr):
args = attr.ib(type=Sequence[Tuple[str, object]])
if self.args:
linesofar = ""
for name, value in self.args:
- ns = "{} = {}".format(name, value)
+ ns = f"{name} = {value}"
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
if linesofar:
tw.line(linesofar)
obj = obj.place_as # type: ignore[attr-defined]
try:
- code = Code(obj)
+ code = Code.from_function(obj)
except TypeError:
try:
fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type]
import inspect
import textwrap
import tokenize
+import types
import warnings
from bisect import bisect_right
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
+from typing import overload
from typing import Tuple
from typing import Union
-from _pytest.compat import overload
-
class Source:
"""An immutable object holding a source code fragment.
def __init__(self, obj: object = None) -> None:
if not obj:
- self.lines = [] # type: List[str]
+ self.lines: List[str] = []
elif isinstance(obj, Source):
self.lines = obj.lines
elif isinstance(obj, (tuple, list)):
elif isinstance(obj, str):
self.lines = deindent(obj.split("\n"))
else:
- rawcode = getrawcode(obj)
- src = inspect.getsource(rawcode)
+ try:
+ rawcode = getrawcode(obj)
+ src = inspect.getsource(rawcode)
+ except TypeError:
+ src = inspect.getsource(obj) # type: ignore[arg-type]
self.lines = deindent(src.split("\n"))
def __eq__(self, other: object) -> bool:
def __getitem__(self, key: int) -> str:
...
- @overload # noqa: F811
- def __getitem__(self, key: slice) -> "Source": # noqa: F811
+ @overload
+ def __getitem__(self, key: slice) -> "Source":
...
- def __getitem__(self, key: Union[int, slice]) -> Union[str, "Source"]: # noqa: F811
+ def __getitem__(self, key: Union[int, slice]) -> Union[str, "Source"]:
if isinstance(key, int):
return self.lines[key]
else:
return source, lineno
-def getrawcode(obj, trycall: bool = True):
+def getrawcode(obj: object, trycall: bool = True) -> types.CodeType:
"""Return code object for given function."""
try:
- return obj.__code__
+ return obj.__code__ # type: ignore[attr-defined,no-any-return]
except AttributeError:
- obj = getattr(obj, "f_code", obj)
- obj = getattr(obj, "__code__", obj)
- if trycall and not hasattr(obj, "co_firstlineno"):
- if hasattr(obj, "__call__") and not inspect.isclass(obj):
- x = getrawcode(obj.__call__, trycall=False)
- if hasattr(x, "co_firstlineno"):
- return x
- return obj
+ pass
+ if trycall:
+ call = getattr(obj, "__call__", None)
+ if call and not isinstance(obj, type):
+ return getrawcode(call, trycall=False)
+ raise TypeError(f"could not get code object for {obj!r}")
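With the rewrite above, the lookup order is explicit: `obj.__code__` first, then a single hop through `__call__` for callable non-class objects, otherwise `TypeError`. A short illustration (a sketch, same module context assumed):

class Adder:
    def __call__(self, x: int) -> int:
        return x + 1

def g():
    pass

assert getrawcode(g) is g.__code__                     # plain function
assert getrawcode(Adder()) is Adder.__call__.__code__  # one __call__ hop
# getrawcode(42) raises TypeError("could not get code object for 42"),
# and classes are skipped, so getrawcode(Adder) raises as well.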
def deindent(lines: Iterable[str]) -> List[str]:
def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[int]]:
# Flatten all statements and except handlers into one lineno-list.
# AST's line numbers start indexing at 1.
- values = [] # type: List[int]
+ values: List[int] = []
for x in ast.walk(node):
if isinstance(x, (ast.stmt, ast.ExceptHandler)):
values.append(x.lineno - 1)
for name in ("finalbody", "orelse"):
- val = getattr(x, name, None) # type: Optional[List[ast.stmt]]
+ val: Optional[List[ast.stmt]] = getattr(x, name, None)
if val:
# Treat the finally/orelse part as its own statement.
values.append(val[0].lineno - 1 - 1)
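The `- 1 - 1` above is easy to misread: the first `-1` converts the AST's 1-based line number to the 0-based numbering used in this module, and the second steps from the first statement inside the `finally:`/`orelse` body back to the line holding the keyword itself (assuming the usual layout where the keyword sits on the line directly above). A stdlib illustration:

import ast

tree = ast.parse("try:\n    a = 1\nfinally:\n    b = 2\n")
try_stmt = tree.body[0]
# The first statement of the finally-body reports 1-based line 4;
# "finally:" itself is on line 3, i.e. 0-based line 2 == 4 - 1 - 1.
assert try_stmt.finalbody[0].lineno == 4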
width: int = 80,
depth: Optional[int] = None,
*,
- compact: bool = False
+ compact: bool = False,
) -> str:
return AlwaysDispatchingPrettyPrinter(
indent=indent, width=width, depth=depth, compact=compact
self._file = file
self.hasmarkup = should_do_markup(file)
self._current_line = ""
- self._terminal_width = None # type: Optional[int]
+ self._terminal_width: Optional[int] = None
self.code_highlight = True
@property
def markup(self, text: str, **markup: bool) -> str:
for name in markup:
if name not in self._esctable:
- raise ValueError("unknown markup: {!r}".format(name))
+ raise ValueError(f"unknown markup: {name!r}")
if self.hasmarkup:
esc = [self._esctable[name] for name, on in markup.items() if on]
if esc:
sepchar: str,
title: Optional[str] = None,
fullwidth: Optional[int] = None,
- **markup: bool
+ **markup: bool,
) -> None:
if fullwidth is None:
fullwidth = self.fullwidth
# N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1)
fill = sepchar * N
- line = "{} {} {}".format(fill, title, fill)
+ line = f"{fill} {title} {fill}"
else:
# we want len(sepchar)*N <= fullwidth
# i.e. N <= fullwidth // len(sepchar)
except ImportError:
return source
else:
- highlighted = highlight(
+ highlighted: str = highlight(
source, PythonLexer(), TerminalFormatter(bg="dark")
- ) # type: str
+ )
return highlighted
from typing import Generator
from typing import List
from typing import Optional
+from typing import TYPE_CHECKING
from _pytest.assertion import rewrite
from _pytest.assertion import truncate
from _pytest.assertion import util
from _pytest.assertion.rewrite import assertstate_key
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
def __init__(self, config: Config, mode) -> None:
self.mode = mode
self.trace = config.trace.root.get("assertion")
- self.hook = None # type: Optional[rewrite.AssertionRewritingHook]
+ self.hook: Optional[rewrite.AssertionRewritingHook] = None
def install_importhook(config: Config) -> rewrite.AssertionRewritingHook:
import sys
import tokenize
import types
+from pathlib import Path
+from pathlib import PurePath
from typing import Callable
from typing import Dict
from typing import IO
from typing import Sequence
from typing import Set
from typing import Tuple
+from typing import TYPE_CHECKING
from typing import Union
import py
from _pytest.assertion.util import ( # noqa: F401
format_explanation as _format_explanation,
)
-from _pytest.compat import fspath
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.main import Session
from _pytest.pathlib import fnmatch_ex
-from _pytest.pathlib import Path
-from _pytest.pathlib import PurePath
from _pytest.store import StoreKey
if TYPE_CHECKING:
- from _pytest.assertion import AssertionState # noqa: F401
+ from _pytest.assertion import AssertionState
assertstate_key = StoreKey["AssertionState"]()
# pytest caches rewritten pycs in pycache dirs
-PYTEST_TAG = "{}-pytest-{}".format(sys.implementation.cache_tag, version)
+PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}"
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
self.fnpats = config.getini("python_files")
except ValueError:
self.fnpats = ["test_*.py", "*_test.py"]
- self.session = None # type: Optional[Session]
- self._rewritten_names = set() # type: Set[str]
- self._must_rewrite = set() # type: Set[str]
+ self.session: Optional[Session] = None
+ self._rewritten_names: Set[str] = set()
+ self._must_rewrite: Set[str] = set()
# flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
# which might result in infinite recursion (#3506)
self._writing_pyc = False
self._basenames_to_check_rewrite = {"conftest"}
- self._marked_for_rewrite_cache = {} # type: Dict[str, bool]
+ self._marked_for_rewrite_cache: Dict[str, bool] = {}
self._session_paths_checked = False
def set_session(self, session: Optional[Session]) -> None:
spec is None
# this is a namespace package (without `__init__.py`)
# there's nothing to rewrite there
- # python3.5 - python3.6: `namespace`
+ # python3.6: `namespace`
# python3.7+: `None`
or spec.origin == "namespace"
or spec.origin is None
ok = try_makedirs(cache_dir)
if not ok:
write = False
- state.trace("read only directory: {}".format(cache_dir))
+ state.trace(f"read only directory: {cache_dir}")
cache_name = fn.name[:-3] + PYC_TAIL
pyc = cache_dir / cache_name
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn, pyc, state.trace)
if co is None:
- state.trace("rewriting {!r}".format(fn))
+ state.trace(f"rewriting {fn!r}")
source_stat, co = _rewrite_test(fn, self.config)
if write:
self._writing_pyc = True
finally:
self._writing_pyc = False
else:
- state.trace("found cached rewritten pyc for {}".format(fn))
+ state.trace(f"found cached rewritten pyc for {fn}")
exec(co, module.__dict__)
def _early_rewrite_bailout(self, name: str, state: "AssertionState") -> bool:
if self._is_marked_for_rewrite(name, state):
return False
- state.trace("early skip of rewriting module: {}".format(name))
+ state.trace(f"early skip of rewriting module: {name}")
return True
def _should_rewrite(self, name: str, fn: str, state: "AssertionState") -> bool:
# always rewrite conftest files
if os.path.basename(fn) == "conftest.py":
- state.trace("rewriting conftest file: {!r}".format(fn))
+ state.trace(f"rewriting conftest file: {fn!r}")
return True
if self.session is not None:
if self.session.isinitpath(py.path.local(fn)):
- state.trace(
- "matched test file (was specified on cmdline): {!r}".format(fn)
- )
+ state.trace(f"matched test file (was specified on cmdline): {fn!r}")
return True
# modules not passed explicitly on the command line are only
fn_path = PurePath(fn)
for pat in self.fnpats:
if fnmatch_ex(pat, fn_path):
- state.trace("matched test file {!r}".format(fn))
+ state.trace(f"matched test file {fn!r}")
return True
return self._is_marked_for_rewrite(name, state)
except KeyError:
for marked in self._must_rewrite:
if name == marked or name.startswith(marked + "."):
- state.trace(
- "matched marked file {!r} (from {!r})".format(name, marked)
- )
+ state.trace(f"matched marked file {name!r} (from {marked!r})")
self._marked_for_rewrite_cache[name] = True
return True
) -> None:
# Technically, we don't have to have the same pyc format as
# (C)Python, since these "pycs" should never be seen by builtin
- # import. However, there's little reason deviate.
+ # import. However, there's little reason to deviate.
fp.write(importlib.util.MAGIC_NUMBER)
+ # https://www.python.org/dev/peps/pep-0552/
+ if sys.version_info >= (3, 7):
+ flags = b"\x00\x00\x00\x00"
+ fp.write(flags)
# as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
mtime = int(source_stat.st_mtime) & 0xFFFFFFFF
size = source_stat.st_size & 0xFFFFFFFF
- # "<LL" stands for 2 unsigned longs, little-ending
+ # "<LL" stands for 2 unsigned longs, little-endian.
fp.write(struct.pack("<LL", mtime, size))
fp.write(marshal.dumps(co))
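Taken together, the header written above is: 4 magic bytes, a 4-byte zero flags word on 3.7+ (PEP 552, timestamp-based pyc), then 32-bit little-endian mtime and size. A minimal sketch of parsing it back, mirroring what `_read_pyc` below validates field by field:

import importlib.util
import struct
from typing import BinaryIO, Tuple

def read_pyc_header(fp: BinaryIO, has_flags: bool = True) -> Tuple[int, int]:
    """Sketch: parse magic, optional flags word (3.7+), mtime and size."""
    assert fp.read(4) == importlib.util.MAGIC_NUMBER
    if has_flags:
        (flags,) = struct.unpack("<L", fp.read(4))
        assert flags == 0  # only timestamp-based, unhashed pycs are written here
    mtime, size = struct.unpack("<LL", fp.read(8))
    return mtime, size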
pyc: Path,
) -> bool:
try:
- with atomic_write(fspath(pyc), mode="wb", overwrite=True) as fp:
+ with atomic_write(os.fspath(pyc), mode="wb", overwrite=True) as fp:
_write_pyc_fp(fp, source_stat, co)
except OSError as e:
- state.trace("error writing pyc file at {}: {}".format(pyc, e))
+ state.trace(f"error writing pyc file at {pyc}: {e}")
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, pycache dir being a
# file etc.
source_stat: os.stat_result,
pyc: Path,
) -> bool:
- proc_pyc = "{}.{}".format(pyc, os.getpid())
+ proc_pyc = f"{pyc}.{os.getpid()}"
try:
fp = open(proc_pyc, "wb")
except OSError as e:
- state.trace(
- "error writing pyc file at {}: errno={}".format(proc_pyc, e.errno)
- )
+ state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
return False
try:
_write_pyc_fp(fp, source_stat, co)
- os.rename(proc_pyc, fspath(pyc))
+ os.rename(proc_pyc, os.fspath(pyc))
except OSError as e:
- state.trace("error writing pyc file at {}: {}".format(pyc, e))
+ state.trace(f"error writing pyc file at {pyc}: {e}")
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, pycache dir being a
# file etc.
def _rewrite_test(fn: Path, config: Config) -> Tuple[os.stat_result, types.CodeType]:
"""Read and rewrite *fn* and return the code object."""
- fn_ = fspath(fn)
+ fn_ = os.fspath(fn)
stat = os.stat(fn_)
with open(fn_, "rb") as f:
source = f.read()
Return rewritten code if successful or None if not.
"""
try:
- fp = open(fspath(pyc), "rb")
+ fp = open(os.fspath(pyc), "rb")
except OSError:
return None
with fp:
+ # https://www.python.org/dev/peps/pep-0552/
+ has_flags = sys.version_info >= (3, 7)
try:
- stat_result = os.stat(fspath(source))
+ stat_result = os.stat(os.fspath(source))
mtime = int(stat_result.st_mtime)
size = stat_result.st_size
- data = fp.read(12)
+ data = fp.read(16 if has_flags else 12)
except OSError as e:
- trace("_read_pyc({}): OSError {}".format(source, e))
+ trace(f"_read_pyc({source}): OSError {e}")
return None
# Check for invalid or out of date pyc file.
- if (
- len(data) != 12
- or data[:4] != importlib.util.MAGIC_NUMBER
- or struct.unpack("<LL", data[4:]) != (mtime & 0xFFFFFFFF, size & 0xFFFFFFFF)
- ):
- trace("_read_pyc(%s): invalid or out of date pyc" % source)
+ if len(data) != (16 if has_flags else 12):
+ trace("_read_pyc(%s): invalid pyc (too short)" % source)
+ return None
+ if data[:4] != importlib.util.MAGIC_NUMBER:
+ trace("_read_pyc(%s): invalid pyc (bad magic number)" % source)
+ return None
+ if has_flags and data[4:8] != b"\x00\x00\x00\x00":
+ trace("_read_pyc(%s): invalid pyc (unsupported flags)" % source)
+ return None
+ mtime_data = data[8 if has_flags else 4 : 12 if has_flags else 8]
+ if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF:
+ trace("_read_pyc(%s): out of date" % source)
+ return None
+ size_data = data[12 if has_flags else 8 : 16 if has_flags else 12]
+ if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF:
+ trace("_read_pyc(%s): invalid pyc (incorrect size)" % source)
return None
try:
co = marshal.load(fp)
except Exception as e:
- trace("_read_pyc({}): marshal.load error {}".format(source, e))
+ trace(f"_read_pyc({source}): marshal.load error {e}")
return None
if not isinstance(co, types.CodeType):
trace("_read_pyc(%s): not a code object" % source)
def _get_assertion_exprs(src: bytes) -> Dict[int, str]:
"""Return a mapping from {lineno: "assertion test expression"}."""
- ret = {} # type: Dict[int, str]
+ ret: Dict[int, str] = {}
depth = 0
- lines = [] # type: List[str]
- assert_lineno = None # type: Optional[int]
- seen_lines = set() # type: Set[int]
+ lines: List[str] = []
+ assert_lineno: Optional[int] = None
+ seen_lines: Set[int] = set()
def _write_and_reset() -> None:
nonlocal depth, lines, assert_lineno, seen_lines
]
mod.body[pos:pos] = imports
# Collect asserts.
- nodes = [mod] # type: List[ast.AST]
+ nodes: List[ast.AST] = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
- new = [] # type: List[ast.AST]
+ new: List[ast.AST] = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
to format a string of %-formatted values as added by
.explanation_param().
"""
- self.explanation_specifiers = {} # type: Dict[str, ast.expr]
+ self.explanation_specifiers: Dict[str, ast.expr] = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr: ast.expr) -> ast.Name:
"assertion is always true, perhaps remove parentheses?"
),
category=None,
- filename=fspath(self.module_path),
+ filename=os.fspath(self.module_path),
lineno=assert_.lineno,
)
- self.statements = [] # type: List[ast.stmt]
- self.variables = [] # type: List[str]
+ self.statements: List[ast.stmt] = []
+ self.variables: List[str] = []
self.variable_counter = itertools.count()
if self.enable_assertion_pass_hook:
- self.format_variables = [] # type: List[str]
+ self.format_variables: List[str] = []
- self.stack = [] # type: List[Dict[str, ast.expr]]
- self.expl_stmts = [] # type: List[ast.stmt]
+ self.stack: List[Dict[str, ast.expr]] = []
+ self.expl_stmts: List[ast.stmt] = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Process each operand, short-circuiting if needed.
for i, v in enumerate(boolop.values):
if i:
- fail_inner = [] # type: List[ast.stmt]
+ fail_inner: List[ast.stmt] = []
# cond is set in a prior loop iteration below
self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa
self.expl_stmts = fail_inner
call = ast.Call(app, [expl_format], [])
self.expl_stmts.append(ast.Expr(call))
if i < levels:
- cond = res # type: ast.expr
+ cond: ast.expr = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
- inner = [] # type: List[ast.stmt]
+ inner: List[ast.stmt] = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
symbol = BINOP_MAP[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
- explanation = "({} {} {})".format(left_expl, symbol, right_expl)
+ explanation = f"({left_expl} {symbol} {right_expl})"
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
- outer_expl = "{}\n{{{} = {}\n}}".format(res_expl, res_expl, expl)
+ outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}"
return res, outer_expl
def visit_Starred(self, starred: ast.Starred) -> Tuple[ast.Starred, str]:
- # From Python 3.5, a Starred node can appear in a function call.
+ # A Starred node can appear in a function call.
res, expl = self.visit(starred.value)
new_starred = ast.Starred(res, starred.ctx)
return new_starred, "*" + expl
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
- left_expl = "({})".format(left_expl)
+ left_expl = f"({left_expl})"
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (ast.Compare, ast.BoolOp)):
- next_expl = "({})".format(next_expl)
+ next_expl = f"({next_expl})"
results.append(next_res)
sym = BINOP_MAP[op.__class__]
syms.append(ast.Str(sym))
- expl = "{} {} {}".format(left_expl, sym, next_expl)
+ expl = f"{left_expl} {sym} {next_expl}"
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
ast.Tuple(results, ast.Load()),
)
if len(comp.ops) > 1:
- res = ast.BoolOp(ast.And(), load_names) # type: ast.expr
+ res: ast.expr = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
Returns True if successful or if it already exists.
"""
try:
- os.makedirs(fspath(cache_dir), exist_ok=True)
+ os.makedirs(os.fspath(cache_dir), exist_ok=True)
except (FileNotFoundError, NotADirectoryError, FileExistsError):
# One of the path components was not a directory:
# - we're in a zip file
truncated_line_count += 1 # Account for the part-truncated final line
msg = "...Full output truncated"
if truncated_line_count == 1:
- msg += " ({} line hidden)".format(truncated_line_count)
+ msg += f" ({truncated_line_count} line hidden)"
else:
- msg += " ({} lines hidden)".format(truncated_line_count)
- msg += ", {}".format(USAGE_MSG)
+ msg += f" ({truncated_line_count} lines hidden)"
+ msg += f", {USAGE_MSG}"
truncated_explanation.extend(["", str(msg)])
return truncated_explanation
from typing import Mapping
from typing import Optional
from typing import Sequence
-from typing import Tuple
import _pytest._code
from _pytest import outcomes
from _pytest._io.saferepr import _pformat_dispatch
from _pytest._io.saferepr import safeformat
from _pytest._io.saferepr import saferepr
-from _pytest.compat import ATTRS_EQ_FIELD
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
-_reprcompare = None # type: Optional[Callable[[str, object, object], Optional[str]]]
+_reprcompare: Optional[Callable[[str, object, object], Optional[str]]] = None
# Works similarly as _reprcompare attribute. Is populated with the hook call
# when pytest_runtest_setup is called.
-_assertion_pass = None # type: Optional[Callable[[int, str, str], None]]
+_assertion_pass: Optional[Callable[[int, str, str], None]] = None
def format_explanation(explanation: str) -> str:
return isinstance(x, (set, frozenset))
+def isnamedtuple(obj: Any) -> bool:
+ return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None
+
+
def isdatacls(obj: Any) -> bool:
return getattr(obj, "__dataclass_fields__", None) is not None
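The new helper is deliberately duck-typed on `_fields`, so it accepts both `collections.namedtuple` and `typing.NamedTuple` instances while rejecting plain tuples:

from collections import namedtuple
from typing import NamedTuple

Point = namedtuple("Point", ["x", "y"])

class Point2(NamedTuple):
    x: int
    y: int

assert isnamedtuple(Point(1, 2))
assert isnamedtuple(Point2(1, 2))
assert not isnamedtuple((1, 2))  # plain tuples have no _fields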
left_repr = saferepr(left, maxsize=maxsize)
right_repr = saferepr(right, maxsize=maxsize)
- summary = "{} {} {}".format(left_repr, op, right_repr)
+ summary = f"{left_repr} {op} {right_repr}"
explanation = None
try:
if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose)
else:
- if issequence(left) and issequence(right):
+ if type(left) == type(right) and (
+ isdatacls(left) or isattrs(left) or isnamedtuple(left)
+ ):
+ # Note: unlike dataclasses/attrs, namedtuples compare only the
+ # field values, not the type or field names. But this branch
+ # intentionally only handles the same-type case, the way namedtuples
+ # were typically used in older code bases before dataclasses/attrs.
+ explanation = _compare_eq_cls(left, right, verbose)
+ elif issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
- elif type(left) == type(right) and (isdatacls(left) or isattrs(left)):
- type_fn = (isdatacls, isattrs)
- explanation = _compare_eq_cls(left, right, verbose, type_fn)
elif verbose > 0:
explanation = _compare_eq_verbose(left, right)
if isiterable(left) and isiterable(right):
"""
from difflib import ndiff
- explanation = [] # type: List[str]
+ explanation: List[str] = []
if verbose < 1:
i = 0 # just in case left or right has zero length
left_lines = repr(left).splitlines(keepends)
right_lines = repr(right).splitlines(keepends)
- explanation = [] # type: List[str]
+ explanation: List[str] = []
explanation += ["+" + line for line in left_lines]
explanation += ["-" + line for line in right_lines]
left: Sequence[Any], right: Sequence[Any], verbose: int = 0
) -> List[str]:
comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)
- explanation = [] # type: List[str]
+ explanation: List[str] = []
len_left = len(left)
len_right = len(right)
for i in range(min(len_left, len_right)):
left_value = left[i]
right_value = right[i]
- explanation += [
- "At index {} diff: {!r} != {!r}".format(i, left_value, right_value)
- ]
+ explanation += [f"At index {i} diff: {left_value!r} != {right_value!r}"]
break
if comparing_bytes:
extra = saferepr(right[len_left])
if len_diff == 1:
- explanation += [
- "{} contains one more item: {}".format(dir_with_more, extra)
- ]
+ explanation += [f"{dir_with_more} contains one more item: {extra}"]
else:
explanation += [
"%s contains %d more items, first extra item: %s"
def _compare_eq_dict(
left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0
) -> List[str]:
- explanation = [] # type: List[str]
+ explanation: List[str] = []
set_left = set(left)
set_right = set(right)
common = set_left.intersection(set_right)
return explanation
-def _compare_eq_cls(
- left: Any,
- right: Any,
- verbose: int,
- type_fns: Tuple[Callable[[Any], bool], Callable[[Any], bool]],
-) -> List[str]:
- isdatacls, isattrs = type_fns
+def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]:
if isdatacls(left):
all_fields = left.__dataclass_fields__
fields_to_check = [field for field, info in all_fields.items() if info.compare]
elif isattrs(left):
all_fields = left.__attrs_attrs__
- fields_to_check = [
- field.name for field in all_fields if getattr(field, ATTRS_EQ_FIELD)
- ]
+ fields_to_check = [field.name for field in all_fields if getattr(field, "eq")]
+ elif isnamedtuple(left):
+ fields_to_check = left._fields
+ else:
+ assert False
indent = " "
same = []
# pytest-cache version.
import json
import os
+from pathlib import Path
from typing import Dict
from typing import Generator
from typing import Iterable
import attr
import py
-import pytest
-from .pathlib import Path
from .pathlib import resolve_from_str
from .pathlib import rm_rf
from .reports import CollectReport
from _pytest import nodes
from _pytest._io import TerminalWriter
from _pytest.compat import final
-from _pytest.compat import order_preserving_dict
from _pytest.config import Config
from _pytest.config import ExitCode
+from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
+from _pytest.deprecated import check_ispytest
+from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureRequest
from _pytest.main import Session
from _pytest.python import Module
@final
-@attr.s
+@attr.s(init=False)
class Cache:
_cachedir = attr.ib(type=Path, repr=False)
_config = attr.ib(type=Config, repr=False)
# sub-directory under cache-dir for values created by "set"
_CACHE_PREFIX_VALUES = "v"
+ def __init__(
+ self, cachedir: Path, config: Config, *, _ispytest: bool = False
+ ) -> None:
+ check_ispytest(_ispytest)
+ self._cachedir = cachedir
+ self._config = config
+
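The `_ispytest` keyword threaded through this hunk guards private constructors against direct use from user code. A minimal sketch of the pattern; the real `check_ispytest` lives in `_pytest.deprecated`, and whether it warns or raises is an implementation detail not shown in this diff:

import warnings

def check_ispytest(ispytest: bool) -> None:
    # Sketch only: flag direct construction of a private pytest object.
    if not ispytest:
        warnings.warn("A private pytest class or function was used.", stacklevel=3)

class _Private:
    def __init__(self, *, _ispytest: bool = False) -> None:
        check_ispytest(_ispytest)

_Private(_ispytest=True)  # internal call sites pass the flag explicitly
_Private()                # user code trips the guard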
@classmethod
- def for_config(cls, config: Config) -> "Cache":
- cachedir = cls.cache_dir_from_config(config)
+ def for_config(cls, config: Config, *, _ispytest: bool = False) -> "Cache":
+ """Create the Cache instance for a Config.
+
+ :meta private:
+ """
+ check_ispytest(_ispytest)
+ cachedir = cls.cache_dir_from_config(config, _ispytest=True)
if config.getoption("cacheclear") and cachedir.is_dir():
- cls.clear_cache(cachedir)
- return cls(cachedir, config)
+ cls.clear_cache(cachedir, _ispytest=True)
+ return cls(cachedir, config, _ispytest=True)
@classmethod
- def clear_cache(cls, cachedir: Path) -> None:
- """Clear the sub-directories used to hold cached directories and values."""
+ def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None:
+ """Clear the sub-directories used to hold cached directories and values.
+
+ :meta private:
+ """
+ check_ispytest(_ispytest)
for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES):
d = cachedir / prefix
if d.is_dir():
rm_rf(d)
@staticmethod
- def cache_dir_from_config(config: Config) -> Path:
+ def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path:
+ """Get the path to the cache directory for a Config.
+
+ :meta private:
+ """
+ check_ispytest(_ispytest)
return resolve_from_str(config.getini("cache_dir"), config.rootpath)
- def warn(self, fmt: str, **args: object) -> None:
+ def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None:
+ """Issue a cache warning.
+
+ :meta private:
+ """
+ check_ispytest(_ispytest)
import warnings
from _pytest.warning_types import PytestCacheWarning
cache_dir_exists_already = self._cachedir.exists()
path.parent.mkdir(exist_ok=True, parents=True)
except OSError:
- self.warn("could not create cache path {path}", path=path)
+ self.warn("could not create cache path {path}", path=path, _ispytest=True)
return
if not cache_dir_exists_already:
self._ensure_supporting_files()
try:
f = path.open("w")
except OSError:
- self.warn("cache could not write path {path}", path=path)
+ self.warn("cache could not write path {path}", path=path, _ispytest=True)
else:
with f:
f.write(data)
self.lfplugin = lfplugin
self._collected_at_least_one_failure = False
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_make_collect_report(self, collector: nodes.Collector):
if isinstance(collector, Session):
out = yield
- res = out.get_result() # type: CollectReport
+ res: CollectReport = out.get_result()
# Sort any lf-paths to the beginning.
lf_paths = self.lfplugin._last_failed_paths
def __init__(self, lfplugin: "LFPlugin") -> None:
self.lfplugin = lfplugin
- @pytest.hookimpl
+ @hookimpl
def pytest_make_collect_report(
self, collector: nodes.Collector
) -> Optional[CollectReport]:
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
assert config.cache
- self.lastfailed = config.cache.get(
- "cache/lastfailed", {}
- ) # type: Dict[str, bool]
- self._previously_failed_count = None # type: Optional[int]
- self._report_status = None # type: Optional[str]
+ self.lastfailed: Dict[str, bool] = config.cache.get("cache/lastfailed", {})
+ self._previously_failed_count: Optional[int] = None
+ self._report_status: Optional[str] = None
self._skipped_files = 0 # count skipped files during collection due to --lf
if config.getoption("lf"):
else:
self.lastfailed[report.nodeid] = True
- @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+ @hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection_modifyitems(
self, config: Config, items: List[nodes.Item]
) -> Generator[None, None, None]:
assert config.cache is not None
self.cached_nodeids = set(config.cache.get("cache/nodeids", []))
- @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+ @hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection_modifyitems(
self, items: List[nodes.Item]
) -> Generator[None, None, None]:
yield
if self.active:
- new_items = order_preserving_dict() # type: Dict[str, nodes.Item]
- other_items = order_preserving_dict() # type: Dict[str, nodes.Item]
+ new_items: Dict[str, nodes.Item] = {}
+ other_items: Dict[str, nodes.Item] = {}
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
self.cached_nodeids.update(item.nodeid for item in items)
def _get_increasing_order(self, items: Iterable[nodes.Item]) -> List[nodes.Item]:
- return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
+ return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True) # type: ignore[no-any-return]
def pytest_sessionfinish(self) -> None:
config = self.config
return None
-@pytest.hookimpl(tryfirst=True)
+@hookimpl(tryfirst=True)
def pytest_configure(config: Config) -> None:
- config.cache = Cache.for_config(config)
+ config.cache = Cache.for_config(config, _ispytest=True)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
-@pytest.fixture
+@fixture
def cache(request: FixtureRequest) -> Cache:
"""Return a cache object that can persist state between testing sessions.
displaypath = cachedir.relative_to(config.rootpath)
except ValueError:
displaypath = cachedir
- return "cachedir: {}".format(displaypath)
+ return f"cachedir: {displaypath}"
return None
# print("%s/" % p.relto(basedir))
if p.is_file():
key = str(p.relative_to(basedir))
- tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
+ tw.line(f"{key} is a file of length {p.stat().st_size:d}")
return 0
from typing import Optional
from typing import TextIO
from typing import Tuple
+from typing import TYPE_CHECKING
from typing import Union
-import pytest
from _pytest.compat import final
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
+from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
+from _pytest.deprecated import check_ispytest
+from _pytest.fixtures import fixture
from _pytest.fixtures import SubRequest
from _pytest.nodes import Collector
+from _pytest.nodes import File
from _pytest.nodes import Item
if TYPE_CHECKING:
See https://github.com/pytest-dev/py/issues/103.
"""
- if (
- not sys.platform.startswith("win32")
- or sys.version_info[:2] < (3, 6)
- or hasattr(sys, "pypy_version_info")
- ):
+ if not sys.platform.startswith("win32") or hasattr(sys, "pypy_version_info"):
return
# Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666).
sys.stderr = _reopen_stdio(sys.stderr, "wb")
-@pytest.hookimpl(hookwrapper=True)
+@hookimpl(hookwrapper=True)
def pytest_load_initial_conftests(early_config: Config):
ns = early_config.known_args_namespace
if ns.capture == "fd":
# Further complications are the need to support suspend() and the
# possibility of FD reuse (e.g. the tmpfile getting the very same
# target FD). The following approach is robust, I believe.
- self.targetfd_invalid = os.open(
- os.devnull, os.O_RDWR
- ) # type: Optional[int]
+ self.targetfd_invalid: Optional[int] = os.open(os.devnull, os.O_RDWR)
os.dup2(self.targetfd_invalid, targetfd)
else:
self.targetfd_invalid = None
self.syscapture = SysCapture(targetfd)
else:
self.tmpfile = EncodedFile(
- # TODO: Remove type ignore, fixed in next mypy release.
- TemporaryFile(buffering=0), # type: ignore[arg-type]
+ TemporaryFile(buffering=0),
encoding="utf-8",
errors="replace",
newline="",
class CaptureResult(Generic[AnyStr]):
"""The result of :method:`CaptureFixture.readouterr`."""
- # Can't use slots in Python<3.5.3 due to https://bugs.python.org/issue31272
- if sys.version_info >= (3, 5, 3):
- __slots__ = ("out", "err")
+ __slots__ = ("out", "err")
def __init__(self, out: AnyStr, err: AnyStr) -> None:
- self.out = out # type: AnyStr
- self.err = err # type: AnyStr
+ self.out: AnyStr = out
+ self.err: AnyStr = err
def __len__(self) -> int:
return 2
return tuple(self) < tuple(other)
def __repr__(self) -> str:
- return "CaptureResult(out={!r}, err={!r})".format(self.out, self.err)
+ return f"CaptureResult(out={self.out!r}, err={self.err!r})"
class MultiCapture(Generic[AnyStr]):
return MultiCapture(
in_=None, out=SysCapture(1, tee=True), err=SysCapture(2, tee=True)
)
- raise ValueError("unknown capturing method: {!r}".format(method))
+ raise ValueError(f"unknown capturing method: {method!r}")
# CaptureManager and CaptureFixture
def __init__(self, method: "_CaptureMethod") -> None:
self._method = method
- self._global_capturing = None # type: Optional[MultiCapture[str]]
- self._capture_fixture = None # type: Optional[CaptureFixture[Any]]
+ self._global_capturing: Optional[MultiCapture[str]] = None
+ self._capture_fixture: Optional[CaptureFixture[Any]] = None
def __repr__(self) -> str:
return "<CaptureManager _method={!r} _global_capturing={!r} _capture_fixture={!r}>".format(
# Hooks
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_make_collect_report(self, collector: Collector):
- if isinstance(collector, pytest.File):
+ if isinstance(collector, File):
self.resume_global_capture()
outcome = yield
self.suspend_global_capture()
else:
yield
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item: Item) -> Generator[None, None, None]:
with self.item_capture("setup", item):
yield
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item: Item) -> Generator[None, None, None]:
with self.item_capture("call", item):
yield
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item: Item) -> Generator[None, None, None]:
with self.item_capture("teardown", item):
yield
- @pytest.hookimpl(tryfirst=True)
+ @hookimpl(tryfirst=True)
def pytest_keyboard_interrupt(self) -> None:
self.stop_global_capturing()
- @pytest.hookimpl(tryfirst=True)
+ @hookimpl(tryfirst=True)
def pytest_internalerror(self) -> None:
self.stop_global_capturing()
class CaptureFixture(Generic[AnyStr]):
- """Object returned by the :py:func:`capsys`, :py:func:`capsysbinary`,
- :py:func:`capfd` and :py:func:`capfdbinary` fixtures."""
+ """Object returned by the :fixture:`capsys`, :fixture:`capsysbinary`,
+ :fixture:`capfd` and :fixture:`capfdbinary` fixtures."""
- def __init__(self, captureclass, request: SubRequest) -> None:
+ def __init__(
+ self, captureclass, request: SubRequest, *, _ispytest: bool = False
+ ) -> None:
+ check_ispytest(_ispytest)
self.captureclass = captureclass
self.request = request
- self._capture = None # type: Optional[MultiCapture[AnyStr]]
+ self._capture: Optional[MultiCapture[AnyStr]] = None
self._captured_out = self.captureclass.EMPTY_BUFFER
self._captured_err = self.captureclass.EMPTY_BUFFER
# The fixtures.
-@pytest.fixture
+@fixture
def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
``out`` and ``err`` will be ``text`` objects.
"""
capman = request.config.pluginmanager.getplugin("capturemanager")
- capture_fixture = CaptureFixture[str](SysCapture, request)
+ capture_fixture = CaptureFixture[str](SysCapture, request, _ispytest=True)
capman.set_fixture(capture_fixture)
capture_fixture._start()
yield capture_fixture
capman.unset_fixture()
-@pytest.fixture
+@fixture
def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
``out`` and ``err`` will be ``bytes`` objects.
"""
capman = request.config.pluginmanager.getplugin("capturemanager")
- capture_fixture = CaptureFixture[bytes](SysCaptureBinary, request)
+ capture_fixture = CaptureFixture[bytes](SysCaptureBinary, request, _ispytest=True)
capman.set_fixture(capture_fixture)
capture_fixture._start()
yield capture_fixture
capman.unset_fixture()
-@pytest.fixture
+@fixture
def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
"""Enable text capturing of writes to file descriptors ``1`` and ``2``.
``out`` and ``err`` will be ``text`` objects.
"""
capman = request.config.pluginmanager.getplugin("capturemanager")
- capture_fixture = CaptureFixture[str](FDCapture, request)
+ capture_fixture = CaptureFixture[str](FDCapture, request, _ispytest=True)
capman.set_fixture(capture_fixture)
capture_fixture._start()
yield capture_fixture
capman.unset_fixture()
-@pytest.fixture
+@fixture
def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
``out`` and ``err`` will be ``bytes`` objects.
"""
capman = request.config.pluginmanager.getplugin("capturemanager")
- capture_fixture = CaptureFixture[bytes](FDCaptureBinary, request)
+ capture_fixture = CaptureFixture[bytes](FDCaptureBinary, request, _ispytest=True)
capman.set_fixture(capture_fixture)
capture_fixture._start()
yield capture_fixture
import enum
import functools
import inspect
-import os
import re
import sys
from contextlib import contextmanager
from inspect import Parameter
from inspect import signature
+from pathlib import Path
from typing import Any
from typing import Callable
from typing import Generic
from typing import Optional
-from typing import overload as overload
from typing import Tuple
+from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
-if sys.version_info < (3, 5, 2):
- TYPE_CHECKING = False # type: bool
-else:
- from typing import TYPE_CHECKING
-
-
if TYPE_CHECKING:
from typing import NoReturn
- from typing import Type
from typing_extensions import Final
# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
class NotSetType(enum.Enum):
token = 0
-NOTSET = NotSetType.token # type: Final # noqa: E305
+NOTSET: "Final" = NotSetType.token # noqa: E305
# fmt: on
-MODULE_NOT_FOUND_ERROR = (
- "ModuleNotFoundError" if sys.version_info[:2] >= (3, 6) else "ImportError"
-)
-
-
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata
else:
REGEX_TYPE = type(re.compile(""))
-if sys.version_info < (3, 6):
-
- def fspath(p):
- """os.fspath replacement, useful to point out when we should replace it by the
- real function once we drop py35."""
- return str(p)
-
-
-else:
- fspath = os.fspath
-
-
def is_generator(func: object) -> bool:
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def is_async_function(func: object) -> bool:
"""Return True if the given function seems to be an async function or
an async generator."""
- return iscoroutinefunction(func) or (
- sys.version_info >= (3, 6) and inspect.isasyncgenfunction(func)
- )
+ return iscoroutinefunction(func) or inspect.isasyncgenfunction(func)
def getlocation(function, curdir: Optional[str] = None) -> str:
- from _pytest.pathlib import Path
-
function = get_real_func(function)
fn = Path(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
*,
name: str = "",
is_method: bool = False,
- cls: Optional[type] = None
+ cls: Optional[type] = None,
) -> Tuple[str, ...]:
"""Return the names of a function's mandatory arguments.
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
- "Could not determine arguments of {!r}: {}".format(function, e),
- pytrace=False,
+ f"Could not determine arguments of {function!r}: {e}", pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
- # TODO: Remove type ignore after https://github.com/python/typeshed/pull/4383
- p.kind is Parameter.POSITIONAL_OR_KEYWORD # type: ignore[unreachable]
- or p.kind is Parameter.KEYWORD_ONLY # type: ignore[unreachable]
+ p.kind is Parameter.POSITIONAL_OR_KEYWORD
+ or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
_non_printable_ascii_translate_table = {
- i: "\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
+ i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
return False
-if sys.version_info < (3, 5, 2):
-
- def overload(f): # noqa: F811
- return f
-
-
if TYPE_CHECKING:
if sys.version_info >= (3, 8):
from typing import final as final
from typing import final as final
else:
- def final(f): # noqa: F811
+ def final(f):
return f
-if getattr(attr, "__version_info__", ()) >= (19, 2):
- ATTRS_EQ_FIELD = "eq"
-else:
- ATTRS_EQ_FIELD = "cmp"
-
-
if sys.version_info >= (3, 8):
from functools import cached_property as cached_property
else:
+ from typing import overload
+ from typing import Type
class cached_property(Generic[_S, _T]):
__slots__ = ("func", "__doc__")
@overload
def __get__(
- self, instance: None, owner: Optional["Type[_S]"] = ...
+ self, instance: None, owner: Optional[Type[_S]] = ...
) -> "cached_property[_S, _T]":
...
- @overload # noqa: F811
- def __get__( # noqa: F811
- self, instance: _S, owner: Optional["Type[_S]"] = ...
- ) -> _T:
+ @overload
+ def __get__(self, instance: _S, owner: Optional[Type[_S]] = ...) -> _T:
...
- def __get__(self, instance, owner=None): # noqa: F811
+ def __get__(self, instance, owner=None):
if instance is None:
return self
value = instance.__dict__[self.func.__name__] = self.func(instance)
return value
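The backport mirrors `functools.cached_property` from 3.8: the first access runs the wrapped function and stores the result in the instance `__dict__`, which then shadows the non-data descriptor on subsequent lookups. Usage sketch:

class Report:
    def __init__(self, data):
        self.data = data

    @cached_property
    def summary(self) -> str:
        print("computing once")
        return ",".join(self.data)

r = Report(["a", "b"])
r.summary  # prints "computing once"; result cached in r.__dict__["summary"]
r.summary  # instance dict wins over the descriptor; not recomputed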
-# Sometimes an algorithm needs a dict which yields items in the order in which
-# they were inserted when iterated. Since Python 3.7, `dict` preserves
-# insertion order. Since `dict` is faster and uses less memory than
-# `OrderedDict`, prefer to use it if possible.
-if sys.version_info >= (3, 7):
- order_preserving_dict = dict
-else:
- from collections import OrderedDict
-
- order_preserving_dict = OrderedDict
-
-
# Perform exhaustiveness checking.
#
# Consider this example:
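
The example this comment refers to is cut off in this excerpt. A minimal sketch of the idea, with a local `assert_never` mirroring the helper this comment introduces (not the exact original wording):

```python
from typing import NoReturn, Union


def assert_never(value: NoReturn) -> NoReturn:
    # Only reachable at runtime if a case was missed; statically, mypy
    # rejects the call site because the argument is not narrowed to NoReturn.
    assert False, f"Unhandled value: {value!r}"


def handle(x: Union[int, str]) -> int:
    if isinstance(x, int):
        return 1
    elif isinstance(x, str):
        return 2
    else:
        # Adding a new member to the Union makes this branch a type error,
        # so the missing case is caught during type checking.
        assert_never(x)
```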
import types
import warnings
from functools import lru_cache
+from pathlib import Path
from types import TracebackType
from typing import Any
from typing import Callable
from typing import Set
from typing import TextIO
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import Union
import attr
from _pytest._io import TerminalWriter
from _pytest.compat import final
from _pytest.compat import importlib_metadata
-from _pytest.compat import TYPE_CHECKING
from _pytest.outcomes import fail
from _pytest.outcomes import Skipped
from _pytest.pathlib import bestrelpath
from _pytest.pathlib import import_path
from _pytest.pathlib import ImportMode
-from _pytest.pathlib import Path
from _pytest.store import Store
from _pytest.warning_types import PytestConfigWarning
if TYPE_CHECKING:
- from typing import Type
from _pytest._code.code import _TracebackStyle
from _pytest.terminal import TerminalReporter
def __init__(
self,
path: py.path.local,
- excinfo: Tuple["Type[Exception]", Exception, TracebackType],
+ excinfo: Tuple[Type[Exception], Exception, TracebackType],
) -> None:
super().__init__(path, excinfo)
self.path = path
except ConftestImportFailure as e:
exc_info = ExceptionInfo(e.excinfo)
tw = TerminalWriter(sys.stderr)
- tw.line(
- "ImportError while loading conftest '{e.path}'.".format(e=e), red=True
- )
+ tw.line(f"ImportError while loading conftest '{e.path}'.", red=True)
exc_info.traceback = exc_info.traceback.filter(
filter_traceback_for_conftest_import_failure
)
return ExitCode.USAGE_ERROR
else:
try:
- ret = config.hook.pytest_cmdline_main(
+ ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main(
config=config
- ) # type: Union[ExitCode, int]
+ )
try:
return ExitCode(ret)
except ValueError:
except UsageError as e:
tw = TerminalWriter(sys.stderr)
for msg in e.args:
- tw.line("ERROR: {}\n".format(msg), red=True)
+ tw.line(f"ERROR: {msg}\n", red=True)
return ExitCode.USAGE_ERROR
:optname: Name of the option.
"""
if os.path.isdir(path):
- raise UsageError("{} must be a filename, given: {}".format(optname, path))
+ raise UsageError(f"{optname} must be a filename, given: {path}")
return path
:optname: Name of the option.
"""
if not os.path.isdir(path):
- raise UsageError("{} must be a directory, given: {}".format(optname, path))
+ raise UsageError(f"{optname} must be a directory, given: {path}")
return path
"warnings",
"logging",
"reports",
+ *(["unraisableexception", "threadexception"] if sys.version_info >= (3, 8) else []),
"faulthandler",
)
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")
+builtin_plugins.add("pytester_assertions")
def get_config(
super().__init__("pytest")
# The objects are module objects, only used generically.
- self._conftest_plugins = set() # type: Set[types.ModuleType]
+ self._conftest_plugins: Set[types.ModuleType] = set()
# State related to local conftest plugins.
- self._dirpath2confmods = {} # type: Dict[py.path.local, List[types.ModuleType]]
- self._conftestpath2mod = {} # type: Dict[Path, types.ModuleType]
- self._confcutdir = None # type: Optional[py.path.local]
+ self._dirpath2confmods: Dict[py.path.local, List[types.ModuleType]] = {}
+ self._conftestpath2mod: Dict[Path, types.ModuleType] = {}
+ self._confcutdir: Optional[py.path.local] = None
self._noconftest = False
- self._duplicatepaths = set() # type: Set[py.path.local]
+ self._duplicatepaths: Set[py.path.local] = set()
# plugins that were explicitly skipped with pytest.skip
# list of (module name, skip reason)
# previously we would issue a warning when a plugin was skipped, but
# since we refactored warnings as first citizens of Config, they are
# just stored here to be used later.
- self.skipped_plugins = [] # type: List[Tuple[str, str]]
+ self.skipped_plugins: List[Tuple[str, str]] = []
self.add_hookspecs(_pytest.hookspec)
self.register(self)
if os.environ.get("PYTEST_DEBUG"):
- err = sys.stderr # type: IO[str]
- encoding = getattr(err, "encoding", "utf8") # type: str
+ err: IO[str] = sys.stderr
+ encoding: str = getattr(err, "encoding", "utf8")
try:
err = open(
os.dup(err.fileno()), mode=err.mode, buffering=1, encoding=encoding,
)
)
return None
- ret = super().register(plugin, name) # type: Optional[str]
+ ret: Optional[str] = super().register(plugin, name)
if ret:
self.hook.pytest_plugin_registered.call_historic(
kwargs=dict(plugin=plugin, manager=self)
def getplugin(self, name: str):
# Support deprecated naming because plugins (xdist e.g.) use it.
- plugin = self.get_plugin(name) # type: Optional[_PluggyPlugin]
+ plugin: Optional[_PluggyPlugin] = self.get_plugin(name)
return plugin
def hasplugin(self, name: str) -> bool:
if path and path.relto(dirpath) or path == dirpath:
assert mod not in mods
mods.append(mod)
- self.trace("loading conftestmodule {!r}".format(mod))
+ self.trace(f"loading conftestmodule {mod!r}")
self.consider_conftest(mod)
return mod
self,
pluginmanager: PytestPluginManager,
*,
- invocation_params: Optional[InvocationParams] = None
+ invocation_params: Optional[InvocationParams] = None,
) -> None:
from .argparsing import Parser, FILE_OR_DIR
_a = FILE_OR_DIR
self._parser = Parser(
- usage="%(prog)s [options] [{}] [{}] [...]".format(_a, _a),
+ usage=f"%(prog)s [options] [{_a}] [{_a}] [...]",
processopt=self._processopt,
)
self.pluginmanager = pluginmanager
self.trace = self.pluginmanager.trace.root.get("config")
self.hook = self.pluginmanager.hook
- self._inicache = {} # type: Dict[str, Any]
- self._override_ini = () # type: Sequence[str]
- self._opt2dest = {} # type: Dict[str, str]
- self._cleanup = [] # type: List[Callable[[], None]]
+ self._inicache: Dict[str, Any] = {}
+ self._override_ini: Sequence[str] = ()
+ self._opt2dest: Dict[str, str] = {}
+ self._cleanup: List[Callable[[], None]] = []
# A place where plugins can store information on the config for their
# own use. Currently only intended for internal plugins.
self._store = Store()
if TYPE_CHECKING:
from _pytest.cacheprovider import Cache
- self.cache = None # type: Optional[Cache]
+ self.cache: Optional[Cache] = None
@property
def invocation_dir(self) -> py.path.local:
fin()
def get_terminal_writer(self) -> TerminalWriter:
- terminalreporter = self.pluginmanager.get_plugin(
+ terminalreporter: TerminalReporter = self.pluginmanager.get_plugin(
"terminalreporter"
- ) # type: TerminalReporter
+ )
return terminalreporter._tw
def pytest_cmdline_parse(
option: Optional[argparse.Namespace] = None,
) -> None:
if option and getattr(option, "fulltrace", False):
- style = "long" # type: _TracebackStyle
+ style: _TracebackStyle = "long"
else:
style = "native"
excrepr = excinfo.getrepr(
self._validate_plugins()
self._warn_about_skipped_plugins()
+ if self.known_args_namespace.strict:
+ self.issue_config_time_warning(
+ _pytest.deprecated.STRICT_OPTION, stacklevel=2
+ )
+
if self.known_args_namespace.confcutdir is None and self.inipath is not None:
confcutdir = str(self.inipath.parent)
self.known_args_namespace.confcutdir = confcutdir
# we don't want to prevent --help/--version from working
# so just let it pass and print a warning at the end
self.issue_config_time_warning(
- PytestConfigWarning(
- "could not load initial conftests: {}".format(e.path)
- ),
+ PytestConfigWarning(f"could not load initial conftests: {e.path}"),
stacklevel=2,
)
else:
def _validate_config_options(self) -> None:
for key in sorted(self._get_unknown_ini_keys()):
- self._warn_or_fail_if_strict("Unknown config option: {}\n".format(key))
+ self._warn_or_fail_if_strict(f"Unknown config option: {key}\n")
def _validate_plugins(self) -> None:
required_plugins = sorted(self.getini("required_plugins"))
try:
description, type, default = self._parser._inidict[name]
except KeyError as e:
- raise ValueError("unknown configuration value: {!r}".format(name)) from e
+ raise ValueError(f"unknown configuration value: {name!r}") from e
override_value = self._get_override_ini_value(name)
if override_value is None:
try:
elif type == "bool":
return _strtobool(str(value).strip())
else:
- assert type is None
+ assert type in [None, "string"]
return value
def _getconftest_pathlist(
except KeyError:
return None
modpath = py.path.local(mod.__file__).dirpath()
- values = [] # type: List[py.path.local]
+ values: List[py.path.local] = []
for relroot in relroots:
if not isinstance(relroot, py.path.local):
relroot = relroot.replace("/", os.sep)
if skip:
import pytest
- pytest.skip("no {!r} option found".format(name))
- raise ValueError("no option named {!r}".format(name)) from e
+ pytest.skip(f"no {name!r} option found")
+ raise ValueError(f"no option named {name!r}") from e
def getvalue(self, name: str, path=None):
"""Deprecated, use getoption() instead."""
def _warn_about_skipped_plugins(self) -> None:
for module_name, msg in self.pluginmanager.skipped_plugins:
self.issue_config_time_warning(
- PytestConfigWarning("skipped plugin {!r}: {}".format(module_name, msg)),
+ PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"),
stacklevel=2,
)
elif val in ("n", "no", "f", "false", "off", "0"):
return False
else:
- raise ValueError("invalid truth value {!r}".format(val))
+ raise ValueError(f"invalid truth value {val!r}")
@lru_cache(maxsize=50)
def parse_warning_filter(
arg: str, *, escape: bool
-) -> "Tuple[str, str, Type[Warning], str, int]":
+) -> Tuple[str, str, Type[Warning], str, int]:
"""Parse a warnings filter string.
This is copied from warnings._setoption, but does not apply the filter,
"""
parts = arg.split(":")
if len(parts) > 5:
- raise warnings._OptionError("too many fields (max 5): {!r}".format(arg))
+ raise warnings._OptionError(f"too many fields (max 5): {arg!r}")
while len(parts) < 5:
parts.append("")
action_, message, category_, module, lineno_ = [s.strip() for s in parts]
- action = warnings._getaction(action_) # type: str # type: ignore[attr-defined]
- category = warnings._getcategory(
- category_
- ) # type: Type[Warning] # type: ignore[attr-defined]
+ action: str = warnings._getaction(action_) # type: ignore[attr-defined]
+ category: Type[Warning] = warnings._getcategory(category_) # type: ignore[attr-defined]
if message and escape:
message = re.escape(message)
if module and escape:
if lineno < 0:
raise ValueError
except (ValueError, OverflowError) as e:
- raise warnings._OptionError("invalid lineno {!r}".format(lineno_)) from e
+ raise warnings._OptionError(f"invalid lineno {lineno_!r}") from e
else:
lineno = 0
return action, message, category, module, lineno
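
For reference, the colon-separated grammar here is the same one Python's `-W` option uses: `action:message:category:module:lineno`. A small illustration of how such a string decomposes, using only the stdlib (not the private helper above):

```python
import warnings

# A filter string as accepted by -W and pytest's filterwarnings ini option.
arg = "ignore:unclosed:ResourceWarning:mypkg:0"
action, message, category, module, lineno = arg.split(":")
assert (action, message, category, module, lineno) == (
    "ignore", "unclosed", "ResourceWarning", "mypkg", "0"
)

# The stdlib applies the equivalent filter like this:
warnings.filterwarnings("ignore", message="unclosed", category=ResourceWarning)
```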
from typing import Optional
from typing import Sequence
from typing import Tuple
+from typing import TYPE_CHECKING
from typing import Union
import py
import _pytest._io
from _pytest.compat import final
-from _pytest.compat import TYPE_CHECKING
from _pytest.config.exceptions import UsageError
if TYPE_CHECKING:
there's an error processing the command line arguments.
"""
- prog = None # type: Optional[str]
+ prog: Optional[str] = None
def __init__(
self,
processopt: Optional[Callable[["Argument"], None]] = None,
) -> None:
self._anonymous = OptionGroup("custom options", parser=self)
- self._groups = [] # type: List[OptionGroup]
+ self._groups: List[OptionGroup] = []
self._processopt = processopt
self._usage = usage
- self._inidict = {} # type: Dict[str, Tuple[str, Optional[str], Any]]
- self._ininames = [] # type: List[str]
- self.extra_info = {} # type: Dict[str, Any]
+ self._inidict: Dict[str, Tuple[str, Optional[str], Any]] = {}
+ self._ininames: List[str] = []
+ self.extra_info: Dict[str, Any] = {}
def processoption(self, option: "Argument") -> None:
if self._processopt:
self,
name: str,
help: str,
- type: Optional["Literal['pathlist', 'args', 'linelist', 'bool']"] = None,
+ type: Optional[
+ "Literal['string', 'pathlist', 'args', 'linelist', 'bool']"
+ ] = None,
default=None,
) -> None:
"""Register an ini-file option.
:name: Name of the ini-variable.
- :type: Type of the variable, can be ``pathlist``, ``args``, ``linelist``
- or ``bool``.
+ :type: Type of the variable, can be ``string``, ``pathlist``, ``args``,
+ ``linelist`` or ``bool``. Defaults to ``string`` if ``None`` or
+ not passed.
:default: Default value if no ini-file option exists but is queried.
The value of ini-variables can be retrieved via a call to
:py:func:`config.getini(name) <_pytest.config.Config.getini>`.
"""
- assert type in (None, "pathlist", "args", "linelist", "bool")
+ assert type in (None, "string", "pathlist", "args", "linelist", "bool")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
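
A sketch of how a plugin might register and read an ini option with the newly explicit `string` type (the option names here are illustrative, not from this diff):

```python
# conftest.py -- illustrative plugin code
def pytest_addoption(parser):
    # "string" may now be given explicitly; it is also what you get
    # when type is None or omitted.
    parser.addini("project_codename", help="internal codename", type="string")
    parser.addini("extra_dirs", help="additional directories", type="pathlist")


def pytest_configure(config):
    # Returns the value from the ini file, or the registered default
    # when the key is absent.
    codename = config.getini("project_codename")
```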
def __str__(self) -> str:
if self.option_id:
- return "option {}: {}".format(self.option_id, self.msg)
+ return f"option {self.option_id}: {self.msg}"
else:
return self.msg
def __init__(self, *names: str, **attrs: Any) -> None:
"""Store parms in private vars for use in add_argument."""
self._attrs = attrs
- self._short_opts = [] # type: List[str]
- self._long_opts = [] # type: List[str]
+ self._short_opts: List[str] = []
+ self._long_opts: List[str] = []
if "%default" in (attrs.get("help") or ""):
warnings.warn(
'pytest now uses argparse. "%default" should be'
except KeyError:
pass
self._set_opt_strings(names)
- dest = attrs.get("dest") # type: Optional[str]
+ dest: Optional[str] = attrs.get("dest")
if dest:
self.dest = dest
elif self._long_opts:
self._long_opts.append(opt)
def __repr__(self) -> str:
- args = [] # type: List[str]
+ args: List[str] = []
if self._short_opts:
args += ["_short_opts: " + repr(self._short_opts)]
if self._long_opts:
) -> None:
self.name = name
self.description = description
- self.options = [] # type: List[Argument]
+ self.options: List[Argument] = []
self.parser = parser
def addoption(self, *optnames: str, **attrs: Any) -> None:
def error(self, message: str) -> "NoReturn":
"""Transform argparse error message into UsageError."""
- msg = "{}: error: {}".format(self.prog, message)
+ msg = f"{self.prog}: error: {message}"
if hasattr(self._parser, "_config_source_hint"):
# Type ignored because the attribute is set dynamically.
- msg = "{} ({})".format(msg, self._parser._config_source_hint) # type: ignore
+ msg = f"{msg} ({self._parser._config_source_hint})" # type: ignore
raise UsageError(self.format_usage() + msg)
if arg and arg[0] == "-":
lines = ["unrecognized arguments: %s" % (" ".join(unrecognized))]
for k, v in sorted(self.extra_info.items()):
- lines.append(" {}: {}".format(k, v))
+ lines.append(f" {k}: {v}")
self.error("\n".join(lines))
getattr(parsed, FILE_OR_DIR).extend(unrecognized)
return parsed
orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
if orgstr and orgstr[0] != "-": # only optional arguments
return orgstr
- res = getattr(
- action, "_formatted_action_invocation", None
- ) # type: Optional[str]
+ res: Optional[str] = getattr(action, "_formatted_action_invocation", None)
if res:
return res
options = orgstr.split(", ")
action._formatted_action_invocation = orgstr # type: ignore
return orgstr
return_list = []
- short_long = {} # type: Dict[str, str]
+ short_long: Dict[str, str] = {}
for option in options:
if len(option) == 2 or option[2] == " ":
continue
import os
+from pathlib import Path
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
+from typing import TYPE_CHECKING
from typing import Union
import iniconfig
from .exceptions import UsageError
-from _pytest.compat import TYPE_CHECKING
from _pytest.outcomes import fail
from _pytest.pathlib import absolutepath
from _pytest.pathlib import commonpath
-from _pytest.pathlib import Path
if TYPE_CHECKING:
from . import Config
Raise UsageError if the file cannot be parsed.
"""
try:
- return iniconfig.IniConfig(path)
+ return iniconfig.IniConfig(str(path))
except iniconfig.ParseError as exc:
raise UsageError(str(exc)) from exc
def get_common_ancestor(paths: Iterable[Path]) -> Path:
- common_ancestor = None # type: Optional[Path]
+ common_ancestor: Optional[Path] = None
for path in paths:
if not path.exists():
continue
dirs = get_dirs_from_args(args)
if inifile:
inipath_ = absolutepath(inifile)
- inipath = inipath_ # type: Optional[Path]
+ inipath: Optional[Path] = inipath_
inicfg = load_config_dict_from_file(inipath_) or {}
if rootdir_cmd_arg is None:
rootdir = get_common_ancestor(dirs)
from typing import List
from typing import Optional
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import Union
from _pytest import outcomes
from _pytest._code import ExceptionInfo
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import ConftestImportFailure
from _pytest.config import hookimpl
from _pytest.reports import BaseReport
if TYPE_CHECKING:
- from typing import Type
-
from _pytest.capture import CaptureManager
from _pytest.runner import CallInfo
modname, classname = value.split(":")
except ValueError as e:
raise argparse.ArgumentTypeError(
- "{!r} is not in the format 'modname:classname'".format(value)
+ f"{value!r} is not in the format 'modname:classname'"
) from e
return (modname, classname)
class pytestPDB:
"""Pseudo PDB that defers to the real pdb."""
- _pluginmanager = None # type: Optional[PytestPluginManager]
- _config = None # type: Config
- _saved = (
- []
- ) # type: List[Tuple[Callable[..., None], Optional[PytestPluginManager], Config]]
+ _pluginmanager: Optional[PytestPluginManager] = None
+ _config: Optional[Config] = None
+ _saved: List[
+ Tuple[Callable[..., None], Optional[PytestPluginManager], Optional[Config]]
+ ] = []
_recursive_debug = 0
- _wrapped_pdb_cls = None # type: Optional[Tuple[Type[Any], Type[Any]]]
+ _wrapped_pdb_cls: Optional[Tuple[Type[Any], Type[Any]]] = None
@classmethod
def _is_capturing(cls, capman: Optional["CaptureManager"]) -> Union[str, bool]:
except Exception as exc:
value = ":".join((modname, classname))
raise UsageError(
- "--pdbcls: could not import {!r}: {}".format(value, exc)
+ f"--pdbcls: could not import {value!r}: {exc}"
) from exc
else:
import pdb
def do_continue(self, arg):
ret = super().do_continue(arg)
if cls._recursive_debug == 0:
+ assert cls._config is not None
tw = _pytest.config.create_terminal_writer(cls._config)
tw.line()
import _pytest.config
if cls._pluginmanager is None:
- capman = None # type: Optional[CaptureManager]
+ capman: Optional[CaptureManager] = None
else:
capman = cls._pluginmanager.getplugin("capturemanager")
if capman:
else:
capturing = cls._is_capturing(capman)
if capturing == "global":
- tw.sep(">", "PDB {} (IO-capturing turned off)".format(method))
+ tw.sep(">", f"PDB {method} (IO-capturing turned off)")
elif capturing:
tw.sep(
">",
% (method, capturing),
)
else:
- tw.sep(">", "PDB {}".format(method))
+ tw.sep(">", f"PDB {method}")
_pdb = cls._import_pdb_cls(capman)(**kwargs)
:class:`PytestWarning`, or :class:`UnformattedWarning`
in case of warnings which need to format their messages.
"""
+from warnings import warn
+
from _pytest.warning_types import PytestDeprecationWarning
from _pytest.warning_types import UnformattedWarning
"Please update to the new name.",
)
+YIELD_FIXTURE = PytestDeprecationWarning(
+ "@pytest.yield_fixture is deprecated.\n"
+ "Use @pytest.fixture instead; they are the same."
+)
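
The migration this warning asks for is mechanical, since `@pytest.fixture` has long supported yield-style setup/teardown. A sketch (the connection class is a stand-in):

```python
import pytest


class _Conn:
    def close(self) -> None:
        pass


# Deprecated spelling (now emits YIELD_FIXTURE):
#
#     @pytest.yield_fixture
#     def db(): ...
#
# Preferred spelling -- identical behavior:
@pytest.fixture
def db():
    conn = _Conn()
    yield conn       # setup above, teardown below the yield
    conn.close()


def test_db(db):
    assert db is not None
```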
MINUS_K_DASH = PytestDeprecationWarning(
"The `-k '-expr'` syntax to -k is deprecated.\nUse `-k 'not expr'` instead."
"The gethookproxy() and isinitpath() methods of FSCollector and Package are deprecated; "
"use self.session.gethookproxy() and self.session.isinitpath() instead. "
)
+
+STRICT_OPTION = PytestDeprecationWarning(
+ "The --strict option is deprecated, use --strict-markers instead."
+)
+
+PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.")
+
+
+# You want to make some `__init__` or function "private".
+#
+# def my_private_function(some, args):
+# ...
+#
+# Do this:
+#
+# def my_private_function(some, args, *, _ispytest: bool = False):
+# check_ispytest(_ispytest)
+# ...
+#
+# Change all internal/allowed calls to
+#
+# my_private_function(some, args, _ispytest=True)
+#
+# All other calls will get the default _ispytest=False and trigger
+# the warning (possibly error in the future).
+def check_ispytest(ispytest: bool) -> None:
+ if not ispytest:
+ warn(PRIVATE, stacklevel=3)
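
A minimal sketch of the pattern in action, assuming `check_ispytest` as defined above (the class is illustrative, not from this diff):

```python
from _pytest.deprecated import check_ispytest


class _PrivateThing:
    """Illustrative only -- mimics a constructor made private."""

    def __init__(self, value: int, *, _ispytest: bool = False) -> None:
        check_ispytest(_ispytest)
        self.value = value


_PrivateThing(1, _ispytest=True)  # internal call: silent
_PrivateThing(1)                  # external call: warns PRIVATE
```

`stacklevel=3` makes the warning point at the code that called the private constructor, not at `check_ispytest` itself.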
from typing import Pattern
from typing import Sequence
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import Union
import py.path
from _pytest._code.code import TerminalRepr
from _pytest._io import TerminalWriter
from _pytest.compat import safe_getattr
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureRequest
if TYPE_CHECKING:
import doctest
- from typing import Type
DOCTEST_REPORT_CHOICE_NONE = "none"
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
# Lazy definition of runner class
RUNNER_CLASS = None
# Lazy definition of output checker class
-CHECKER_CLASS = None # type: Optional[Type[doctest.OutputChecker]]
+CHECKER_CLASS: Optional[Type["doctest.OutputChecker"]] = None
def pytest_addoption(parser: Parser) -> None:
config = parent.config
if path.ext == ".py":
if config.option.doctestmodules and not _is_setup_py(path):
- mod = DoctestModule.from_parent(parent, fspath=path) # type: DoctestModule
+ mod: DoctestModule = DoctestModule.from_parent(parent, fspath=path)
return mod
elif _is_doctest(config, path, parent):
- txt = DoctestTextfile.from_parent(parent, fspath=path) # type: DoctestTextfile
+ txt: DoctestTextfile = DoctestTextfile.from_parent(parent, fspath=path)
return txt
return None
class MultipleDoctestFailures(Exception):
- def __init__(self, failures: "Sequence[doctest.DocTestFailure]") -> None:
+ def __init__(self, failures: Sequence["doctest.DocTestFailure"]) -> None:
super().__init__()
self.failures = failures
-def _init_runner_class() -> "Type[doctest.DocTestRunner]":
+def _init_runner_class() -> Type["doctest.DocTestRunner"]:
import doctest
class PytestDoctestRunner(doctest.DebugRunner):
def __init__(
self,
- checker: Optional[doctest.OutputChecker] = None,
+ checker: Optional["doctest.OutputChecker"] = None,
verbose: Optional[bool] = None,
optionflags: int = 0,
continue_on_failure: bool = True,
out,
test: "doctest.DocTest",
example: "doctest.Example",
- exc_info: "Tuple[Type[BaseException], BaseException, types.TracebackType]",
+ exc_info: Tuple[Type[BaseException], BaseException, types.TracebackType],
) -> None:
if isinstance(exc_info[1], OutcomeException):
raise exc_info[1]
self.runner = runner
self.dtest = dtest
self.obj = None
- self.fixture_request = None # type: Optional[FixtureRequest]
+ self.fixture_request: Optional[FixtureRequest] = None
@classmethod
def from_parent( # type: ignore
*,
name: str,
runner: "doctest.DocTestRunner",
- dtest: "doctest.DocTest"
+ dtest: "doctest.DocTest",
):
# incompatible signature due to imposed limits on subclass
"""The public named constructor."""
assert self.runner is not None
_check_all_skipped(self.dtest)
self._disable_output_capturing_for_darwin()
- failures = [] # type: List[doctest.DocTestFailure]
+ failures: List["doctest.DocTestFailure"] = []
# Type ignored because we change the type of `out` from what
# doctest expects.
self.runner.run(self.dtest, out=failures) # type: ignore[arg-type]
) -> Union[str, TerminalRepr]:
import doctest
- failures = (
- None
- ) # type: Optional[Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]]]
+ failures: Optional[
+ Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]]
+ ] = None
if isinstance(
excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException)
):
]
indent = ">>>"
for line in example.source.splitlines():
- lines.append("??? {} {}".format(indent, line))
+ lines.append(f"??? {indent} {line}")
indent = "..."
if isinstance(failure, doctest.DocTestFailure):
lines += checker.output_difference(
doctest_item._fixtureinfo = fm.getfixtureinfo( # type: ignore[attr-defined]
node=doctest_item, func=func, cls=None, funcargs=False
)
- fixture_request = FixtureRequest(doctest_item)
+ fixture_request = FixtureRequest(doctest_item, _ispytest=True)
fixture_request._fillfixtures()
return fixture_request
-def _init_checker_class() -> "Type[doctest.OutputChecker]":
+def _init_checker_class() -> Type["doctest.OutputChecker"]:
import doctest
import re
return got
offset = 0
for w, g in zip(wants, gots):
- fraction = w.group("fraction") # type: Optional[str]
- exponent = w.group("exponent1") # type: Optional[str]
+ fraction: Optional[str] = w.group("fraction")
+ exponent: Optional[str] = w.group("exponent1")
if exponent is None:
exponent = w.group("exponent2")
if fraction is None:
from typing import Iterator
from typing import List
from typing import Optional
+from typing import overload
from typing import Sequence
from typing import Set
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import py
import _pytest
+from _pytest import nodes
from _pytest._code import getfslineno
from _pytest._code.code import FormattedExcinfo
from _pytest._code.code import TerminalRepr
from _pytest._io import TerminalWriter
from _pytest.compat import _format_args
from _pytest.compat import _PytestWrapper
+from _pytest.compat import assert_never
from _pytest.compat import final
from _pytest.compat import get_real_func
from _pytest.compat import get_real_method
from _pytest.compat import getlocation
from _pytest.compat import is_generator
from _pytest.compat import NOTSET
-from _pytest.compat import order_preserving_dict
-from _pytest.compat import overload
from _pytest.compat import safe_getattr
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import _PluggyPlugin
from _pytest.config import Config
from _pytest.config.argparsing import Parser
+from _pytest.deprecated import check_ispytest
from _pytest.deprecated import FILLFUNCARGS
+from _pytest.deprecated import YIELD_FIXTURE
from _pytest.mark import Mark
from _pytest.mark import ParameterSet
+from _pytest.mark.structures import MarkDecorator
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
from _pytest.pathlib import absolutepath
+from _pytest.store import StoreKey
if TYPE_CHECKING:
from typing import Deque
from typing import NoReturn
- from typing import Type
from typing_extensions import Literal
- from _pytest import nodes
from _pytest.main import Session
from _pytest.python import CallSpec2
from _pytest.python import Function
# Cache key.
object,
# Exc info if raised.
- Tuple["Type[BaseException]", BaseException, TracebackType],
+ Tuple[Type[BaseException], BaseException, TracebackType],
],
]
def pytest_sessionstart(session: "Session") -> None:
- import _pytest.python
- import _pytest.nodes
-
- scopename2class.update(
- {
- "package": _pytest.python.Package,
- "class": _pytest.python.Class,
- "module": _pytest.python.Module,
- "function": _pytest.nodes.Item,
- "session": _pytest.main.Session,
- }
- )
session._fixturemanager = FixtureManager(session)
-scopename2class = {} # type: Dict[str, Type[nodes.Node]]
-
-
def get_scope_package(node, fixturedef: "FixtureDef[object]"):
import pytest
return current
-def get_scope_node(node, scope):
- cls = scopename2class.get(scope)
- if cls is None:
- raise ValueError("unknown scope")
- return node.getparent(cls)
+def get_scope_node(
+ node: nodes.Node, scope: "_Scope"
+) -> Optional[Union[nodes.Item, nodes.Collector]]:
+ import _pytest.python
+
+ if scope == "function":
+ return node.getparent(nodes.Item)
+ elif scope == "class":
+ return node.getparent(_pytest.python.Class)
+ elif scope == "module":
+ return node.getparent(_pytest.python.Module)
+ elif scope == "package":
+ return node.getparent(_pytest.python.Package)
+ elif scope == "session":
+ return node.getparent(_pytest.main.Session)
+ else:
+ assert_never(scope)
+
+
+# Used for storing artificial fixturedefs for direct parametrization.
+name2pseudofixturedef_key = StoreKey[Dict[str, "FixtureDef[Any]"]]()
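
`Store`/`StoreKey` is pytest's typed replacement for ad-hoc node attributes like the removed `_name2pseudofixturedef`. Roughly, assuming the `_pytest.store` API used elsewhere in this diff:

```python
from _pytest.store import Store, StoreKey

# A key is a typed handle; the type parameter is what the type checker
# enforces on reads and writes.
counter_key = StoreKey[int]()

store = Store()
store[counter_key] = 1
assert store[counter_key] == 1
assert store.setdefault(counter_key, 0) == 1  # existing value wins
```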
def add_funcarg_pseudo_fixture_def(
- collector, metafunc: "Metafunc", fixturemanager: "FixtureManager"
+ collector: nodes.Collector, metafunc: "Metafunc", fixturemanager: "FixtureManager"
) -> None:
# This function will transform all collected calls to functions
# if they use direct funcargs (i.e. direct parametrization)
# This function call does not have direct parametrization.
return
# Collect funcargs of all callspecs into a list of values.
- arg2params = {} # type: Dict[str, List[object]]
- arg2scope = {} # type: Dict[str, _Scope]
+ arg2params: Dict[str, List[object]] = {}
+ arg2scope: Dict[str, _Scope] = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
assert scope == "class" and isinstance(collector, _pytest.python.Module)
# Use module-level collector for class-scope (for now).
node = collector
- if node and argname in node._name2pseudofixturedef:
- arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
+ if node is None:
+ name2pseudofixturedef = None
+ else:
+ default: Dict[str, FixtureDef[Any]] = {}
+ name2pseudofixturedef = node._store.setdefault(
+ name2pseudofixturedef_key, default
+ )
+ if name2pseudofixturedef is not None and argname in name2pseudofixturedef:
+ arg2fixturedefs[argname] = [name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(
fixturemanager=fixturemanager,
ids=None,
)
arg2fixturedefs[argname] = [fixturedef]
- if node is not None:
- node._name2pseudofixturedef[argname] = fixturedef
+ if name2pseudofixturedef is not None:
+ name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]:
"""Return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
- fixturemarker = getattr(
+ fixturemarker: Optional[FixtureFunctionMarker] = getattr(
obj, "_pytestfixturefunction", None
- ) # type: Optional[FixtureFunctionMarker]
+ )
except TEST_OUTCOME:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
_Key = Tuple[object, ...]
-def get_parametrized_fixture_keys(item: "nodes.Item", scopenum: int) -> Iterator[_Key]:
+def get_parametrized_fixture_keys(item: nodes.Item, scopenum: int) -> Iterator[_Key]:
"""Return list of keys for all parametrized arguments which match
the specified scope."""
assert scopenum < scopenum_function # function
except AttributeError:
pass
else:
- cs = callspec # type: CallSpec2
+ cs: CallSpec2 = callspec
# cs.indices.items() is random order of argnames. Need to
# sort this so that different calls to
# get_parametrized_fixture_keys will be deterministic.
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
- key = (argname, param_index) # type: _Key
+ key: _Key = (argname, param_index)
elif scopenum == 1: # package
key = (argname, param_index, item.fspath.dirpath())
elif scopenum == 2: # module
# setups and teardowns.
-def reorder_items(items: "Sequence[nodes.Item]") -> "List[nodes.Item]":
- argkeys_cache = {} # type: Dict[int, Dict[nodes.Item, Dict[_Key, None]]]
- items_by_argkey = {} # type: Dict[int, Dict[_Key, Deque[nodes.Item]]]
+def reorder_items(items: Sequence[nodes.Item]) -> List[nodes.Item]:
+ argkeys_cache: Dict[int, Dict[nodes.Item, Dict[_Key, None]]] = {}
+ items_by_argkey: Dict[int, Dict[_Key, Deque[nodes.Item]]] = {}
for scopenum in range(0, scopenum_function):
- d = {} # type: Dict[nodes.Item, Dict[_Key, None]]
+ d: Dict[nodes.Item, Dict[_Key, None]] = {}
argkeys_cache[scopenum] = d
- item_d = defaultdict(deque) # type: Dict[_Key, Deque[nodes.Item]]
+ item_d: Dict[_Key, Deque[nodes.Item]] = defaultdict(deque)
items_by_argkey[scopenum] = item_d
for item in items:
- # cast is a workaround for https://github.com/python/typeshed/issues/3800.
- keys = cast(
- "Dict[_Key, None]",
- order_preserving_dict.fromkeys(
- get_parametrized_fixture_keys(item, scopenum), None
- ),
- )
+ keys = dict.fromkeys(get_parametrized_fixture_keys(item, scopenum), None)
if keys:
d[item] = keys
for key in keys:
item_d[key].append(item)
- # cast is a workaround for https://github.com/python/typeshed/issues/3800.
- items_dict = cast(
- "Dict[nodes.Item, None]", order_preserving_dict.fromkeys(items, None)
- )
+ items_dict = dict.fromkeys(items, None)
return list(reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, 0))
def fix_cache_order(
- item: "nodes.Item",
- argkeys_cache: "Dict[int, Dict[nodes.Item, Dict[_Key, None]]]",
- items_by_argkey: "Dict[int, Dict[_Key, Deque[nodes.Item]]]",
+ item: nodes.Item,
+ argkeys_cache: Dict[int, Dict[nodes.Item, Dict[_Key, None]]],
+ items_by_argkey: Dict[int, Dict[_Key, "Deque[nodes.Item]"]],
) -> None:
for scopenum in range(0, scopenum_function):
for key in argkeys_cache[scopenum].get(item, []):
def reorder_items_atscope(
- items: "Dict[nodes.Item, None]",
- argkeys_cache: "Dict[int, Dict[nodes.Item, Dict[_Key, None]]]",
- items_by_argkey: "Dict[int, Dict[_Key, Deque[nodes.Item]]]",
+ items: Dict[nodes.Item, None],
+ argkeys_cache: Dict[int, Dict[nodes.Item, Dict[_Key, None]]],
+ items_by_argkey: Dict[int, Dict[_Key, "Deque[nodes.Item]"]],
scopenum: int,
-) -> "Dict[nodes.Item, None]":
+) -> Dict[nodes.Item, None]:
if scopenum >= scopenum_function or len(items) < 3:
return items
- ignore = set() # type: Set[Optional[_Key]]
+ ignore: Set[Optional[_Key]] = set()
items_deque = deque(items)
- items_done = order_preserving_dict() # type: Dict[nodes.Item, None]
+ items_done: Dict[nodes.Item, None] = {}
scoped_items_by_argkey = items_by_argkey[scopenum]
scoped_argkeys_cache = argkeys_cache[scopenum]
while items_deque:
- no_argkey_group = order_preserving_dict() # type: Dict[nodes.Item, None]
+ no_argkey_group: Dict[nodes.Item, None] = {}
slicing_argkey = None
while items_deque:
item = items_deque.popleft()
if item in items_done or item in no_argkey_group:
continue
- argkeys = order_preserving_dict.fromkeys(
+ argkeys = dict.fromkeys(
(k for k in scoped_argkeys_cache.get(item, []) if k not in ignore), None
)
if not argkeys:
assert function.parent is not None
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
- request = function._request = FixtureRequest(function)
+ request = function._request = FixtureRequest(function, _ispytest=True)
request._fillfixtures()
# Prune out funcargs for jstests.
newfuncargs = {}
tree. In this way the dependency tree can get pruned, and the closure
of argnames may get reduced.
"""
- closure = set() # type: Set[str]
+ closure: Set[str] = set()
working_set = set(self.initialnames)
while working_set:
argname = working_set.pop()
indirectly.
"""
- def __init__(self, pyfuncitem) -> None:
+ def __init__(self, pyfuncitem, *, _ispytest: bool = False) -> None:
+ check_ispytest(_ispytest)
self._pyfuncitem = pyfuncitem
#: Fixture for which this request is being performed.
- self.fixturename = None # type: Optional[str]
+ self.fixturename: Optional[str] = None
#: Scope string, one of "function", "class", "module", "session".
- self.scope = "function" # type: _Scope
- self._fixture_defs = {} # type: Dict[str, FixtureDef[Any]]
- fixtureinfo = pyfuncitem._fixtureinfo # type: FuncFixtureInfo
+ self.scope: _Scope = "function"
+ self._fixture_defs: Dict[str, FixtureDef[Any]] = {}
+ fixtureinfo: FuncFixtureInfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
- self._arg2index = {} # type: Dict[str, int]
- self._fixturemanager = (
- pyfuncitem.session._fixturemanager
- ) # type: FixtureManager
+ self._arg2index: Dict[str, int] = {}
+ self._fixturemanager: FixtureManager = pyfuncitem.session._fixturemanager
@property
def fixturenames(self) -> List[str]:
@property
def config(self) -> Config:
"""The pytest config object associated with this request."""
- return self._pyfuncitem.config # type: ignore[no-any-return] # noqa: F723
+ return self._pyfuncitem.config # type: ignore[no-any-return]
@property
def function(self):
"""Test function object if the request has a per-function scope."""
if self.scope != "function":
raise AttributeError(
- "function not available in {}-scoped context".format(self.scope)
+ f"function not available in {self.scope}-scoped context"
)
return self._pyfuncitem.obj
def cls(self):
"""Class (can be None) where the test function was collected."""
if self.scope not in ("class", "function"):
- raise AttributeError(
- "cls not available in {}-scoped context".format(self.scope)
- )
+ raise AttributeError(f"cls not available in {self.scope}-scoped context")
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
def module(self):
"""Python module object where the test function was collected."""
if self.scope not in ("function", "class", "module"):
- raise AttributeError(
- "module not available in {}-scoped context".format(self.scope)
- )
+ raise AttributeError(f"module not available in {self.scope}-scoped context")
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@property
def fspath(self) -> py.path.local:
"""The file system path of the test module which collected this test."""
if self.scope not in ("function", "class", "module", "package"):
- raise AttributeError(
- "module not available in {}-scoped context".format(self.scope)
- )
+ raise AttributeError(f"module not available in {self.scope}-scoped context")
# TODO: Remove ignore once _pyfuncitem is properly typed.
return self._pyfuncitem.fspath # type: ignore
return self.node.keywords
@property
- def session(self):
+ def session(self) -> "Session":
"""Pytest session object."""
- return self._pyfuncitem.session
+ return self._pyfuncitem.session # type: ignore[no-any-return]
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
"""Add finalizer/teardown function to be called after the last test
finalizer=finalizer, colitem=colitem
)
- def applymarker(self, marker) -> None:
+ def applymarker(self, marker: Union[str, MarkDecorator]) -> None:
"""Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
except FixtureLookupError:
if argname == "request":
cached_result = (self, [0], None)
- scope = "function" # type: _Scope
+ scope: _Scope = "function"
return PseudoFixtureDef(cached_result, scope)
raise
# Remove indent to prevent the python3 exception
def _get_fixturestack(self) -> List["FixtureDef[Any]"]:
current = self
- values = [] # type: List[FixtureDef[Any]]
+ values: List[FixtureDef[Any]] = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
if paramscopenum is not None:
scope = scopes[paramscopenum]
- subrequest = SubRequest(self, scope, param, param_index, fixturedef)
+ subrequest = SubRequest(
+ self, scope, param, param_index, fixturedef, _ispytest=True
+ )
# Check if a higher-level scoped fixture accesses a lower level one.
subrequest._check_scope(argname, self.scope, scope)
functools.partial(fixturedef.finish, request=subrequest), subrequest.node
)
- def _check_scope(self, argname, invoking_scope: "_Scope", requested_scope) -> None:
+ def _check_scope(
+ self, argname: str, invoking_scope: "_Scope", requested_scope: "_Scope",
+ ) -> None:
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args))
return lines
- def _getscopeitem(self, scope):
+ def _getscopeitem(self, scope: "_Scope") -> Union[nodes.Item, nodes.Collector]:
if scope == "function":
# This might also be a non-function Item despite its attribute name.
- return self._pyfuncitem
- if scope == "package":
+ node: Optional[Union[nodes.Item, nodes.Collector]] = self._pyfuncitem
+ elif scope == "package":
# FIXME: _fixturedef is not defined on FixtureRequest (this class),
# but on FixtureRequest (a subclass).
node = get_scope_package(self._pyfuncitem, self._fixturedef) # type: ignore[attr-defined]
param,
param_index: int,
fixturedef: "FixtureDef[object]",
+ *,
+ _ispytest: bool = False,
) -> None:
+ check_ispytest(_ispytest)
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self._fixturemanager = request._fixturemanager
def __repr__(self) -> str:
- return "<SubRequest {!r} for {!r}>".format(self.fixturename, self._pyfuncitem)
+ return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+ """Add finalizer/teardown function to be called after the last test
+ within the requesting test context has finished execution."""
self._fixturedef.addfinalizer(finalizer)
def _schedule_finalizers(
super()._schedule_finalizers(fixturedef, subrequest)
-scopes = ["session", "package", "module", "class", "function"] # type: List[_Scope]
+scopes: List["_Scope"] = ["session", "package", "module", "class", "function"]
scopenum_function = scopes.index("function")
def scope2index(scope: str, descr: str, where: Optional[str] = None) -> int:
"""Look up the index of ``scope`` and raise a descriptive value error
if not defined."""
- strscopes = scopes # type: Sequence[str]
+ strscopes: Sequence[str] = scopes
try:
return strscopes.index(scope)
except ValueError:
fail(
"{} {}got an unexpected scope value '{}'".format(
- descr, "from {} ".format(where) if where else "", scope
+ descr, f"from {where} " if where else "", scope
),
pytrace=False,
)
self.msg = msg
def formatrepr(self) -> "FixtureLookupErrorRepr":
- tblines = [] # type: List[str]
+ tblines: List[str] = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
self.argname
)
else:
- msg = "fixture '{}' not found".format(self.argname)
+ msg = f"fixture '{self.argname}' not found"
msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
)
for line in lines[1:]:
tw.line(
- "{} {}".format(FormattedExcinfo.flow_marker, line.strip()),
- red=True,
+ f"{FormattedExcinfo.flow_marker} {line.strip()}", red=True,
)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
try:
fixture_result = next(generator)
except StopIteration:
- raise ValueError(
- "{} did not yield a value".format(request.fixturename)
- ) from None
+ raise ValueError(f"{request.fixturename} did not yield a value") from None
finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator)
request.addfinalizer(finalizer)
else:
def __init__(
self,
fixturemanager: "FixtureManager",
- baseid,
+ baseid: Optional[str],
argname: str,
func: "_FixtureFunc[_FixtureValue]",
scope: "Union[_Scope, Callable[[str, Config], _Scope]]",
self.scopenum = scope2index(
# TODO: Check if the `or` here is really necessary.
scope_ or "function", # type: ignore[unreachable]
- descr="Fixture '{}'".format(func.__name__),
+ descr=f"Fixture '{func.__name__}'",
where=baseid,
)
self.scope = scope_
- self.params = params # type: Optional[Sequence[object]]
- self.argnames = getfuncargnames(
+ self.params: Optional[Sequence[object]] = params
+ self.argnames: Tuple[str, ...] = getfuncargnames(
func, name=argname, is_method=unittest
- ) # type: Tuple[str, ...]
+ )
self.unittest = unittest
self.ids = ids
- self.cached_result = None # type: Optional[_FixtureCachedResult[_FixtureValue]]
- self._finalizers = [] # type: List[Callable[[], object]]
+ self.cached_result: Optional[_FixtureCachedResult[_FixtureValue]] = None
+ self._finalizers: List[Callable[[], object]] = []
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
self._finalizers.append(finalizer)
return tuple(params) if params is not None else None
-def wrap_function_to_error_out_if_called_directly(function, fixture_marker):
+def wrap_function_to_error_out_if_called_directly(
+ function: _FixtureFunction, fixture_marker: "FixtureFunctionMarker",
+) -> _FixtureFunction:
"""Wrap the given fixture function so we can raise an error about it being called directly,
instead of used as an argument in a test function."""
message = (
# further than this point and lose useful wrappings like @mock.patch (#3774).
result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined]
- return result
+ return cast(_FixtureFunction, result)
@final
Callable[[Any], Optional[object]],
]
] = ...,
- name: Optional[str] = ...
+ name: Optional[str] = ...,
) -> _FixtureFunction:
...
-@overload # noqa: F811
-def fixture( # noqa: F811
+@overload
+def fixture(
fixture_function: None = ...,
*,
scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = ...,
Callable[[Any], Optional[object]],
]
] = ...,
- name: Optional[str] = None
+ name: Optional[str] = None,
) -> FixtureFunctionMarker:
...
-def fixture( # noqa: F811
+def fixture(
fixture_function: Optional[_FixtureFunction] = None,
*,
scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = "function",
Callable[[Any], Optional[object]],
]
] = None,
- name: Optional[str] = None
+ name: Optional[str] = None,
) -> Union[FixtureFunctionMarker, _FixtureFunction]:
"""Decorator to mark a fixture factory function.
params=None,
autouse=False,
ids=None,
- name=None
+ name=None,
):
"""(Return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
+ warnings.warn(YIELD_FIXTURE, stacklevel=2)
return fixture(
fixture_function,
*args,
def __init__(self, session: "Session") -> None:
self.session = session
- self.config = session.config # type: Config
- self._arg2fixturedefs = {} # type: Dict[str, List[FixtureDef[Any]]]
- self._holderobjseen = set() # type: Set[object]
- self._nodeid_and_autousenames = [
- ("", self.config.getini("usefixtures"))
- ] # type: List[Tuple[str, List[str]]]
+ self.config: Config = session.config
+ self._arg2fixturedefs: Dict[str, List[FixtureDef[Any]]] = {}
+ self._holderobjseen: Set[object] = set()
+ # A mapping from a nodeid to a list of autouse fixtures it defines.
+ self._nodeid_autousenames: Dict[str, List[str]] = {
+ "": self.config.getini("usefixtures"),
+ }
session.config.pluginmanager.register(self, "funcmanage")
- def _get_direct_parametrize_args(self, node: "nodes.Node") -> List[str]:
+ def _get_direct_parametrize_args(self, node: nodes.Node) -> List[str]:
"""Return all direct parametrization arguments of a node, so we don't
mistake them for fixtures.
These things are done later as well when dealing with parametrization
so this could be improved.
"""
- parametrize_argnames = [] # type: List[str]
+ parametrize_argnames: List[str] = []
for marker in node.iter_markers(name="parametrize"):
if not marker.kwargs.get("indirect", False):
p_argnames, _ = ParameterSet._parse_parametrize_args(
return parametrize_argnames
def getfixtureinfo(
- self, node: "nodes.Node", func, cls, funcargs: bool = True
+ self, node: nodes.Node, func, cls, funcargs: bool = True
) -> FuncFixtureInfo:
if funcargs and not getattr(node, "nofuncargs", False):
argnames = getfuncargnames(func, name=node.name, cls=cls)
except AttributeError:
pass
else:
- from _pytest import nodes
-
# Construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id).
self.parsefactories(plugin, nodeid)
- def _getautousenames(self, nodeid: str) -> List[str]:
- """Return a list of fixture names to be used."""
- autousenames = [] # type: List[str]
- for baseid, basenames in self._nodeid_and_autousenames:
- if nodeid.startswith(baseid):
- if baseid:
- i = len(baseid)
- nextchar = nodeid[i : i + 1]
- if nextchar and nextchar not in ":/":
- continue
- autousenames.extend(basenames)
- return autousenames
+ def _getautousenames(self, nodeid: str) -> Iterator[str]:
+ """Return the names of autouse fixtures applicable to nodeid."""
+ for parentnodeid in nodes.iterparentnodeids(nodeid):
+ basenames = self._nodeid_autousenames.get(parentnodeid)
+ if basenames:
+ yield from basenames
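
The lookup now walks the node's ancestry, assuming `nodes.iterparentnodeids` yields every parent node ID from the root `""` down to the node itself. A sketch of the expected shape:

```python
from _pytest.nodes import iterparentnodeids

ids = list(iterparentnodeids("tests/unit/test_x.py::TestX::test_y"))
# Expected (under that assumption):
# ["", "tests", "tests/unit", "tests/unit/test_x.py",
#  "tests/unit/test_x.py::TestX", "tests/unit/test_x.py::TestX::test_y"]
```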
def getfixtureclosure(
- self, fixturenames: Tuple[str, ...], parentnode, ignore_args: Sequence[str] = ()
+ self,
+ fixturenames: Tuple[str, ...],
+ parentnode: nodes.Node,
+ ignore_args: Sequence[str] = (),
) -> Tuple[Tuple[str, ...], List[str], Dict[str, Sequence[FixtureDef[Any]]]]:
# Collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
# (discovering matching fixtures for a given name/node is expensive).
parentid = parentnode.nodeid
- fixturenames_closure = self._getautousenames(parentid)
+ fixturenames_closure = list(self._getautousenames(parentid))
def merge(otherlist: Iterable[str]) -> None:
for arg in otherlist:
# need to return it as well, so save this.
initialnames = tuple(fixturenames_closure)
- arg2fixturedefs = {} # type: Dict[str, Sequence[FixtureDef[Any]]]
+ arg2fixturedefs: Dict[str, Sequence[FixtureDef[Any]]] = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
# Try next super fixture, if any.
- def pytest_collection_modifyitems(self, items: "List[nodes.Item]") -> None:
+ def pytest_collection_modifyitems(self, items: List[nodes.Item]) -> None:
# Separate parametrized setups.
items[:] = reorder_items(items)
autousenames.append(name)
if autousenames:
- self._nodeid_and_autousenames.append((nodeid or "", autousenames))
+ self._nodeid_autousenames.setdefault(nodeid or "", []).extend(autousenames)
def getfixturedefs(
self, argname: str, nodeid: str
def _matchfactories(
self, fixturedefs: Iterable[FixtureDef[Any]], nodeid: str
) -> Iterator[FixtureDef[Any]]:
- from _pytest import nodes
-
+ parentnodeids = set(nodes.iterparentnodeids(nodeid))
for fixturedef in fixturedefs:
- if nodes.ischildnode(fixturedef.baseid, nodeid):
+ if fixturedef.baseid in parentnodeids:
yield fixturedef
@pytest.hookimpl(hookwrapper=True)
def pytest_cmdline_parse():
outcome = yield
- config = outcome.get_result() # type: Config
+ config: Config = outcome.get_result()
if config.option.debug:
path = os.path.abspath("pytestdebug.log")
debugfile = open(path, "w")
for line in plugininfo:
sys.stderr.write(line + "\n")
else:
- sys.stderr.write("pytest {}\n".format(pytest.__version__))
+ sys.stderr.write(f"pytest {pytest.__version__}\n")
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
if type is None:
type = "string"
if help is None:
- raise TypeError("help argument cannot be None for {}".format(name))
- spec = "{} ({}):".format(name, type)
+ raise TypeError(f"help argument cannot be None for {name}")
+ spec = f"{name} ({type}):"
tw.write(" %s" % spec)
spec_len = len(spec)
if spec_len > (indent_len - 3):
("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"),
]
for name, help in vars:
- tw.line(" {:<24} {}".format(name, help))
+ tw.line(f" {name:<24} {help}")
tw.line()
tw.line()
lines.append("setuptools registered plugins:")
for plugin, dist in plugininfo:
loc = getattr(plugin, "__file__", repr(plugin))
- content = "{}-{} at {}".format(dist.project_name, dist.version, loc)
+ content = f"{dist.project_name}-{dist.version} at {loc}"
lines.append(" " + content)
return lines
def pytest_report_header(config: Config) -> List[str]:
lines = []
if config.option.debug or config.option.traceconfig:
- lines.append(
- "using: pytest-{} pylib-{}".format(pytest.__version__, py.__version__)
- )
+ lines.append(f"using: pytest-{pytest.__version__} pylib-{py.__version__}")
verinfo = getpluginversioninfo(config)
if verinfo:
r = plugin.__file__
else:
r = repr(plugin)
- lines.append(" {:<20}: {}".format(name, r))
+ lines.append(f" {name:<20}: {r}")
return lines
from typing import Optional
from typing import Sequence
from typing import Tuple
+from typing import TYPE_CHECKING
from typing import Union
import py.path
from pluggy import HookspecMarker
-from _pytest.compat import TYPE_CHECKING
from _pytest.deprecated import WARNING_CAPTURED_HOOK
if TYPE_CHECKING:
See :func:`pytest_runtest_protocol` for a description of the runtest protocol.
:param str nodeid: Full node ID of the item.
- :param location: A triple of ``(filename, lineno, testname)``.
+ :param location: A tuple of ``(filename, lineno, testname)``.
"""
See :func:`pytest_runtest_protocol` for a description of the runtest protocol.
:param str nodeid: Full node ID of the item.
- :param location: A triple of ``(filename, lineno, testname)``.
+ :param location: A tuple of ``(filename, lineno, testname)``.
"""
"""
+# -------------------------------------------------------------------------
+# Hooks for influencing skipping
+# -------------------------------------------------------------------------
+
+
+def pytest_markeval_namespace(config: "Config") -> Dict[str, Any]:
+ """Called when constructing the globals dictionary used for
+ evaluating string conditions in xfail/skipif markers.
+
+ This is useful when the condition for a marker requires
+ objects that are expensive or impossible to obtain during
+ collection time (which is when normal boolean conditions
+ are evaluated).
+
+ .. versionadded:: 6.2
+
+ :param _pytest.config.Config config: The pytest config object.
+ :returns: A dictionary of additional globals to add.
+ """
+
+
# -------------------------------------------------------------------------
# error handling and internal debugging hooks
# -------------------------------------------------------------------------
self.add_stats = self.xml.add_stats
self.family = self.xml.family
self.duration = 0
- self.properties = [] # type: List[Tuple[str, str]]
- self.nodes = [] # type: List[ET.Element]
- self.attrs = {} # type: Dict[str, str]
+ self.properties: List[Tuple[str, str]] = []
+ self.nodes: List[ET.Element] = []
+ self.attrs: Dict[str, str] = {}
def append(self, node: ET.Element) -> None:
self.xml.add_stats(node.tag)
classnames = names[:-1]
if self.xml.prefix:
classnames.insert(0, self.xml.prefix)
- attrs = {
+ attrs: Dict[str, str] = {
"classname": ".".join(classnames),
"name": bin_xml_escape(names[-1]),
"file": testreport.location[0],
- } # type: Dict[str, str]
+ }
if testreport.location[1] is not None:
attrs["line"] = str(testreport.location[1])
if hasattr(testreport, "url"):
self._add_simple("skipped", "xfail-marked test passes unexpectedly")
else:
assert report.longrepr is not None
- reprcrash = getattr(
+ reprcrash: Optional[ReprFileLocation] = getattr(
report.longrepr, "reprcrash", None
- ) # type: Optional[ReprFileLocation]
+ )
if reprcrash is not None:
message = reprcrash.message
else:
def append_error(self, report: TestReport) -> None:
assert report.longrepr is not None
- reprcrash = getattr(
+ reprcrash: Optional[ReprFileLocation] = getattr(
report.longrepr, "reprcrash", None
- ) # type: Optional[ReprFileLocation]
+ )
if reprcrash is not None:
reason = reprcrash.message
else:
reason = str(report.longrepr)
if report.when == "teardown":
- msg = 'failed on teardown with "{}"'.format(reason)
+ msg = f'failed on teardown with "{reason}"'
else:
- msg = 'failed on setup with "{}"'.format(reason)
+ msg = f'failed on setup with "{reason}"'
self._add_simple("error", msg, str(report.longrepr))
def append_skipped(self, report: TestReport) -> None:
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = skipreason[9:]
- details = "{}:{}: {}".format(filename, lineno, skipreason)
+ details = f"{filename}:{lineno}: {skipreason}"
skipped = ET.Element("skipped", type="pytest.skip", message=skipreason)
skipped.text = bin_xml_escape(details)
self.log_passing_tests = log_passing_tests
self.report_duration = report_duration
self.family = family
- self.stats = dict.fromkeys(
+ self.stats: Dict[str, int] = dict.fromkeys(
["error", "passed", "failure", "skipped"], 0
- ) # type: Dict[str, int]
- self.node_reporters = (
- {}
- ) # type: Dict[Tuple[Union[str, TestReport], object], _NodeReporter]
- self.node_reporters_ordered = [] # type: List[_NodeReporter]
- self.global_properties = [] # type: List[Tuple[str, str]]
+ )
+ self.node_reporters: Dict[
+ Tuple[Union[str, TestReport], object], _NodeReporter
+ ] = {}
+ self.node_reporters_ordered: List[_NodeReporter] = []
+ self.global_properties: List[Tuple[str, str]] = []
# List of reports that failed on call but teardown is pending.
- self.open_reports = [] # type: List[TestReport]
+ self.open_reports: List[TestReport] = []
self.cnt_double_fail_tests = 0
# Replaces convenience family with real family.
reporter.finalize()
def node_reporter(self, report: Union[TestReport, str]) -> _NodeReporter:
- nodeid = getattr(report, "nodeid", report) # type: Union[str, TestReport]
+ nodeid: Union[str, TestReport] = getattr(report, "nodeid", report)
# Local hack to handle xdist report order.
workernode = getattr(report, "node", None)
logfile.close()
def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
- terminalreporter.write_sep("-", "generated xml file: {}".format(self.logfile))
+ terminalreporter.write_sep("-", f"generated xml file: {self.logfile}")
def add_global_property(self, name: str, value: object) -> None:
__tracebackhide__ = True
import sys
from contextlib import contextmanager
from io import StringIO
+from pathlib import Path
from typing import AbstractSet
from typing import Dict
from typing import Generator
from typing import TypeVar
from typing import Union
-import pytest
from _pytest import nodes
from _pytest._io import TerminalWriter
from _pytest.capture import CaptureManager
from _pytest.config import _strtobool
from _pytest.config import Config
from _pytest.config import create_terminal_writer
+from _pytest.config import hookimpl
+from _pytest.config import UsageError
from _pytest.config.argparsing import Parser
+from _pytest.deprecated import check_ispytest
+from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureRequest
from _pytest.main import Session
-from _pytest.pathlib import Path
from _pytest.store import StoreKey
from _pytest.terminal import TerminalReporter
"""A logging formatter which colorizes the %(levelname)..s part of the
log format passed to __init__."""
- LOGLEVEL_COLOROPTS = {
+ LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = {
logging.CRITICAL: {"red"},
logging.ERROR: {"red", "bold"},
logging.WARNING: {"yellow"},
logging.INFO: {"green"},
logging.DEBUG: {"purple"},
logging.NOTSET: set(),
- } # type: Mapping[int, AbstractSet[str]]
+ }
LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*s)")
def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._original_fmt = self._style._fmt
- self._level_to_fmt_mapping = {} # type: Dict[int, str]
+ self._level_to_fmt_mapping: Dict[int, str] = {}
assert self._fmt is not None
levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt)
class LogCaptureHandler(logging.StreamHandler):
"""A logging handler that stores log records and the log text."""
- stream = None # type: StringIO
+ stream: StringIO
def __init__(self) -> None:
"""Create a new log handler."""
super().__init__(StringIO())
- self.records = [] # type: List[logging.LogRecord]
+ self.records: List[logging.LogRecord] = []
def emit(self, record: logging.LogRecord) -> None:
"""Keep the log records in a list in addition to the log text."""
class LogCaptureFixture:
"""Provides access and control of log capturing."""
- def __init__(self, item: nodes.Node) -> None:
+ def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None:
+ check_ispytest(_ispytest)
self._item = item
- self._initial_handler_level = None # type: Optional[int]
+ self._initial_handler_level: Optional[int] = None
# Dict of log name -> log level.
- self._initial_logger_levels = {} # type: Dict[Optional[str], int]
+ self._initial_logger_levels: Dict[Optional[str], int] = {}
def _finalize(self) -> None:
"""Finalize the fixture.
self.handler.setLevel(handler_orig_level)
-@pytest.fixture
+@fixture
def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]:
"""Access and control log capturing.
* caplog.record_tuples -> list of (logger_name, level, message) tuples
* caplog.clear() -> clear captured records and formatted log output string
"""
- result = LogCaptureFixture(request.node)
+ result = LogCaptureFixture(request.node, _ispytest=True)
yield result
result._finalize()
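A minimal sketch of the fixture in use; the logger name and message are
illustrative:

    import logging

    def test_emits_warning(caplog):
        logging.getLogger("app").warning("disk almost full")
        assert caplog.records[-1].levelno == logging.WARNING
        assert "disk almost full" in caplog.text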
return int(getattr(logging, log_level, log_level))
except ValueError as e:
# Python logging does not recognise this as a logging level
- raise pytest.UsageError(
+ raise UsageError(
"'{}' is not recognized as a logging level name for "
"'{}'. Please consider passing the "
"logging level num instead.".format(log_level, setting_name)
# run after terminalreporter/capturemanager are configured
-@pytest.hookimpl(trylast=True)
+@hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
config.pluginmanager.register(LoggingPlugin(config), "logging-plugin")
terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
capture_manager = config.pluginmanager.get_plugin("capturemanager")
# if capturemanager plugin is disabled, live logging still works.
- self.log_cli_handler = _LiveLoggingStreamHandler(
- terminal_reporter, capture_manager
- ) # type: Union[_LiveLoggingStreamHandler, _LiveLoggingNullHandler]
+ self.log_cli_handler: Union[
+ _LiveLoggingStreamHandler, _LiveLoggingNullHandler
+ ] = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
else:
self.log_cli_handler = _LiveLoggingNullHandler()
log_cli_formatter = self._create_formatter(
if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
log_format
):
- formatter = ColoredLevelFormatter(
+ formatter: logging.Formatter = ColoredLevelFormatter(
create_terminal_writer(self._config), log_format, log_date_format
- ) # type: logging.Formatter
+ )
else:
formatter = logging.Formatter(log_format, log_date_format)
return True
- @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+ @hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionstart(self) -> Generator[None, None, None]:
self.log_cli_handler.set_when("sessionstart")
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
- @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+ @hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection(self) -> Generator[None, None, None]:
self.log_cli_handler.set_when("collection")
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_runtestloop(self, session: Session) -> Generator[None, None, None]:
if session.config.option.collectonly:
yield
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield # Run all the tests.
- @pytest.hookimpl
+ @hookimpl
def pytest_runtest_logstart(self) -> None:
self.log_cli_handler.reset()
self.log_cli_handler.set_when("start")
- @pytest.hookimpl
+ @hookimpl
def pytest_runtest_logreport(self) -> None:
self.log_cli_handler.set_when("logreport")
log = report_handler.stream.getvalue().strip()
item.add_report_section(when, "log", log)
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None, None, None]:
self.log_cli_handler.set_when("setup")
- empty = {} # type: Dict[str, List[logging.LogRecord]]
+ empty: Dict[str, List[logging.LogRecord]] = {}
item._store[caplog_records_key] = empty
yield from self._runtest_for(item, "setup")
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item: nodes.Item) -> Generator[None, None, None]:
self.log_cli_handler.set_when("call")
yield from self._runtest_for(item, "call")
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None, None, None]:
self.log_cli_handler.set_when("teardown")
del item._store[caplog_records_key]
del item._store[caplog_handler_key]
- @pytest.hookimpl
+ @hookimpl
def pytest_runtest_logfinish(self) -> None:
self.log_cli_handler.set_when("finish")
- @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+ @hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionfinish(self) -> Generator[None, None, None]:
self.log_cli_handler.set_when("sessionfinish")
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
- @pytest.hookimpl
+ @hookimpl
def pytest_unconfigure(self) -> None:
# Close the FileHandler explicitly.
# (logging.shutdown might have lost the weakref?!)
# Officially stream needs to be an IO[str], but TerminalReporter
# isn't. So force it.
- stream = None # type: TerminalReporter # type: ignore
+ stream: TerminalReporter = None # type: ignore
def __init__(
self,
import importlib
import os
import sys
+from pathlib import Path
from typing import Callable
from typing import Dict
from typing import FrozenSet
from typing import Iterator
from typing import List
from typing import Optional
+from typing import overload
from typing import Sequence
from typing import Set
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import Union
import attr
import _pytest._code
from _pytest import nodes
from _pytest.compat import final
-from _pytest.compat import overload
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import directory_arg
from _pytest.config import ExitCode
from _pytest.outcomes import exit
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
-from _pytest.pathlib import Path
from _pytest.pathlib import visit
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
if TYPE_CHECKING:
- from typing import Type
from typing_extensions import Literal
"norecursedirs",
"directory patterns to avoid for recursion",
type="args",
- default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"],
+ default=[
+ "*.egg",
+ ".*",
+ "_darcs",
+ "build",
+ "CVS",
+ "dist",
+ "node_modules",
+ "venv",
+ "{arch}",
+ ],
)
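Projects that need a different set can still override this default in their
configuration file; a minimal sketch (directory names illustrative):

    [pytest]
    norecursedirs = .git node_modules build _build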
parser.addini(
"testpaths",
)
group._addoption(
"--strict-markers",
- "--strict",
action="store_true",
help="markers not registered in the `markers` section of the configuration file raise errors.",
)
+ group._addoption(
+ "--strict", action="store_true", help="(deprecated) alias to --strict-markers.",
+ )
group._addoption(
"-c",
metavar="file",
session.exitstatus = ExitCode.TESTS_FAILED
except (KeyboardInterrupt, exit.Exception):
excinfo = _pytest._code.ExceptionInfo.from_current()
- exitstatus = ExitCode.INTERRUPTED # type: Union[int, ExitCode]
+ exitstatus: Union[int, ExitCode] = ExitCode.INTERRUPTED
if isinstance(excinfo.value, exit.Exception):
if excinfo.value.returncode is not None:
exitstatus = excinfo.value.returncode
if initstate < 2:
- sys.stderr.write(
- "{}: {}\n".format(excinfo.typename, excinfo.value.msg)
- )
+ sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n")
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = exitstatus
except BaseException:
Interrupted = Interrupted
Failed = Failed
# Set on the session by runner.pytest_sessionstart.
- _setupstate = None # type: SetupState
+ _setupstate: SetupState
# Set on the session by fixtures.pytest_sessionstart.
- _fixturemanager = None # type: FixtureManager
- exitstatus = None # type: Union[int, ExitCode]
+ _fixturemanager: FixtureManager
+ exitstatus: Union[int, ExitCode]
def __init__(self, config: Config) -> None:
super().__init__(
)
self.testsfailed = 0
self.testscollected = 0
- self.shouldstop = False # type: Union[bool, str]
- self.shouldfail = False # type: Union[bool, str]
+ self.shouldstop: Union[bool, str] = False
+ self.shouldfail: Union[bool, str] = False
self.trace = config.trace.root.get("collection")
self.startdir = config.invocation_dir
- self._initialpaths = frozenset() # type: FrozenSet[py.path.local]
+ self._initialpaths: FrozenSet[py.path.local] = frozenset()
- self._bestrelpathcache = _bestrelpath_cache(
- config.rootpath
- ) # type: Dict[Path, str]
+ self._bestrelpathcache: Dict[Path, str] = _bestrelpath_cache(config.rootpath)
self.config.pluginmanager.register(self, name="session")
@classmethod
def from_config(cls, config: Config) -> "Session":
- session = cls._create(config) # type: Session
+ session: Session = cls._create(config)
return session
def __repr__(self) -> str:
) -> Sequence[nodes.Item]:
...
- @overload # noqa: F811
- def perform_collect( # noqa: F811
+ @overload
+ def perform_collect(
self, args: Optional[Sequence[str]] = ..., genitems: bool = ...
) -> Sequence[Union[nodes.Item, nodes.Collector]]:
...
- def perform_collect( # noqa: F811
+ def perform_collect(
self, args: Optional[Sequence[str]] = None, genitems: bool = True
) -> Sequence[Union[nodes.Item, nodes.Collector]]:
"""Perform the collection phase for this session.
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
- self._notfound = [] # type: List[Tuple[str, Sequence[nodes.Collector]]]
- self._initial_parts = [] # type: List[Tuple[py.path.local, List[str]]]
- self.items = [] # type: List[nodes.Item]
+ self._notfound: List[Tuple[str, Sequence[nodes.Collector]]] = []
+ self._initial_parts: List[Tuple[py.path.local, List[str]]] = []
+ self.items: List[nodes.Item] = []
hook = self.config.hook
- items = self.items # type: Sequence[Union[nodes.Item, nodes.Collector]]
+ items: Sequence[Union[nodes.Item, nodes.Collector]] = self.items
try:
- initialpaths = [] # type: List[py.path.local]
+ initialpaths: List[py.path.local] = []
for arg in args:
fspath, parts = resolve_collection_argument(
self.config.invocation_params.dir,
if self._notfound:
errors = []
for arg, cols in self._notfound:
- line = "(no name {!r} in any of {!r})".format(arg, cols)
- errors.append("not found: {}\n{}".format(arg, line))
+ line = f"(no name {arg!r} in any of {cols!r})"
+ errors.append(f"not found: {arg}\n{line}")
raise UsageError(*errors)
if not genitems:
items = rep.result
from _pytest.python import Package
# Keep track of any collected nodes in here, so we don't duplicate fixtures.
- node_cache1 = {} # type: Dict[py.path.local, Sequence[nodes.Collector]]
- node_cache2 = (
- {}
- ) # type: Dict[Tuple[Type[nodes.Collector], py.path.local], nodes.Collector]
+ node_cache1: Dict[py.path.local, Sequence[nodes.Collector]] = {}
+ node_cache2: Dict[
+ Tuple[Type[nodes.Collector], py.path.local], nodes.Collector
+ ] = {}
# Keep track of any collected collectors in matchnodes paths, so they
# are not collected more than once.
- matchnodes_cache = (
- {}
- ) # type: Dict[Tuple[Type[nodes.Collector], str], CollectReport]
+ matchnodes_cache: Dict[Tuple[Type[nodes.Collector], str], CollectReport] = {}
# Dirnames of pkgs with dunder-init files.
- pkg_roots = {} # type: Dict[str, Package]
+ pkg_roots: Dict[str, Package] = {}
for argpath, names in self._initial_parts:
self.trace("processing argument", (argpath, names))
if argpath.check(dir=1):
assert not names, "invalid arg {!r}".format((argpath, names))
- seen_dirs = set() # type: Set[py.path.local]
+ seen_dirs: Set[py.path.local] = set()
for direntry in visit(str(argpath), self._recurse):
if not direntry.is_file():
continue
node_cache1[argpath] = col
matching = []
- work = [
- (col, names)
- ] # type: List[Tuple[Sequence[Union[nodes.Item, nodes.Collector]], Sequence[str]]]
+ work: List[
+ Tuple[Sequence[Union[nodes.Item, nodes.Collector]], Sequence[str]]
+ ] = [(col, names)]
while work:
self.trace("matchnodes", col, names)
self.trace.root.indent += 1
self._notfound.append((report_arg, col))
continue
- # If __init__.py was the only file requested, then the matched node will be
- # the corresponding Package, and the first yielded item will be the __init__
- # Module itself, so just use that. If this special case isn't taken, then all
- # the files in the package will be yielded.
- if argpath.basename == "__init__.py":
- assert isinstance(matching[0], nodes.Collector)
+ # If __init__.py was the only file requested, then the matched
+ # node will be the corresponding Package (by default), and the
+ # first yielded item will be the __init__ Module itself, so
+ # just use that. If this special case isn't taken, then all the
+ # files in the package will be yielded.
+ if argpath.basename == "__init__.py" and isinstance(
+ matching[0], Package
+ ):
try:
yield next(iter(matching[0].collect()))
except StopIteration:
"""Generic mechanism for marking and selecting python functions."""
-import typing
import warnings
from typing import AbstractSet
+from typing import Collection
from typing import List
from typing import Optional
+from typing import TYPE_CHECKING
from typing import Union
import attr
from .structures import MarkDecorator
from .structures import MarkGenerator
from .structures import ParameterSet
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config import hookimpl
def param(
*values: object,
- marks: "Union[MarkDecorator, typing.Collection[Union[MarkDecorator, Mark]]]" = (),
- id: Optional[str] = None
+ marks: Union[MarkDecorator, Collection[Union[MarkDecorator, Mark]]] = (),
+ id: Optional[str] = None,
) -> ParameterSet:
"""Specify a parameter in `pytest.mark.parametrize`_ calls or
:ref:`parametrized fixtures <fixture-parametrize-marks>`.
expression = Expression.compile(keywordexpr)
except ParseError as e:
raise UsageError(
- "Wrong expression passed to '-k': {}: {}".format(keywordexpr, e)
+ f"Wrong expression passed to '-k': {keywordexpr}: {e}"
) from None
remaining = []
try:
expression = Expression.compile(matchexpr)
except ParseError as e:
- raise UsageError(
- "Wrong expression passed to '-m': {}: {}".format(matchexpr, e)
- ) from None
+ raise UsageError(f"Wrong expression passed to '-m': {matchexpr}: {e}") from None
remaining = []
deselected = []
from typing import Mapping
from typing import Optional
from typing import Sequence
+from typing import TYPE_CHECKING
import attr
-from _pytest.compat import TYPE_CHECKING
-
if TYPE_CHECKING:
from typing import NoReturn
self.message = message
def __str__(self) -> str:
- return "at column {}: {}".format(self.column, self.message)
+ return f"at column {self.column}: {self.message}"
class Scanner:
def expression(s: Scanner) -> ast.Expression:
if s.accept(TokenType.EOF):
- ret = ast.NameConstant(False) # type: ast.expr
+ ret: ast.expr = ast.NameConstant(False)
else:
ret = expr(s)
s.accept(TokenType.EOF, reject=True)
:param input: The input expression - one line.
"""
astexpr = expression(Scanner(input))
- code = compile(
+ code: types.CodeType = compile(
astexpr, filename="<pytest match expression>", mode="eval",
- ) # type: types.CodeType
+ )
return Expression(code)
def evaluate(self, matcher: Callable[[str], bool]) -> bool:
:returns: Whether the expression matches or not.
"""
- ret = eval(
- self.code, {"__builtins__": {}}, MatcherAdapter(matcher)
- ) # type: bool
+ ret: bool = eval(self.code, {"__builtins__": {}}, MatcherAdapter(matcher))
return ret
import collections.abc
import inspect
-import typing
import warnings
from typing import Any
from typing import Callable
+from typing import Collection
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
+from typing import MutableMapping
from typing import NamedTuple
from typing import Optional
+from typing import overload
from typing import Sequence
from typing import Set
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from ..compat import final
from ..compat import NOTSET
from ..compat import NotSetType
-from ..compat import overload
-from ..compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.outcomes import fail
from _pytest.warning_types import PytestUnknownMarkWarning
if TYPE_CHECKING:
- from typing import Type
-
from ..nodes import Node
"ParameterSet",
[
("values", Sequence[Union[object, NotSetType]]),
- ("marks", "typing.Collection[Union[MarkDecorator, Mark]]"),
+ ("marks", Collection[Union["MarkDecorator", "Mark"]]),
("id", Optional[str]),
],
)
def param(
cls,
*values: object,
- marks: "Union[MarkDecorator, typing.Collection[Union[MarkDecorator, Mark]]]" = (),
- id: Optional[str] = None
+ marks: Union["MarkDecorator", Collection[Union["MarkDecorator", "Mark"]]] = (),
+ id: Optional[str] = None,
) -> "ParameterSet":
if isinstance(marks, MarkDecorator):
marks = (marks,)
else:
- # TODO(py36): Change to collections.abc.Collection.
- assert isinstance(marks, (collections.abc.Sequence, set))
+ assert isinstance(marks, collections.abc.Collection)
if id is not None:
if not isinstance(id, str):
return cls.param(parameterset)
else:
# TODO: Refactor to fix this type-ignore. Currently the following
- # type-checks but crashes:
+ # passes type-checking but crashes:
#
# @pytest.mark.parametrize(('x', 'y'), [1, 2])
# def test_foo(x, y): pass
argnames: Union[str, List[str], Tuple[str, ...]],
argvalues: Iterable[Union["ParameterSet", Sequence[object], object]],
*args,
- **kwargs
+ **kwargs,
) -> Tuple[Union[List[str], Tuple[str, ...]], bool]:
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
assert self.name == other.name
# Remember source of ids with parametrize Marks.
- param_ids_from = None # type: Optional[Mark]
+ param_ids_from: Optional[Mark] = None
if self.name == "parametrize":
if other._has_param_ids():
param_ids_from = other
return self.name # for backward-compat (2.4.1 had this attr)
def __repr__(self) -> str:
- return "<MarkDecorator {!r}>".format(self.mark)
+ return f"<MarkDecorator {self.mark!r}>"
def with_args(self, *args: object, **kwargs: object) -> "MarkDecorator":
"""Return a MarkDecorator with extra arguments added.
def __call__(self, arg: _Markable) -> _Markable: # type: ignore[misc]
pass
- @overload # noqa: F811
- def __call__( # noqa: F811
- self, *args: object, **kwargs: object
- ) -> "MarkDecorator":
+ @overload
+ def __call__(self, *args: object, **kwargs: object) -> "MarkDecorator":
pass
- def __call__(self, *args: object, **kwargs: object): # noqa: F811
+ def __call__(self, *args: object, **kwargs: object):
"""Call the MarkDecorator."""
if args and not kwargs:
func = args[0]
] # unpack MarkDecorator
for mark in extracted:
if not isinstance(mark, Mark):
- raise TypeError("got {!r} instead of Mark".format(mark))
+ raise TypeError(f"got {mark!r} instead of Mark")
return [x for x in extracted if isinstance(x, Mark)]
def __call__(self, arg: _Markable) -> _Markable:
...
- @overload # noqa: F811
- def __call__(self, reason: str = ...) -> "MarkDecorator": # noqa: F811
+ @overload
+ def __call__(self, reason: str = ...) -> "MarkDecorator":
...
class _SkipifMarkDecorator(MarkDecorator):
self,
condition: Union[str, bool] = ...,
*conditions: Union[str, bool],
- reason: str = ...
+ reason: str = ...,
) -> MarkDecorator:
...
def __call__(self, arg: _Markable) -> _Markable:
...
- @overload # noqa: F811
- def __call__( # noqa: F811
+ @overload
+ def __call__(
self,
condition: Union[str, bool] = ...,
*conditions: Union[str, bool],
reason: str = ...,
run: bool = ...,
- raises: Union[
- "Type[BaseException]", Tuple["Type[BaseException]", ...]
- ] = ...,
- strict: bool = ...
+ raises: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = ...,
+ strict: bool = ...,
) -> MarkDecorator:
...
Callable[[Any], Optional[object]],
]
] = ...,
- scope: Optional[_Scope] = ...
+ scope: Optional[_Scope] = ...,
) -> MarkDecorator:
...
applies a 'slowtest' :class:`Mark` on ``test_function``.
"""
- _config = None # type: Optional[Config]
- _markers = set() # type: Set[str]
+ _config: Optional[Config] = None
+ _markers: Set[str] = set()
# See TYPE_CHECKING above.
if TYPE_CHECKING:
- # TODO(py36): Change to builtin annotation syntax.
- skip = _SkipMarkDecorator(Mark("skip", (), {}))
- skipif = _SkipifMarkDecorator(Mark("skipif", (), {}))
- xfail = _XfailMarkDecorator(Mark("xfail", (), {}))
- parametrize = _ParametrizeMarkDecorator(Mark("parametrize", (), {}))
- usefixtures = _UsefixturesMarkDecorator(Mark("usefixtures", (), {}))
- filterwarnings = _FilterwarningsMarkDecorator(Mark("filterwarnings", (), {}))
+ skip: _SkipMarkDecorator
+ skipif: _SkipifMarkDecorator
+ xfail: _XfailMarkDecorator
+ parametrize: _ParametrizeMarkDecorator
+ usefixtures: _UsefixturesMarkDecorator
+ filterwarnings: _FilterwarningsMarkDecorator
def __getattr__(self, name: str) -> MarkDecorator:
if name[0] == "_":
# If the name is not in the set of known marks after updating,
# then it really is time to issue a warning or an error.
if name not in self._markers:
- if self._config.option.strict_markers:
+ if self._config.option.strict_markers or self._config.option.strict:
fail(
- "{!r} not found in `markers` configuration option".format(name),
+ f"{name!r} not found in `markers` configuration option",
pytrace=False,
)
# Raise a specific error for common misspellings of "parametrize".
if name in ["parameterize", "parametrise", "parameterise"]:
__tracebackhide__ = True
- fail("Unknown '{}' mark, did you mean 'parametrize'?".format(name))
+ fail(f"Unknown '{name}' mark, did you mean 'parametrize'?")
warnings.warn(
"Unknown pytest.mark.%s - is this a typo? You can register "
MARK_GEN = MarkGenerator()
-# TODO(py36): inherit from typing.MutableMapping[str, Any].
@final
-class NodeKeywords(collections.abc.MutableMapping): # type: ignore[type-arg]
+class NodeKeywords(MutableMapping[str, Any]):
def __init__(self, node: "Node") -> None:
self.node = node
self.parent = node.parent
return len(self._seen())
def __repr__(self) -> str:
- return "<NodeKeywords for node {}>".format(self.node)
+ return f"<NodeKeywords for node {self.node}>"
import sys
import warnings
from contextlib import contextmanager
+from pathlib import Path
from typing import Any
from typing import Generator
from typing import List
from typing import MutableMapping
from typing import Optional
+from typing import overload
from typing import Tuple
from typing import TypeVar
from typing import Union
-import pytest
from _pytest.compat import final
-from _pytest.compat import overload
from _pytest.fixtures import fixture
-from _pytest.pathlib import Path
+from _pytest.warning_types import PytestWarning
RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$")
if expected == used:
raise
else:
- raise ImportError("import error in {}: {}".format(used, ex)) from ex
+ raise ImportError(f"import error in {used}: {ex}") from ex
found = annotated_getattr(found, part, used)
return found
def derive_importpath(import_path: str, raising: bool) -> Tuple[str, object]:
if not isinstance(import_path, str) or "." not in import_path: # type: ignore[unreachable]
- raise TypeError(
- "must be absolute import path string, not {!r}".format(import_path)
- )
+ raise TypeError(f"must be absolute import path string, not {import_path!r}")
module, attr = import_path.rsplit(".", 1)
target = resolve(module)
if raising:
@final
class MonkeyPatch:
- """Object returned by the ``monkeypatch`` fixture keeping a record of
- setattr/item/env/syspath changes."""
+ """Helper to conveniently monkeypatch attributes/items/environment
+ variables/syspath.
+
+ Returned by the :fixture:`monkeypatch` fixture.
+
+ .. versionchanged:: 6.2
+ Can now also be used directly as `pytest.MonkeyPatch()`, for when
+ the fixture is not available. In this case, use
+ :meth:`with MonkeyPatch.context() as mp: <context>` or remember to call
+ :meth:`undo` explicitly.
+ """
def __init__(self) -> None:
- self._setattr = [] # type: List[Tuple[object, str, object]]
- self._setitem = (
- []
- ) # type: List[Tuple[MutableMapping[Any, Any], object, object]]
- self._cwd = None # type: Optional[str]
- self._savesyspath = None # type: Optional[List[str]]
+ self._setattr: List[Tuple[object, str, object]] = []
+ self._setitem: List[Tuple[MutableMapping[Any, Any], object, object]] = []
+ self._cwd: Optional[str] = None
+ self._savesyspath: Optional[List[str]] = None
+ @classmethod
@contextmanager
- def context(self) -> Generator["MonkeyPatch", None, None]:
+ def context(cls) -> Generator["MonkeyPatch", None, None]:
"""Context manager that returns a new :class:`MonkeyPatch` object
which undoes any patching done inside the ``with`` block upon exit.
such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
of this see `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_).
"""
- m = MonkeyPatch()
+ m = cls()
try:
yield m
finally:
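With context() as a classmethod, the helper is usable without the fixture;
a minimal sketch (the environment variable name is illustrative):

    from _pytest.monkeypatch import MonkeyPatch

    with MonkeyPatch.context() as mp:
        mp.setenv("MY_FLAG", "1")  # visible only inside the block
    # all patches are undone on exit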
) -> None:
...
- @overload # noqa: F811
- def setattr( # noqa: F811
+ @overload
+ def setattr(
self, target: object, name: str, value: object, raising: bool = ...,
) -> None:
...
- def setattr( # noqa: F811
+ def setattr(
self,
target: Union[str, object],
name: Union[object, str],
oldval = getattr(target, name, notset)
if raising and oldval is notset:
- raise AttributeError("{!r} has no attribute {!r}".format(target, name))
+ raise AttributeError(f"{target!r} has no attribute {name!r}")
# avoid class descriptors like staticmethod/classmethod
if inspect.isclass(target):
"""
if not isinstance(value, str):
warnings.warn( # type: ignore[unreachable]
- pytest.PytestWarning(
+ PytestWarning(
"Value of environment variable {name} type should be str, but got "
"{value!r} (type: {type}); converted to str implicitly".format(
name=name, value=value, type=type(value).__name__
Raises ``KeyError`` if it does not exist, unless ``raising`` is set to
False.
"""
- environ = os.environ # type: MutableMapping[str, str]
+ environ: MutableMapping[str, str] = os.environ
self.delitem(environ, name, raising=raising)
def syspath_prepend(self, path) -> None:
import os
import warnings
-from functools import lru_cache
-from typing import Any
+from pathlib import Path
from typing import Callable
-from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
+from typing import overload
from typing import Set
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import TerminalRepr
from _pytest.compat import cached_property
-from _pytest.compat import overload
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import ConftestImportFailure
from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH
-from _pytest.fixtures import FixtureDef
-from _pytest.fixtures import FixtureLookupError
from _pytest.mark.structures import Mark
from _pytest.mark.structures import MarkDecorator
from _pytest.mark.structures import NodeKeywords
from _pytest.outcomes import fail
from _pytest.pathlib import absolutepath
-from _pytest.pathlib import Path
from _pytest.store import Store
if TYPE_CHECKING:
- from typing import Type
-
# Imported here due to circular import.
from _pytest.main import Session
- from _pytest.warning_types import PytestWarning
from _pytest._code.code import _TracebackStyle
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
-@lru_cache(maxsize=None)
-def _splitnode(nodeid: str) -> Tuple[str, ...]:
- """Split a nodeid into constituent 'parts'.
+def iterparentnodeids(nodeid: str) -> Iterator[str]:
+ """Return the parent node IDs of a given node ID, inclusive.
- Node IDs are strings, and can be things like:
- ''
- 'testing/code'
- 'testing/code/test_excinfo.py'
- 'testing/code/test_excinfo.py::TestFormattedExcinfo'
+ For the node ID
- Return values are lists e.g.
- []
- ['testing', 'code']
- ['testing', 'code', 'test_excinfo.py']
- ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo']
- """
- if nodeid == "":
- # If there is no root node at all, return an empty list so the caller's
- # logic can remain sane.
- return ()
- parts = nodeid.split(SEP)
- # Replace single last element 'test_foo.py::Bar' with multiple elements
- # 'test_foo.py', 'Bar'.
- parts[-1:] = parts[-1].split("::")
- # Convert parts into a tuple to avoid possible errors with caching of a
- # mutable type.
- return tuple(parts)
-
-
-def ischildnode(baseid: str, nodeid: str) -> bool:
- """Return True if the nodeid is a child node of the baseid.
-
- E.g. 'foo/bar::Baz' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz',
- but not of 'foo/blorp'.
+ "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"
+
+ the result would be
+
+ ""
+ "testing"
+ "testing/code"
+ "testing/code/test_excinfo.py"
+ "testing/code/test_excinfo.py::TestFormattedExcinfo"
+ "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"
+
+ Note that :: parts are only considered at the last / component.
"""
- base_parts = _splitnode(baseid)
- node_parts = _splitnode(nodeid)
- if len(node_parts) < len(base_parts):
- return False
- return node_parts[: len(base_parts)] == base_parts
+ pos = 0
+ sep = SEP
+ yield ""
+ while True:
+ at = nodeid.find(sep, pos)
+ if at == -1 and sep == SEP:
+ sep = "::"
+ elif at == -1:
+ if nodeid:
+ yield nodeid
+ break
+ else:
+ if at:
+ yield nodeid[:at]
+ pos = at + len(sep)
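A quick sketch of the behavior described in the docstring (node IDs
illustrative):

    from _pytest.nodes import iterparentnodeids

    assert list(iterparentnodeids("a/b.py::TestX::test_y")) == [
        "", "a", "a/b.py", "a/b.py::TestX", "a/b.py::TestX::test_y",
    ]
    # The empty node ID yields only the root.
    assert list(iterparentnodeids("")) == [""]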
_NodeType = TypeVar("_NodeType", bound="Node")
#: The pytest config object.
if config:
- self.config = config # type: Config
+ self.config: Config = config
else:
if not parent:
raise TypeError("config or parent must be provided")
self.keywords = NodeKeywords(self)
#: The marker objects belonging to this node.
- self.own_markers = [] # type: List[Mark]
+ self.own_markers: List[Mark] = []
#: Allow adding of extra keywords to use for matching.
- self.extra_keyword_matches = set() # type: Set[str]
-
- # Used for storing artificial fixturedefs for direct parametrization.
- self._name2pseudofixturedef = {} # type: Dict[str, FixtureDef[Any]]
+ self.extra_keyword_matches: Set[str] = set()
if nodeid is not None:
assert "::()" not in nodeid
def __repr__(self) -> str:
return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None))
- def warn(self, warning: "PytestWarning") -> None:
+ def warn(self, warning: Warning) -> None:
"""Issue a warning for this Node.
Warnings will be displayed after the test session, unless explicitly suppressed.
:param Warning warning:
- The warning instance to issue. Must be a subclass of PytestWarning.
+ The warning instance to issue.
- :raises ValueError: If ``warning`` instance is not a subclass of PytestWarning.
+ :raises ValueError: If ``warning`` instance is not a subclass of Warning.
Example usage:
.. code-block:: python
node.warn(PytestWarning("some message"))
- """
- from _pytest.warning_types import PytestWarning
+ node.warn(UserWarning("some message"))
- if not isinstance(warning, PytestWarning):
+ .. versionchanged:: 6.2
+ Any subclass of :class:`Warning` is now accepted, rather than only
+ :class:`PytestWarning <pytest.PytestWarning>` subclasses.
+ """
+ # Enforce the type check here to avoid getting a generic type error later on.
+ if not isinstance(warning, Warning):
raise ValueError(
- "warning must be an instance of PytestWarning or subclass, got {!r}".format(
+ "warning must be an instance of Warning or subclass, got {!r}".format(
warning
)
)
"""Return list of all parent collectors up to self, starting from
the root of collection tree."""
chain = []
- item = self # type: Optional[Node]
+ item: Optional[Node] = self
while item is not None:
chain.append(item)
item = item.parent
def get_closest_marker(self, name: str) -> Optional[Mark]:
...
- @overload # noqa: F811
- def get_closest_marker(self, name: str, default: Mark) -> Mark: # noqa: F811
+ @overload
+ def get_closest_marker(self, name: str, default: Mark) -> Mark:
...
- def get_closest_marker( # noqa: F811
+ def get_closest_marker(
self, name: str, default: Optional[Mark] = None
) -> Optional[Mark]:
"""Return the first marker matching the name, from closest (for
def listextrakeywords(self) -> Set[str]:
"""Return a set of all extra keywords in self and any parents."""
- extra_keywords = set() # type: Set[str]
+ extra_keywords: Set[str] = set()
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
"""
self.session._setupstate.addfinalizer(fin, self)
- def getparent(self, cls: "Type[_NodeType]") -> Optional[_NodeType]:
+ def getparent(self, cls: Type[_NodeType]) -> Optional[_NodeType]:
"""Get the next parent node (including self) which is an instance of
the given class."""
- current = self # type: Optional[Node]
+ current: Optional[Node] = self
while current and not isinstance(current, cls):
current = current.parent
assert current is None or isinstance(current, cls)
excinfo: ExceptionInfo[BaseException],
style: "Optional[_TracebackStyle]" = None,
) -> TerminalRepr:
+ from _pytest.fixtures import FixtureLookupError
+
if isinstance(excinfo.value, ConftestImportFailure):
excinfo = ExceptionInfo(excinfo.value.excinfo)
if isinstance(excinfo.value, fail.Exception):
:rtype: A tuple of (str|py.path.local, int) with filename and line number.
"""
# See Item.location.
- location = getattr(
- node, "location", None
- ) # type: Optional[Tuple[str, Optional[int], str]]
+ location: Optional[Tuple[str, Optional[int], str]] = getattr(node, "location", None)
if location is not None:
return location[:2]
obj = getattr(node, "obj", None)
nodeid: Optional[str] = None,
) -> None:
super().__init__(name, parent, config, session, nodeid=nodeid)
- self._report_sections = [] # type: List[Tuple[str, str, str]]
+ self._report_sections: List[Tuple[str, str, str]] = []
#: A list of tuples (name, value) that holds user defined properties
#: for this test.
- self.user_properties = [] # type: List[Tuple[str, object]]
+ self.user_properties: List[Tuple[str, object]] = []
def runtest(self) -> None:
raise NotImplementedError("runtest must be implemented by Item subclass")
from typing import Callable
from typing import cast
from typing import Optional
+from typing import Type
from typing import TypeVar
TYPE_CHECKING = False # Avoid circular import through compat.
if TYPE_CHECKING:
from typing import NoReturn
- from typing import Type # noqa: F401 (used in type string)
from typing_extensions import Protocol
else:
# typing.Protocol is only available starting from Python 3.8. It is also
def __repr__(self) -> str:
if self.msg:
return self.msg
- return "<{} instance>".format(self.__class__.__name__)
+ return f"<{self.__class__.__name__} instance>"
__str__ = __repr__
# Ideally would just be `exit.Exception = Exit` etc.
_F = TypeVar("_F", bound=Callable[..., object])
-_ET = TypeVar("_ET", bound="Type[BaseException]")
+_ET = TypeVar("_ET", bound=Type[BaseException])
class _WithException(Protocol[_F, _ET]):
- Exception = None # type: _ET
- __call__ = None # type: _F
+ Exception: _ET
+ __call__: _F
def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]:
__import__(modname)
except ImportError as exc:
if reason is None:
- reason = "could not import {!r}: {}".format(modname, exc)
+ reason = f"could not import {modname!r}: {exc}"
raise Skipped(reason, allow_module_level=True) from None
mod = sys.modules[modname]
if minversion is None:
params = {"code": contents, "lexer": "text", "expiry": "1week"}
url = "https://bpaste.net"
try:
- response = (
+ response: str = (
urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8")
- ) # type: str
+ )
except OSError as exc_info: # urllib errors
return "bad response: %s" % exc_info
m = re.search(r'href="/raw/(\w+)"', response)
s = file.getvalue()
assert len(s)
pastebinurl = create_new_paste(s)
- terminalreporter.write_line("{} --> {}".format(msg, pastebinurl))
+ terminalreporter.write_line(f"{msg} --> {pastebinurl}")
import uuid
import warnings
from enum import Enum
+from errno import EBADF
+from errno import ELOOP
+from errno import ENOENT
+from errno import ENOTDIR
from functools import partial
from os.path import expanduser
from os.path import expandvars
from os.path import isabs
from os.path import sep
+from pathlib import Path
+from pathlib import PurePath
from posixpath import sep as posix_sep
from types import ModuleType
from typing import Callable
from _pytest.outcomes import skip
from _pytest.warning_types import PytestWarning
-if sys.version_info[:2] >= (3, 6):
- from pathlib import Path, PurePath
-else:
- from pathlib2 import Path, PurePath
+LOCK_TIMEOUT = 60 * 60 * 24 * 3
-__all__ = ["Path", "PurePath"]
+_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath)
-LOCK_TIMEOUT = 60 * 60 * 24 * 3
+# The following function, variables and comments were
+# copied from the cpython 3.9 Lib/pathlib.py file.
+# EBADF - guard against macOS `stat` throwing EBADF
+_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP)
-_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath)
+_IGNORED_WINERRORS = (
+ 21, # ERROR_NOT_READY - drive exists but is not accessible
+ 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself
+)
+
+
+def _ignore_error(exception):
+ return (
+ getattr(exception, "errno", None) in _IGNORED_ERRORS
+ or getattr(exception, "winerror", None) in _IGNORED_WINERRORS
+ )
def get_lock_path(path: _AnyPurePath) -> _AnyPurePath:
if not isinstance(excvalue, PermissionError):
warnings.warn(
- PytestWarning(
- "(rm_rf) error removing {}\n{}: {}".format(path, exctype, excvalue)
- )
+ PytestWarning(f"(rm_rf) error removing {path}\n{exctype}: {excvalue}")
)
return False
# try up to 10 times to create the folder
max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
new_number = max_existing + 1
- new_path = root.joinpath("{}{}".format(prefix, new_number))
+ new_path = root.joinpath(f"{prefix}{new_number}")
try:
new_path.mkdir()
except Exception:
try:
fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
except FileExistsError as e:
- raise OSError("cannot create lockfile in {path}".format(path=p)) from e
+ raise OSError(f"cannot create lockfile in {p}") from e
else:
pid = os.getpid()
spid = str(pid).encode()
lock_path = create_cleanup_lock(path)
parent = path.parent
- garbage = parent.joinpath("garbage-{}".format(uuid.uuid4()))
+ garbage = parent.joinpath(f"garbage-{uuid.uuid4()}")
path.rename(garbage)
rm_rf(garbage)
except OSError:
else:
name = str(path)
if path.is_absolute() and not os.path.isabs(pattern):
- pattern = "*{}{}".format(os.sep, pattern)
+ pattern = f"*{os.sep}{pattern}"
return fnmatch.fnmatch(name, pattern)
try:
os.symlink(str(src), str(dst), **kwargs)
except OSError as e:
- skip("symlinks not supported: {}".format(e))
+ skip(f"symlinks not supported: {e}")
class ImportMode(Enum):
def import_path(
p: Union[str, py.path.local, Path],
*,
- mode: Union[str, ImportMode] = ImportMode.prepend
+ mode: Union[str, ImportMode] = ImportMode.prepend,
) -> ModuleType:
"""Import and return a module from the given path, which can be a file (a module) or
a directory (a package).
Entries at each directory level are sorted.
"""
- entries = sorted(os.scandir(path), key=lambda entry: entry.name)
+
+ # Skip entries with symlink loops and other brokenness, so the caller doesn't
+ # have to deal with it.
+ entries = []
+ for entry in os.scandir(path):
+ try:
+ entry.is_file()
+ except OSError as err:
+ if _ignore_error(err):
+ continue
+ raise
+ entries.append(entry)
+
+ entries.sort(key=lambda entry: entry.name)
+
yield from entries
+
for entry in entries:
- if entry.is_dir(follow_symlinks=False) and recurse(entry):
+ if entry.is_dir() and recurse(entry):
yield from visit(entry.path, recurse)
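A minimal sketch of driving visit(); the directory name and recurse
predicate are illustrative:

    from _pytest.pathlib import visit

    # Walk "src", descending only into non-hidden directories; entries
    # whose stat fails with the ignored errnos are now skipped upfront.
    for entry in visit("src", recurse=lambda e: not e.name.startswith(".")):
        print(entry.path)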
def commonpath(path1: Path, path2: Path) -> Optional[Path]:
"""Return the common part shared with the other path, or None if there is
- no common part."""
+ no common part.
+
+ If one path is relative and one is absolute, returns None.
+ """
try:
return Path(os.path.commonpath((str(path1), str(path2))))
except ValueError:
"""Return a string which is a relative path from directory to dest such
that directory/bestrelpath == dest.
+ The paths must be either both absolute or both relative.
+
If no such path can be determined, returns dest.
"""
if dest == directory:
return os.curdir
# Find the longest common directory.
base = commonpath(directory, dest)
- # Can be the case on Windows.
+ # Can be the case on Windows for two absolute paths on different drives.
+ # Can be the case for two relative paths without common prefix.
+ # Can be the case for a relative path and an absolute path.
if not base:
return str(dest)
reldirectory = directory.relative_to(base)
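For example, on a POSIX layout (paths illustrative):

    # bestrelpath(Path("/a/b"), Path("/a/c/d"))  ->  "../c/d"
    # bestrelpath(Path("/a/b"), Path("c"))       ->  "c"  (mixed abs/rel,
    #                                                      no common part)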
-"""(Disabled by default) support for testing pytest and pytest plugins."""
+"""(Disabled by default) support for testing pytest and pytest plugins.
+
+PYTEST_DONT_REWRITE
+"""
import collections.abc
+import contextlib
import gc
import importlib
import os
import platform
import re
+import shutil
import subprocess
import sys
import traceback
from fnmatch import fnmatch
from io import StringIO
+from pathlib import Path
+from typing import Any
from typing import Callable
from typing import Dict
from typing import Generator
from typing import Iterable
from typing import List
from typing import Optional
+from typing import overload
from typing import Sequence
+from typing import TextIO
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import Union
from weakref import WeakKeyDictionary
+import attr
import py
from iniconfig import IniConfig
+from iniconfig import SectionWrapper
-import pytest
from _pytest import timing
from _pytest._code import Source
from _pytest.capture import _get_multicapture
from _pytest.compat import final
-from _pytest.compat import overload
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import _PluggyPlugin
from _pytest.config import Config
from _pytest.config import ExitCode
+from _pytest.config import hookimpl
+from _pytest.config import main
from _pytest.config import PytestPluginManager
from _pytest.config.argparsing import Parser
+from _pytest.deprecated import check_ispytest
+from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureRequest
from _pytest.main import Session
from _pytest.monkeypatch import MonkeyPatch
from _pytest.nodes import Collector
from _pytest.nodes import Item
+from _pytest.outcomes import fail
+from _pytest.outcomes import importorskip
+from _pytest.outcomes import skip
from _pytest.pathlib import make_numbered_dir
-from _pytest.pathlib import Path
-from _pytest.python import Module
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
-from _pytest.tmpdir import TempdirFactory
+from _pytest.tmpdir import TempPathFactory
+from _pytest.warning_types import PytestWarning
if TYPE_CHECKING:
- from typing import Type
from typing_extensions import Literal
import pexpect
+pytest_plugins = ["pytester_assertions"]
+
+
IGNORE_PAM = [ # filenames added when obtaining details about the current user
"/var/lib/sss/mc/passwd"
]
else:
return True
- @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+ @hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(self, item: Item) -> Generator[None, None, None]:
lines1 = self.get_open_files()
yield
"*** function %s:%s: %s " % item.location,
"See issue #2366",
]
- item.warn(pytest.PytestWarning("\n".join(error)))
+ item.warn(PytestWarning("\n".join(error)))
# used at least by pytest-xdist plugin
-@pytest.fixture
+@fixture
def _pytest(request: FixtureRequest) -> "PytestArg":
"""Return a helper which offers a gethookrecorder(hook) method which
returns a HookRecorder instance which helps to make assertions about called
class PytestArg:
def __init__(self, request: FixtureRequest) -> None:
- self.request = request
+ self._request = request
def gethookrecorder(self, hook) -> "HookRecorder":
hookrecorder = HookRecorder(hook._pm)
- self.request.addfinalizer(hookrecorder.finish_recording)
+ self._request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def __repr__(self) -> str:
d = self.__dict__.copy()
del d["_name"]
- return "<ParsedCall {!r}(**{!r})>".format(self._name, d)
+ return f"<ParsedCall {self._name!r}(**{d!r})>"
if TYPE_CHECKING:
# The class has undetermined attributes, this tells mypy about it.
def __init__(self, pluginmanager: PytestPluginManager) -> None:
self._pluginmanager = pluginmanager
- self.calls = [] # type: List[ParsedCall]
- self.ret = None # type: Optional[Union[int, ExitCode]]
+ self.calls: List[ParsedCall] = []
+ self.ret: Optional[Union[int, ExitCode]] = None
def before(hook_name: str, hook_impls, kwargs) -> None:
self.calls.append(ParsedCall(hook_name, kwargs))
break
print("NONAMEMATCH", name, "with", call)
else:
- pytest.fail("could not find {!r} check {!r}".format(name, check))
+ fail(f"could not find {name!r} check {check!r}")
def popcall(self, name: str) -> ParsedCall:
__tracebackhide__ = True
if call._name == name:
del self.calls[i]
return call
- lines = ["could not find call {!r}, in:".format(name)]
+ lines = [f"could not find call {name!r}, in:"]
lines.extend([" %s" % x for x in self.calls])
- pytest.fail("\n".join(lines))
+ fail("\n".join(lines))
def getcall(self, name: str) -> ParsedCall:
values = self.getcalls(name)
) -> Sequence[CollectReport]:
...
- @overload # noqa: F811
- def getreports( # noqa: F811
+ @overload
+ def getreports(
self, names: "Literal['pytest_runtest_logreport']",
) -> Sequence[TestReport]:
...
- @overload # noqa: F811
- def getreports( # noqa: F811
+ @overload
+ def getreports(
self,
names: Union[str, Iterable[str]] = (
"pytest_collectreport",
) -> Sequence[Union[CollectReport, TestReport]]:
...
- def getreports( # noqa: F811
+ def getreports(
self,
names: Union[str, Iterable[str]] = (
"pytest_collectreport",
) -> Sequence[CollectReport]:
...
- @overload # noqa: F811
- def getfailures( # noqa: F811
+ @overload
+ def getfailures(
self, names: "Literal['pytest_runtest_logreport']",
) -> Sequence[TestReport]:
...
- @overload # noqa: F811
- def getfailures( # noqa: F811
+ @overload
+ def getfailures(
self,
names: Union[str, Iterable[str]] = (
"pytest_collectreport",
) -> Sequence[Union[CollectReport, TestReport]]:
...
- def getfailures( # noqa: F811
+ def getfailures(
self,
names: Union[str, Iterable[str]] = (
"pytest_collectreport",
elif rep.skipped:
skipped.append(rep)
else:
- assert rep.failed, "Unexpected outcome: {!r}".format(rep)
+ assert rep.failed, f"Unexpected outcome: {rep!r}"
failed.append(rep)
return passed, skipped, failed
def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None:
__tracebackhide__ = True
+ from _pytest.pytester_assertions import assertoutcome
outcomes = self.listoutcomes()
- realpassed, realskipped, realfailed = outcomes
- obtained = {
- "passed": len(realpassed),
- "skipped": len(realskipped),
- "failed": len(realfailed),
- }
- expected = {"passed": passed, "skipped": skipped, "failed": failed}
- assert obtained == expected, outcomes
+ assertoutcome(
+ outcomes, passed=passed, skipped=skipped, failed=failed,
+ )
def clear(self) -> None:
self.calls[:] = []
-@pytest.fixture
+@fixture
def linecomp() -> "LineComp":
"""A :class: `LineComp` instance for checking that an input linearly
contains a sequence of strings."""
return LineComp()
-@pytest.fixture(name="LineMatcher")
-def LineMatcher_fixture(request: FixtureRequest) -> "Type[LineMatcher]":
+@fixture(name="LineMatcher")
+def LineMatcher_fixture(request: FixtureRequest) -> Type["LineMatcher"]:
"""A reference to the :class: `LineMatcher`.
This is instantiable with a list of lines (without their trailing newlines).
return LineMatcher
-@pytest.fixture
-def testdir(request: FixtureRequest, tmpdir_factory: TempdirFactory) -> "Testdir":
- """A :class: `TestDir` instance, that can be used to run and test pytest itself.
+@fixture
+def pytester(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> "Pytester":
+ """
+ Facilities to write tests/configuration files, execute pytest in isolation, and match
+ against expected output, perfect for black-box testing of pytest plugins.
+
+ It attempts to isolate the test run from external factors as much as possible, modifying
+ the current working directory to ``path`` and environment variables during initialization.
+
+ It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path`
+ fixture but provides methods which aid in testing pytest itself.
+ """
+ return Pytester(request, tmp_path_factory, _ispytest=True)
+
+
+@fixture
+def testdir(pytester: "Pytester") -> "Testdir":
+ """
+ Identical to :fixture:`pytester`, but provides an instance whose methods return
+ legacy ``py.path.local`` objects instead when applicable.
- It is particularly useful for testing plugins. It is similar to the `tmpdir` fixture
- but provides methods which aid in testing pytest itself.
+ New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`.
"""
- return Testdir(request, tmpdir_factory)
+ return Testdir(pytester, _ispytest=True)
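A minimal black-box sketch using the new fixture (test contents
illustrative):

    def test_reports_failure(pytester):
        pytester.makepyfile("def test_fail(): assert False")
        result = pytester.runpytest()
        result.assert_outcomes(failed=1)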
-@pytest.fixture
+@fixture
def _sys_snapshot() -> Generator[None, None, None]:
snappaths = SysPathsSnapshot()
snapmods = SysModulesSnapshot()
snappaths.restore()
-@pytest.fixture
+@fixture
def _config_for_test() -> Generator[Config, None, None]:
from _pytest.config import get_config
duration: float,
) -> None:
try:
- self.ret = pytest.ExitCode(ret) # type: Union[int, ExitCode]
+ self.ret: Union[int, ExitCode] = ExitCode(ret)
"""The return value."""
except ValueError:
self.ret = ret
self.stdout = LineMatcher(outlines)
""":class:`LineMatcher` of stdout.
- Use e.g. :func:`stdout.str() <LineMatcher.str()>` to reconstruct stdout, or the commonly used
+ Use e.g. :func:`str(stdout) <LineMatcher.__str__()>` to reconstruct stdout, or the commonly used
:func:`stdout.fnmatch_lines() <LineMatcher.fnmatch_lines()>` method.
"""
self.stderr = LineMatcher(errlines)
"""Assert that the specified outcomes appear with the respective
numbers (0 means it didn't occur) in the text output from a test run."""
__tracebackhide__ = True
-
- d = self.parseoutcomes()
- obtained = {
- "passed": d.get("passed", 0),
- "skipped": d.get("skipped", 0),
- "failed": d.get("failed", 0),
- "errors": d.get("errors", 0),
- "xpassed": d.get("xpassed", 0),
- "xfailed": d.get("xfailed", 0),
- }
- expected = {
- "passed": passed,
- "skipped": skipped,
- "failed": failed,
- "errors": errors,
- "xpassed": xpassed,
- "xfailed": xfailed,
- }
- assert obtained == expected
+ from _pytest.pytester_assertions import assert_outcomes
+
+ outcomes = self.parseoutcomes()
+ assert_outcomes(
+ outcomes,
+ passed=passed,
+ skipped=skipped,
+ failed=failed,
+ errors=errors,
+ xpassed=xpassed,
+ xfailed=xfailed,
+ )
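The widened keyword set allows one assertion over all outcome kinds; a
short sketch (counts illustrative):

    # result = pytester.runpytest("-q")
    # result.assert_outcomes(passed=2, xfailed=1, errors=0)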
class CwdSnapshot:
@final
-class Testdir:
- """Temporary test directory with tools to test/run pytest itself.
+class Pytester:
+ """
+ Facilities to write tests/configuration files, execute pytest in isolation, and match
+ against expected output, perfect for black-box testing of pytest plugins.
- This is based on the :fixture:`tmpdir` fixture but provides a number of methods
- which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
- methods will use :py:attr:`tmpdir` as their current working directory.
+ It attempts to isolate the test run from external factors as much as possible, modifying
+ the current working directory to ``path`` and environment variables during initialization.
Attributes:
- :ivar tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
+ :ivar Path path: temporary directory path used to create files/run tests from, etc.
:ivar plugins:
A list of plugins to use with :py:meth:`parseconfig` and
class TimeoutExpired(Exception):
pass
- def __init__(self, request: FixtureRequest, tmpdir_factory: TempdirFactory) -> None:
- self.request = request
- self._mod_collections = (
- WeakKeyDictionary()
- ) # type: WeakKeyDictionary[Module, List[Union[Item, Collector]]]
+ def __init__(
+ self,
+ request: FixtureRequest,
+ tmp_path_factory: TempPathFactory,
+ *,
+ _ispytest: bool = False,
+ ) -> None:
+ check_ispytest(_ispytest)
+ self._request = request
+ self._mod_collections: WeakKeyDictionary[
+ Collector, List[Union[Item, Collector]]
+ ] = WeakKeyDictionary()
if request.function:
- name = request.function.__name__ # type: str
+ name: str = request.function.__name__
else:
name = request.node.name
self._name = name
- self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
- self.test_tmproot = tmpdir_factory.mktemp("tmp-" + name, numbered=True)
- self.plugins = [] # type: List[Union[str, _PluggyPlugin]]
+ self._path: Path = tmp_path_factory.mktemp(name, numbered=True)
+ self.plugins: List[Union[str, _PluggyPlugin]] = []
self._cwd_snapshot = CwdSnapshot()
self._sys_path_snapshot = SysPathsSnapshot()
self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
self.chdir()
- self.request.addfinalizer(self.finalize)
- self._method = self.request.config.getoption("--runpytest")
+ self._request.addfinalizer(self._finalize)
+ self._method = self._request.config.getoption("--runpytest")
+ self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True)
- mp = self.monkeypatch = MonkeyPatch()
- mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self.test_tmproot))
+ self._monkeypatch = mp = MonkeyPatch()
+ mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot))
# Ensure no unexpected caching via tox.
mp.delenv("TOX_ENV_DIR", raising=False)
# Discard outer pytest options.
mp.delenv("PYTEST_ADDOPTS", raising=False)
# Ensure no user config is used.
- tmphome = str(self.tmpdir)
+ tmphome = str(self.path)
mp.setenv("HOME", tmphome)
mp.setenv("USERPROFILE", tmphome)
# Do not use colors for inner runs by default.
mp.setenv("PY_COLORS", "0")
- def __repr__(self) -> str:
- return "<Testdir {!r}>".format(self.tmpdir)
+ @property
+ def path(self) -> Path:
+ """Temporary directory where files are created and pytest is executed."""
+ return self._path
- def __str__(self) -> str:
- return str(self.tmpdir)
+ def __repr__(self) -> str:
+ return f"<Pytester {self.path!r}>"
- def finalize(self) -> None:
- """Clean up global state artifacts.
+ def _finalize(self) -> None:
+ """
+ Clean up global state artifacts.
Some methods modify the global interpreter state and this tries to
- clean this up. It does not remove the temporary directory however so
+ clean this up. It does not remove the temporary directory, however, so
it can be looked at after the test run has finished.
"""
self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore()
self._cwd_snapshot.restore()
- self.monkeypatch.undo()
+ self._monkeypatch.undo()
def __take_sys_modules_snapshot(self) -> SysModulesSnapshot:
# Some zope modules used by twisted-related tests keep internal state
# and can't be deleted; we had some trouble in the past with
# `zope.interface` for example.
+ #
+ # Preserve readline due to https://bugs.python.org/issue41033.
+ # pexpect issues a SIGWINCH.
def preserve_module(name):
- return name.startswith("zope")
+ return name.startswith(("zope", "readline"))
return SysModulesSnapshot(preserve=preserve_module)
def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder:
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
- self.request.addfinalizer(reprec.finish_recording)
+ self._request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self) -> None:
This is done automatically upon instantiation.
"""
- self.tmpdir.chdir()
+ os.chdir(self.path)
- def _makefile(self, ext: str, lines, files, encoding: str = "utf-8"):
+ def _makefile(
+ self,
+ ext: str,
+ lines: Sequence[Union[Any, bytes]],
+ files: Dict[str, str],
+ encoding: str = "utf-8",
+ ) -> Path:
items = list(files.items())
- def to_text(s):
+ def to_text(s: Union[Any, bytes]) -> str:
return s.decode(encoding) if isinstance(s, bytes) else str(s)
if lines:
ret = None
for basename, value in items:
- p = self.tmpdir.join(basename).new(ext=ext)
- p.dirpath().ensure_dir()
+ p = self.path.joinpath(basename).with_suffix(ext)
+ p.parent.mkdir(parents=True, exist_ok=True)
source_ = Source(value)
source = "\n".join(to_text(line) for line in source_.lines)
- p.write(source.strip().encode(encoding), "wb")
+ p.write_text(source.strip(), encoding=encoding)
if ret is None:
ret = p
+ assert ret is not None
return ret
- def makefile(self, ext: str, *args: str, **kwargs):
- r"""Create new file(s) in the testdir.
+ def makefile(self, ext: str, *args: str, **kwargs: str) -> Path:
+ r"""Create new file(s) in the test directory.
:param str ext:
The extension the file(s) should use, including the dot, e.g. `.py`.
.. code-block:: python
- testdir.makefile(".txt", "line1", "line2")
+ pytester.makefile(".txt", "line1", "line2")
- testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
+ pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
"""
return self._makefile(ext, args, kwargs)
- def makeconftest(self, source):
+ def makeconftest(self, source: str) -> Path:
"""Write a contest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)
- def makeini(self, source):
+ def makeini(self, source: str) -> Path:
"""Write a tox.ini file with 'source' as contents."""
return self.makefile(".ini", tox=source)
- def getinicfg(self, source) -> IniConfig:
+ def getinicfg(self, source: str) -> SectionWrapper:
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
- return IniConfig(p)["pytest"]
+ return IniConfig(str(p))["pytest"]
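A short sketch of the ini helpers above; the ``addopts`` value is an arbitrary example:

.. code-block:: python

    def test_ini_helpers(pytester):
        # makeini writes a tox.ini; getinicfg parses its [pytest] section.
        section = pytester.getinicfg("[pytest]\naddopts = -ra\n")
        assert section["addopts"] == "-ra"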
- def makepyprojecttoml(self, source):
+ def makepyprojecttoml(self, source: str) -> Path:
"""Write a pyproject.toml file with 'source' as contents.
.. versionadded:: 6.0
"""
return self.makefile(".toml", pyproject=source)
- def makepyfile(self, *args, **kwargs):
+ def makepyfile(self, *args, **kwargs) -> Path:
r"""Shortcut for .makefile() with a .py extension.
Defaults to the test name with a '.py' extension, e.g. test_foobar.py, overwriting
.. code-block:: python
- def test_something(testdir):
+ def test_something(pytester):
# Initial file is created test_something.py.
- testdir.makepyfile("foobar")
+ pytester.makepyfile("foobar")
# To create multiple files, pass kwargs accordingly.
- testdir.makepyfile(custom="foobar")
+ pytester.makepyfile(custom="foobar")
# At this point, both 'test_something.py' & 'custom.py' exist in the test directory.
"""
return self._makefile(".py", args, kwargs)
- def maketxtfile(self, *args, **kwargs):
+ def maketxtfile(self, *args, **kwargs) -> Path:
r"""Shortcut for .makefile() with a .txt extension.
Defaults to the test name with a '.txt' extension, e.g. test_foobar.txt, overwriting
.. code-block:: python
- def test_something(testdir):
+ def test_something(pytester):
# Initial file is created test_something.txt.
- testdir.maketxtfile("foobar")
+ pytester.maketxtfile("foobar")
# To create multiple files, pass kwargs accordingly.
- testdir.maketxtfile(custom="foobar")
+ pytester.maketxtfile(custom="foobar")
# At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory.
"""
return self._makefile(".txt", args, kwargs)
- def syspathinsert(self, path=None) -> None:
+ def syspathinsert(
+ self, path: Optional[Union[str, "os.PathLike[str]"]] = None
+ ) -> None:
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically when this object dies at the end of each
test.
"""
if path is None:
- path = self.tmpdir
+ path = self.path
- self.monkeypatch.syspath_prepend(str(path))
+ self._monkeypatch.syspath_prepend(str(path))
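A sketch of ``syspathinsert`` in use; the ``helper`` module name is made up for illustration:

.. code-block:: python

    def test_syspathinsert(pytester):
        pytester.makepyfile(helper="ANSWER = 42\n")
        pytester.syspathinsert()  # prepends pytester.path; undone on teardown

        import helper

        assert helper.ANSWER == 42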
- def mkdir(self, name) -> py.path.local:
+ def mkdir(self, name: str) -> Path:
"""Create a new (sub)directory."""
- return self.tmpdir.mkdir(name)
+ p = self.path / name
+ p.mkdir()
+ return p
- def mkpydir(self, name) -> py.path.local:
- """Create a new Python package.
+ def mkpydir(self, name: str) -> Path:
+ """Create a new python package.
This creates a (sub)directory with an empty ``__init__.py`` file so it
gets recognised as a Python package.
"""
- p = self.mkdir(name)
- p.ensure("__init__.py")
+ p = self.path / name
+ p.mkdir()
+ p.joinpath("__init__.py").touch()
return p
- def copy_example(self, name=None) -> py.path.local:
+ def copy_example(self, name: Optional[str] = None) -> Path:
"""Copy file from project's directory into the testdir.
:param str name: The name of the file to copy.
- :returns: Path to the copied directory (inside ``self.tmpdir``).
- """
- import warnings
- from _pytest.warning_types import PYTESTER_COPY_EXAMPLE
+ :return: Path to the copied file or directory (inside ``self.path``).
- warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2)
- example_dir = self.request.config.getini("pytester_example_dir")
+ """
+ example_dir = self._request.config.getini("pytester_example_dir")
if example_dir is None:
raise ValueError("pytester_example_dir is unset, can't copy examples")
- example_dir = self.request.config.rootdir.join(example_dir)
+ example_dir = Path(str(self._request.config.rootdir)) / example_dir
- for extra_element in self.request.node.iter_markers("pytester_example_path"):
+ for extra_element in self._request.node.iter_markers("pytester_example_path"):
assert extra_element.args
- example_dir = example_dir.join(*extra_element.args)
+ example_dir = example_dir.joinpath(*extra_element.args)
if name is None:
func_name = self._name
maybe_dir = example_dir / func_name
maybe_file = example_dir / (func_name + ".py")
- if maybe_dir.isdir():
+ if maybe_dir.is_dir():
example_path = maybe_dir
- elif maybe_file.isfile():
+ elif maybe_file.is_file():
example_path = maybe_file
else:
raise LookupError(
- "{} cant be found as module or package in {}".format(
- func_name, example_dir.bestrelpath(self.request.config.rootdir)
- )
+ f"{func_name} can't be found as module or package in {example_dir}"
)
else:
- example_path = example_dir.join(name)
-
- if example_path.isdir() and not example_path.join("__init__.py").isfile():
- example_path.copy(self.tmpdir)
- return self.tmpdir
- elif example_path.isfile():
- result = self.tmpdir.join(example_path.basename)
- example_path.copy(result)
+ example_path = example_dir.joinpath(name)
+
+ if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file():
+ # TODO: py.path.local.copy can copy files to existing directories,
+ # while with shutil.copytree the destination directory cannot exist,
+ # we will need to roll our own in order to drop py.path.local completely
+ py.path.local(example_path).copy(py.path.local(self.path))
+ return self.path
+ elif example_path.is_file():
+ result = self.path.joinpath(example_path.name)
+ shutil.copy(example_path, result)
return result
else:
raise LookupError(
- 'example "{}" is not found as a file or directory'.format(example_path)
+ f'example "{example_path}" is not found as a file or directory'
)
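A sketch of how ``copy_example`` is typically driven; the ini value, path segment, and file name are assumptions about the project layout:

.. code-block:: python

    # pytest.ini (or tox.ini/setup.cfg) of the project under test:
    #     [pytest]
    #     pytester_example_dir = testing/example_scripts

    import pytest


    @pytest.mark.pytester_example_path("subdir")  # optional extra path segments
    def test_copied_example(pytester):
        pytester.copy_example("test_example.py")
        result = pytester.runpytest()
        result.assert_outcomes(passed=1)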
Session = Session
- def getnode(self, config: Config, arg):
+ def getnode(
+ self, config: Config, arg: Union[str, "os.PathLike[str]"]
+ ) -> Optional[Union[Collector, Item]]:
"""Return the collection node of a file.
:param _pytest.config.Config config:
config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
return res
- def getpathnode(self, path):
+ def getpathnode(self, path: Union[str, "os.PathLike[str]"]):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
:param path: Path to the file.
"""
+ path = py.path.local(path)
config = self.parseconfigure(path)
session = Session.from_config(config)
x = session.fspath.bestrelpath(path)
test items contained within.
"""
session = colitems[0].session
- result = [] # type: List[Item]
+ result: List[Item] = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
- def runitem(self, source):
+ def runitem(self, source: str) -> Any:
"""Run the "test_func" Item.
The calling test instance (class containing the test method) must
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
- testclassinstance = self.request.instance
+ testclassinstance = self._request.instance
runner = testclassinstance.getrunner()
return runner(item)
- def inline_runsource(self, source, *cmdlineargs) -> HookRecorder:
+ def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder:
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
return items, rec
def inline_run(
- self, *args, plugins=(), no_reraise_ctrlc: bool = False
+ self,
+ *args: Union[str, "os.PathLike[str]"],
+ plugins=(),
+ no_reraise_ctrlc: bool = False,
) -> HookRecorder:
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins.append(Collect())
- ret = pytest.main(list(args), plugins=plugins)
+ ret = main([str(x) for x in args], plugins=plugins)
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec: # type: ignore
pass
- reprec.ret = ret
+ reprec.ret = ret # type: ignore
# Typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing.
for finalizer in finalizers:
finalizer()
- def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
+ def runpytest_inprocess(
+ self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any
+ ) -> RunResult:
"""Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides."""
syspathinsert = kwargs.pop("syspathinsert", False)
res.reprec = reprec # type: ignore
return res
- def runpytest(self, *args, **kwargs) -> RunResult:
+ def runpytest(
+ self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any
+ ) -> RunResult:
"""Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`."""
- args = self._ensure_basetemp(args)
+ new_args = self._ensure_basetemp(args)
if self._method == "inprocess":
- return self.runpytest_inprocess(*args, **kwargs)
+ return self.runpytest_inprocess(*new_args, **kwargs)
elif self._method == "subprocess":
- return self.runpytest_subprocess(*args, **kwargs)
- raise RuntimeError("Unrecognized runpytest option: {}".format(self._method))
-
- def _ensure_basetemp(self, args):
- args = list(args)
- for x in args:
+ return self.runpytest_subprocess(*new_args, **kwargs)
+ raise RuntimeError(f"Unrecognized runpytest option: {self._method}")
+
+ def _ensure_basetemp(
+ self, args: Sequence[Union[str, "os.PathLike[str]"]]
+ ) -> List[Union[str, "os.PathLike[str]"]]:
+ new_args = list(args)
+ for x in new_args:
if str(x).startswith("--basetemp"):
break
else:
- args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
- return args
+ new_args.append("--basetemp=%s" % self.path.parent.joinpath("basetemp"))
+ return new_args
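How the dispatching above looks from a test, as a minimal sketch:

.. code-block:: python

    def test_runpytest_dispatch(pytester):
        pytester.makepyfile("def test_pass(): pass")
        # Runs in-process or in a subprocess depending on --runpytest.
        result = pytester.runpytest("-v")
        assert result.ret == 0
        result.stdout.fnmatch_lines(["*test_pass*PASSED*"])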
- def parseconfig(self, *args) -> Config:
+ def parseconfig(self, *args: Union[str, "os.PathLike[str]"]) -> Config:
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config to create
If :py:attr:`plugins` has been populated they should be plugin modules
to be registered with the PluginManager.
"""
- args = self._ensure_basetemp(args)
-
import _pytest.config
- config = _pytest.config._prepareconfig(args, self.plugins) # type: ignore[arg-type]
+ new_args = self._ensure_basetemp(args)
+ new_args = [str(x) for x in new_args]
+
+ config = _pytest.config._prepareconfig(new_args, self.plugins) # type: ignore[arg-type]
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
- self.request.addfinalizer(config._ensure_unconfigure)
+ self._request.addfinalizer(config._ensure_unconfigure)
return config
- def parseconfigure(self, *args) -> Config:
+ def parseconfigure(self, *args: Union[str, "os.PathLike[str]"]) -> Config:
"""Return a new pytest configured Config instance.
Returns a new :py:class:`_pytest.config.Config` instance like
config._do_configure()
return config
- def getitem(self, source, funcname: str = "test_func") -> Item:
+ def getitem(self, source: str, funcname: str = "test_func") -> Item:
"""Return the test item for a test function.
Writes the source to a python file and runs pytest's collection on
funcname, source, items
)
- def getitems(self, source) -> List[Item]:
+ def getitems(self, source: str) -> List[Item]:
"""Return all test items collected from the module.
Writes the source to a Python file and runs pytest's collection on
modcol = self.getmodulecol(source)
return self.genitems([modcol])
- def getmodulecol(self, source, configargs=(), withinit: bool = False):
+ def getmodulecol(
+ self, source: Union[str, Path], configargs=(), *, withinit: bool = False
+ ):
"""Return the module collection node for ``source``.
Writes ``source`` to a file using :py:meth:`makepyfile` and then
directory to ensure it is a package.
"""
if isinstance(source, Path):
- path = self.tmpdir.join(str(source))
+ path = self.path.joinpath(source)
assert not withinit, "not supported for paths"
else:
- kw = {self._name: Source(source).strip()}
+ kw = {self._name: str(source)}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__="#")
return self.getnode(config, path)
def collect_by_name(
- self, modcol: Module, name: str
+ self, modcol: Collector, name: str
) -> Optional[Union[Item, Collector]]:
"""Return the collection node for name from the module collection.
def popen(
self,
cmdargs,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
+ stdout: Union[int, TextIO] = subprocess.PIPE,
+ stderr: Union[int, TextIO] = subprocess.PIPE,
stdin=CLOSE_STDIN,
- **kw
+ **kw,
):
"""Invoke subprocess.Popen.
)
kw["env"] = env
- if stdin is Testdir.CLOSE_STDIN:
+ if stdin is self.CLOSE_STDIN:
kw["stdin"] = subprocess.PIPE
elif isinstance(stdin, bytes):
kw["stdin"] = subprocess.PIPE
kw["stdin"] = stdin
popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
- if stdin is Testdir.CLOSE_STDIN:
+ if stdin is self.CLOSE_STDIN:
assert popen.stdin is not None
popen.stdin.close()
elif isinstance(stdin, bytes):
return popen
def run(
- self, *cmdargs, timeout: Optional[float] = None, stdin=CLOSE_STDIN
+ self,
+ *cmdargs: Union[str, "os.PathLike[str]"],
+ timeout: Optional[float] = None,
+ stdin=CLOSE_STDIN,
) -> RunResult:
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and stderr.
- :param args:
- The sequence of arguments to pass to `subprocess.Popen()`.
+ :param cmdargs:
+ The sequence of arguments to pass to `subprocess.Popen()`, with path-like objects
+ being converted to ``str`` automatically.
:param timeout:
The period in seconds after which to timeout and raise
- :py:class:`Testdir.TimeoutExpired`.
+ :py:class:`Pytester.TimeoutExpired`.
:param stdin:
Optional standard input. If given as bytes, they are sent and the
pipe is closed; otherwise the value is passed through to ``popen``.
"""
__tracebackhide__ = True
+ # TODO: Remove type ignore in next mypy release.
+ # https://github.com/python/typeshed/pull/4582
cmdargs = tuple(
- str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs
+ os.fspath(arg) if isinstance(arg, os.PathLike) else arg for arg in cmdargs # type: ignore[misc]
)
- p1 = self.tmpdir.join("stdout")
- p2 = self.tmpdir.join("stderr")
+ p1 = self.path.joinpath("stdout")
+ p2 = self.path.joinpath("stderr")
print("running:", *cmdargs)
- print(" in:", py.path.local())
- f1 = open(str(p1), "w", encoding="utf8")
- f2 = open(str(p2), "w", encoding="utf8")
- try:
+ print(" in:", Path.cwd())
+
+ with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2:
now = timing.time()
popen = self.popen(
cmdargs,
stderr=f2,
close_fds=(sys.platform != "win32"),
)
- if isinstance(stdin, bytes):
+ if popen.stdin is not None:
popen.stdin.close()
def handle_timeout() -> None:
ret = popen.wait(timeout)
except subprocess.TimeoutExpired:
handle_timeout()
- finally:
- f1.close()
- f2.close()
- f1 = open(str(p1), encoding="utf8")
- f2 = open(str(p2), encoding="utf8")
- try:
+
+ with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2:
out = f1.read().splitlines()
err = f2.read().splitlines()
- finally:
- f1.close()
- f2.close()
+
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
- try:
+
+ with contextlib.suppress(ValueError):
ret = ExitCode(ret)
- except ValueError:
- pass
return RunResult(ret, out, err, timing.time() - now)
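A sketch of ``run`` with an arbitrary child command:

.. code-block:: python

    import sys


    def test_run_subprocess(pytester):
        result = pytester.run(sys.executable, "-c", "print('hello')")
        assert result.ret == 0
        # stdout/stderr are LineMatcher objects over the captured lines.
        assert result.stdout.str() == "hello"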
def _dump_lines(self, lines, fp):
for line in lines:
print(line, file=fp)
except UnicodeEncodeError:
- print("couldn't print to {} because of encoding".format(fp))
+ print(f"couldn't print to {fp} because of encoding")
def _getpytestargs(self) -> Tuple[str, ...]:
return sys.executable, "-mpytest"
The sequence of arguments to pass to the pytest subprocess.
:param timeout:
The period in seconds after which to timeout and raise
- :py:class:`Testdir.TimeoutExpired`.
+ :py:class:`Pytester.TimeoutExpired`.
:rtype: RunResult
"""
__tracebackhide__ = True
- p = make_numbered_dir(root=Path(str(self.tmpdir)), prefix="runpytest-")
+ p = make_numbered_dir(root=self.path, prefix="runpytest-")
args = ("--basetemp=%s" % p,) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
The pexpect child is returned.
"""
- basetemp = self.tmpdir.mkdir("temp-pexpect")
+ basetemp = self.path / "temp-pexpect"
+ basetemp.mkdir()
invoke = " ".join(map(str, self._getpytestargs()))
- cmd = "{} --basetemp={} {}".format(invoke, basetemp, string)
+ cmd = f"{invoke} --basetemp={basetemp} {string}"
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn":
The pexpect child is returned.
"""
- pexpect = pytest.importorskip("pexpect", "3.0")
+ pexpect = importorskip("pexpect", "3.0")
if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
- pytest.skip("pypy-64 bit not supported")
+ skip("pypy-64 bit not supported")
if not hasattr(pexpect, "spawn"):
- pytest.skip("pexpect.spawn not available")
- logfile = self.tmpdir.join("spawn.out").open("wb")
+ skip("pexpect.spawn not available")
+ logfile = self.path.joinpath("spawn.out").open("wb")
- child = pexpect.spawn(cmd, logfile=logfile)
- self.request.addfinalizer(logfile.close)
- child.timeout = expect_timeout
+ child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout)
+ self._request.addfinalizer(logfile.close)
return child
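A sketch of ``spawn_pytest``; the test is skipped automatically where pexpect is unavailable (e.g. on Windows):

.. code-block:: python

    def test_spawned_run(pytester):
        pytester.makepyfile("def test_one(): pass")
        child = pytester.spawn_pytest("")  # extra command-line args go here
        child.expect("1 passed")
        child.wait()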
LineMatcher(lines1).fnmatch_lines(lines2)
+@final
+@attr.s(repr=False, str=False, init=False)
+class Testdir:
+ """
+ Similar to :class:`Pytester`, but this class works with legacy py.path.local objects instead.
+
+ All methods just forward to an internal :class:`Pytester` instance, converting results
+ to `py.path.local` objects as necessary.
+ """
+
+ __test__ = False
+
+ CLOSE_STDIN = Pytester.CLOSE_STDIN
+ TimeoutExpired = Pytester.TimeoutExpired
+ Session = Pytester.Session
+
+ def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None:
+ check_ispytest(_ispytest)
+ self._pytester = pytester
+
+ @property
+ def tmpdir(self) -> py.path.local:
+ """Temporary directory where tests are executed."""
+ return py.path.local(self._pytester.path)
+
+ @property
+ def test_tmproot(self) -> py.path.local:
+ return py.path.local(self._pytester._test_tmproot)
+
+ @property
+ def request(self):
+ return self._pytester._request
+
+ @property
+ def plugins(self):
+ return self._pytester.plugins
+
+ @plugins.setter
+ def plugins(self, plugins):
+ self._pytester.plugins = plugins
+
+ @property
+ def monkeypatch(self) -> MonkeyPatch:
+ return self._pytester._monkeypatch
+
+ def make_hook_recorder(self, pluginmanager) -> HookRecorder:
+ """See :meth:`Pytester.make_hook_recorder`."""
+ return self._pytester.make_hook_recorder(pluginmanager)
+
+ def chdir(self) -> None:
+ """See :meth:`Pytester.chdir`."""
+ return self._pytester.chdir()
+
+ def finalize(self) -> None:
+ """See :meth:`Pytester._finalize`."""
+ return self._pytester._finalize()
+
+ def makefile(self, ext, *args, **kwargs) -> py.path.local:
+ """See :meth:`Pytester.makefile`."""
+ return py.path.local(str(self._pytester.makefile(ext, *args, **kwargs)))
+
+ def makeconftest(self, source) -> py.path.local:
+ """See :meth:`Pytester.makeconftest`."""
+ return py.path.local(str(self._pytester.makeconftest(source)))
+
+ def makeini(self, source) -> py.path.local:
+ """See :meth:`Pytester.makeini`."""
+ return py.path.local(str(self._pytester.makeini(source)))
+
+ def getinicfg(self, source: str) -> SectionWrapper:
+ """See :meth:`Pytester.getinicfg`."""
+ return self._pytester.getinicfg(source)
+
+ def makepyprojecttoml(self, source) -> py.path.local:
+ """See :meth:`Pytester.makepyprojecttoml`."""
+ return py.path.local(str(self._pytester.makepyprojecttoml(source)))
+
+ def makepyfile(self, *args, **kwargs) -> py.path.local:
+ """See :meth:`Pytester.makepyfile`."""
+ return py.path.local(str(self._pytester.makepyfile(*args, **kwargs)))
+
+ def maketxtfile(self, *args, **kwargs) -> py.path.local:
+ """See :meth:`Pytester.maketxtfile`."""
+ return py.path.local(str(self._pytester.maketxtfile(*args, **kwargs)))
+
+ def syspathinsert(self, path=None) -> None:
+ """See :meth:`Pytester.syspathinsert`."""
+ return self._pytester.syspathinsert(path)
+
+ def mkdir(self, name) -> py.path.local:
+ """See :meth:`Pytester.mkdir`."""
+ return py.path.local(str(self._pytester.mkdir(name)))
+
+ def mkpydir(self, name) -> py.path.local:
+ """See :meth:`Pytester.mkpydir`."""
+ return py.path.local(str(self._pytester.mkpydir(name)))
+
+ def copy_example(self, name=None) -> py.path.local:
+ """See :meth:`Pytester.copy_example`."""
+ return py.path.local(str(self._pytester.copy_example(name)))
+
+ def getnode(self, config: Config, arg) -> Optional[Union[Item, Collector]]:
+ """See :meth:`Pytester.getnode`."""
+ return self._pytester.getnode(config, arg)
+
+ def getpathnode(self, path):
+ """See :meth:`Pytester.getpathnode`."""
+ return self._pytester.getpathnode(path)
+
+ def genitems(self, colitems: List[Union[Item, Collector]]) -> List[Item]:
+ """See :meth:`Pytester.genitems`."""
+ return self._pytester.genitems(colitems)
+
+ def runitem(self, source):
+ """See :meth:`Pytester.runitem`."""
+ return self._pytester.runitem(source)
+
+ def inline_runsource(self, source, *cmdlineargs):
+ """See :meth:`Pytester.inline_runsource`."""
+ return self._pytester.inline_runsource(source, *cmdlineargs)
+
+ def inline_genitems(self, *args):
+ """See :meth:`Pytester.inline_genitems`."""
+ return self._pytester.inline_genitems(*args)
+
+ def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False):
+ """See :meth:`Pytester.inline_run`."""
+ return self._pytester.inline_run(
+ *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc
+ )
+
+ def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
+ """See :meth:`Pytester.runpytest_inprocess`."""
+ return self._pytester.runpytest_inprocess(*args, **kwargs)
+
+ def runpytest(self, *args, **kwargs) -> RunResult:
+ """See :meth:`Pytester.runpytest`."""
+ return self._pytester.runpytest(*args, **kwargs)
+
+ def parseconfig(self, *args) -> Config:
+ """See :meth:`Pytester.parseconfig`."""
+ return self._pytester.parseconfig(*args)
+
+ def parseconfigure(self, *args) -> Config:
+ """See :meth:`Pytester.parseconfigure`."""
+ return self._pytester.parseconfigure(*args)
+
+ def getitem(self, source, funcname="test_func"):
+ """See :meth:`Pytester.getitem`."""
+ return self._pytester.getitem(source, funcname)
+
+ def getitems(self, source):
+ """See :meth:`Pytester.getitems`."""
+ return self._pytester.getitems(source)
+
+ def getmodulecol(self, source, configargs=(), withinit=False):
+ """See :meth:`Pytester.getmodulecol`."""
+ return self._pytester.getmodulecol(
+ source, configargs=configargs, withinit=withinit
+ )
+
+ def collect_by_name(
+ self, modcol: Collector, name: str
+ ) -> Optional[Union[Item, Collector]]:
+ """See :meth:`Pytester.collect_by_name`."""
+ return self._pytester.collect_by_name(modcol, name)
+
+ def popen(
+ self,
+ cmdargs,
+ stdout: Union[int, TextIO] = subprocess.PIPE,
+ stderr: Union[int, TextIO] = subprocess.PIPE,
+ stdin=CLOSE_STDIN,
+ **kw,
+ ):
+ """See :meth:`Pytester.popen`."""
+ return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw)
+
+ def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult:
+ """See :meth:`Pytester.run`."""
+ return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin)
+
+ def runpython(self, script) -> RunResult:
+ """See :meth:`Pytester.runpython`."""
+ return self._pytester.runpython(script)
+
+ def runpython_c(self, command):
+ """See :meth:`Pytester.runpython_c`."""
+ return self._pytester.runpython_c(command)
+
+ def runpytest_subprocess(self, *args, timeout=None) -> RunResult:
+ """See :meth:`Pytester.runpytest_subprocess`."""
+ return self._pytester.runpytest_subprocess(*args, timeout=timeout)
+
+ def spawn_pytest(
+ self, string: str, expect_timeout: float = 10.0
+ ) -> "pexpect.spawn":
+ """See :meth:`Pytester.spawn_pytest`."""
+ return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout)
+
+ def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn":
+ """See :meth:`Pytester.spawn`."""
+ return self._pytester.spawn(cmd, expect_timeout=expect_timeout)
+
+ def __repr__(self) -> str:
+ return f"<Testdir {self.tmpdir!r}>"
+
+ def __str__(self) -> str:
+ return str(self.tmpdir)
+
+
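A sketch contrasting the legacy wrapper with ``Pytester``: the same operations, but results come back as ``py.path.local``:

.. code-block:: python

    def test_legacy_testdir(testdir):
        p = testdir.makepyfile("def test_ok(): pass")
        assert p.check(file=1)  # py.path.local API, not pathlib.Path
        result = testdir.runpytest()
        result.assert_outcomes(passed=1)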
class LineMatcher:
"""Flexible matching of text.
def __init__(self, lines: List[str]) -> None:
self.lines = lines
- self._log_output = [] # type: List[str]
+ self._log_output: List[str] = []
+
+ def __str__(self) -> str:
+ """Return the entire original text.
+
+ .. versionadded:: 6.2
+ You can use :meth:`str` in older versions.
+ """
+ return "\n".join(self.lines)
def _getlines(self, lines2: Union[str, Sequence[str], Source]) -> Sequence[str]:
if isinstance(lines2, str):
match_func: Callable[[str, str], bool],
match_nickname: str,
*,
- consecutive: bool = False
+ consecutive: bool = False,
) -> None:
"""Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
raise TypeError("invalid type for lines2: {}".format(type(lines2).__name__))
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
- nextline = None
extralines = []
__tracebackhide__ = True
wnick = len(match_nickname) + 1
break
else:
if consecutive and started:
- msg = "no consecutive match: {!r}".format(line)
+ msg = f"no consecutive match: {line!r}"
self._log(msg)
self._log(
"{:>{width}}".format("with:", width=wnick), repr(nextline)
self._log("{:>{width}}".format("and:", width=wnick), repr(nextline))
extralines.append(nextline)
else:
- msg = "remains unmatched: {!r}".format(line)
+ msg = f"remains unmatched: {line!r}"
self._log(msg)
self._fail(msg)
self._log_output = []
wnick = len(match_nickname) + 1
for line in self.lines:
if match_func(line, pat):
- msg = "{}: {!r}".format(match_nickname, pat)
+ msg = f"{match_nickname}: {pat!r}"
self._log(msg)
self._log("{:>{width}}".format("with:", width=wnick), repr(line))
self._fail(msg)
__tracebackhide__ = True
log_text = self._log_text
self._log_output = []
- pytest.fail(log_text)
+ fail(log_text)
def str(self) -> str:
"""Return the entire original text."""
- return "\n".join(self.lines)
+ return str(self)
--- /dev/null
+"""Helper plugin for pytester; should not be loaded on its own."""
+# This plugin contains assertions used by pytester. pytester cannot
+# contain them itself, since it is imported by the `pytest` module,
+# hence cannot be subject to assertion rewriting, which requires a
+# module to not be already imported.
+from typing import Dict
+from typing import Sequence
+from typing import Tuple
+from typing import Union
+
+from _pytest.reports import CollectReport
+from _pytest.reports import TestReport
+
+
+def assertoutcome(
+ outcomes: Tuple[
+ Sequence[TestReport],
+ Sequence[Union[CollectReport, TestReport]],
+ Sequence[Union[CollectReport, TestReport]],
+ ],
+ passed: int = 0,
+ skipped: int = 0,
+ failed: int = 0,
+) -> None:
+ __tracebackhide__ = True
+
+ realpassed, realskipped, realfailed = outcomes
+ obtained = {
+ "passed": len(realpassed),
+ "skipped": len(realskipped),
+ "failed": len(realfailed),
+ }
+ expected = {"passed": passed, "skipped": skipped, "failed": failed}
+ assert obtained == expected, outcomes
+
+
+def assert_outcomes(
+ outcomes: Dict[str, int],
+ passed: int = 0,
+ skipped: int = 0,
+ failed: int = 0,
+ errors: int = 0,
+ xpassed: int = 0,
+ xfailed: int = 0,
+) -> None:
+ """Assert that the specified outcomes appear with the respective
+ numbers (0 means it didn't occur) in the text output from a test run."""
+ __tracebackhide__ = True
+
+ obtained = {
+ "passed": outcomes.get("passed", 0),
+ "skipped": outcomes.get("skipped", 0),
+ "failed": outcomes.get("failed", 0),
+ "errors": outcomes.get("errors", 0),
+ "xpassed": outcomes.get("xpassed", 0),
+ "xfailed": outcomes.get("xfailed", 0),
+ }
+ expected = {
+ "passed": passed,
+ "skipped": skipped,
+ "failed": failed,
+ "errors": errors,
+ "xpassed": xpassed,
+ "xfailed": xfailed,
+ }
+ assert obtained == expected
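The constraint described in the module comment (assertion rewriting requires the module not to be imported yet) also applies to one's own helper modules; a sketch with a hypothetical module name:

.. code-block:: python

    import pytest

    # Must run before "mypkg.checks" is first imported, e.g. in a conftest.py
    # or the package __init__; "mypkg.checks" is a hypothetical helper module.
    pytest.register_assert_rewrite("mypkg.checks")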
import os
import sys
import types
-import typing
import warnings
from collections import Counter
from collections import defaultdict
-from collections.abc import Sequence
from functools import partial
from typing import Any
from typing import Callable
from typing import List
from typing import Mapping
from typing import Optional
+from typing import Sequence
from typing import Set
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import Union
import py
from _pytest.compat import safe_getattr
from _pytest.compat import safe_isclass
from _pytest.compat import STRING_TYPES
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.warning_types import PytestUnhandledCoroutineWarning
if TYPE_CHECKING:
- from typing import Type
from typing_extensions import Literal
from _pytest.fixtures import _Scope
):
return None
ihook = parent.session.gethookproxy(path)
- module = ihook.pytest_pycollect_makemodule(
- path=path, parent=parent
- ) # type: Module
+ module: Module = ihook.pytest_pycollect_makemodule(path=path, parent=parent)
return module
return None
def pytest_pycollect_makemodule(path: py.path.local, parent) -> "Module":
if path.basename == "__init__.py":
- pkg = Package.from_parent(parent, fspath=path) # type: Package
+ pkg: Package = Package.from_parent(parent, fspath=path)
return pkg
- mod = Module.from_parent(parent, fspath=path) # type: Module
+ mod: Module = Module.from_parent(parent, fspath=path)
return mod
# Function and attributes that the mixin needs (for type-checking only).
if TYPE_CHECKING:
- name = "" # type: str
- parent = None # type: Optional[nodes.Node]
- own_markers = [] # type: List[Mark]
+ name: str = ""
+ parent: Optional[nodes.Node] = None
+ own_markers: List[Mark] = []
def getparent(self, cls: Type[nodes._NodeType]) -> Optional[nodes._NodeType]:
...
file_path = sys.modules[obj.__module__].__file__
if file_path.endswith(".pyc"):
file_path = file_path[:-1]
- fspath = file_path # type: Union[py.path.local, str]
+ fspath: Union[py.path.local, str] = file_path
lineno = compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
dicts = [getattr(self.obj, "__dict__", {})]
for basecls in self.obj.__class__.__mro__:
dicts.append(basecls.__dict__)
- seen = set() # type: Set[str]
- values = [] # type: List[Union[nodes.Item, nodes.Collector]]
+ seen: Set[str] = set()
+ values: List[Union[nodes.Item, nodes.Collector]] = []
ihook = self.ihook
for dic in dicts:
# Note: seems like the dict can change during iteration -
fixtureinfo.prune_dependency_tree()
for callspec in metafunc._calls:
- subname = "{}[{}]".format(name, callspec.id)
+ subname = f"{name}[{callspec.id}]"
yield Function.from_parent(
self,
name=subname,
if setup_module is None and teardown_module is None:
return
- @fixtures.fixture(autouse=True, scope="module")
+ @fixtures.fixture(
+ autouse=True,
+ scope="module",
+ # Use a unique name to speed up lookup.
+ name=f"xunit_setup_module_fixture_{self.obj.__name__}",
+ )
def xunit_setup_module_fixture(request) -> Generator[None, None, None]:
if setup_module is not None:
_call_with_optional_argument(setup_module, request.module)
if setup_function is None and teardown_function is None:
return
- @fixtures.fixture(autouse=True, scope="function")
+ @fixtures.fixture(
+ autouse=True,
+ scope="function",
+ # Use a unique name to speed up lookup.
+ name=f"xunit_setup_function_fixture_{self.obj.__name__}",
+ )
def xunit_setup_function_fixture(request) -> Generator[None, None, None]:
if request.instance is not None:
# in this case we are bound to an instance, so we need to let
def _collectfile(
self, path: py.path.local, handle_dupes: bool = True
- ) -> typing.Sequence[nodes.Collector]:
+ ) -> Sequence[nodes.Collector]:
assert (
path.isfile()
), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
init_module, self.config.getini("python_files")
):
yield Module.from_parent(self, fspath=init_module)
- pkg_prefixes = set() # type: Set[py.path.local]
+ pkg_prefixes: Set[py.path.local] = set()
for direntry in visit(str(this_path), recurse=self._recurse):
path = py.path.local(direntry.path)
if setup_class is None and teardown_class is None:
return
- @fixtures.fixture(autouse=True, scope="class")
+ @fixtures.fixture(
+ autouse=True,
+ scope="class",
+ # Use a unique name to speed up lookup.
+ name=f"xunit_setup_class_fixture_{self.obj.__qualname__}",
+ )
def xunit_setup_class_fixture(cls) -> Generator[None, None, None]:
if setup_class is not None:
func = getimfunc(setup_class)
if setup_method is None and teardown_method is None:
return
- @fixtures.fixture(autouse=True, scope="function")
+ @fixtures.fixture(
+ autouse=True,
+ scope="function",
+ # Use a unique name to speed up lookup.
+ name=f"xunit_setup_method_fixture_{self.obj.__qualname__}",
+ )
def xunit_setup_method_fixture(self, request) -> Generator[None, None, None]:
method = request.function
if setup_method is not None:
def hasinit(obj: object) -> bool:
- init = getattr(obj, "__init__", None) # type: object
+ init: object = getattr(obj, "__init__", None)
if init:
return init != object.__init__
return False
def hasnew(obj: object) -> bool:
- new = getattr(obj, "__new__", None) # type: object
+ new: object = getattr(obj, "__new__", None)
if new:
return new != object.__new__
return False
class CallSpec2:
def __init__(self, metafunc: "Metafunc") -> None:
self.metafunc = metafunc
- self.funcargs = {} # type: Dict[str, object]
- self._idlist = [] # type: List[str]
- self.params = {} # type: Dict[str, object]
+ self.funcargs: Dict[str, object] = {}
+ self._idlist: List[str] = []
+ self.params: Dict[str, object] = {}
# Used for sorting parametrized resources.
- self._arg2scopenum = {} # type: Dict[str, int]
- self.marks = [] # type: List[Mark]
- self.indices = {} # type: Dict[str, int]
+ self._arg2scopenum: Dict[str, int] = {}
+ self.marks: List[Mark] = []
+ self.indices: Dict[str, int] = {}
def copy(self) -> "CallSpec2":
cs = CallSpec2(self.metafunc)
def _checkargnotcontained(self, arg: str) -> None:
if arg in self.params or arg in self.funcargs:
- raise ValueError("duplicate {!r}".format(arg))
+ raise ValueError(f"duplicate {arg!r}")
def getparam(self, name: str) -> object:
try:
def setmulti2(
self,
valtypes: Mapping[str, "Literal['params', 'funcargs']"],
- argnames: typing.Sequence[str],
+ argnames: Sequence[str],
valset: Iterable[object],
id: str,
marks: Iterable[Union[Mark, MarkDecorator]],
elif valtype_for_arg == "funcargs":
self.funcargs[arg] = val
else: # pragma: no cover
- assert False, "Unhandled valtype for arg: {}".format(valtype_for_arg)
+ assert False, f"Unhandled valtype for arg: {valtype_for_arg}"
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
self._idlist.append(id)
cls=None,
module=None,
) -> None:
+ #: Access to the underlying :class:`_pytest.python.FunctionDefinition`.
self.definition = definition
#: Access to the :class:`_pytest.config.Config` object for the test session.
#: Class object where the test function is defined in or ``None``.
self.cls = cls
- self._calls = [] # type: List[CallSpec2]
+ self._calls: List[CallSpec2] = []
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
def parametrize(
self,
argnames: Union[str, List[str], Tuple[str, ...]],
- argvalues: Iterable[Union[ParameterSet, typing.Sequence[object], object]],
- indirect: Union[bool, typing.Sequence[str]] = False,
+ argvalues: Iterable[Union[ParameterSet, Sequence[object], object]],
+ indirect: Union[bool, Sequence[str]] = False,
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
] = None,
scope: "Optional[_Scope]" = None,
*,
- _param_mark: Optional[Mark] = None
+ _param_mark: Optional[Mark] = None,
) -> None:
"""Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)
scopenum = scope2index(
- scope, descr="parametrize() call in {}".format(self.function.__name__)
+ scope, descr=f"parametrize() call in {self.function.__name__}"
)
# Create the new calls: if we are parametrize() multiple times (by applying the decorator
def _resolve_arg_ids(
self,
- argnames: typing.Sequence[str],
+ argnames: Sequence[str],
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
],
- parameters: typing.Sequence[ParameterSet],
+ parameters: Sequence[ParameterSet],
nodeid: str,
) -> List[str]:
"""Resolve the actual ids for the given argnames, based on the ``ids`` parameter given
def _validate_ids(
self,
ids: Iterable[Union[None, str, float, int, bool]],
- parameters: typing.Sequence[ParameterSet],
+ parameters: Sequence[ParameterSet],
func_name: str,
) -> List[Union[None, str]]:
try:
return new_ids
def _resolve_arg_value_types(
- self,
- argnames: typing.Sequence[str],
- indirect: Union[bool, typing.Sequence[str]],
+ self, argnames: Sequence[str], indirect: Union[bool, Sequence[str]],
) -> Dict[str, "Literal['params', 'funcargs']"]:
"""Resolve if each parametrized argument must be considered a
parameter to a fixture or a "funcarg" to the function, based on the
* "funcargs" if the argname should be a parameter to the parametrized test function.
"""
if isinstance(indirect, bool):
- valtypes = dict.fromkeys(
+ valtypes: Dict[str, Literal["params", "funcargs"]] = dict.fromkeys(
argnames, "params" if indirect else "funcargs"
- ) # type: Dict[str, Literal["params", "funcargs"]]
+ )
elif isinstance(indirect, Sequence):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
return valtypes
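What the params/funcargs distinction resolved above means in practice, as a minimal sketch:

.. code-block:: python

    import pytest


    @pytest.fixture
    def backend(request):
        # With indirect=True the parametrized value arrives via request.param.
        return {"name": request.param}


    @pytest.mark.parametrize("backend", ["sqlite", "postgres"], indirect=True)
    def test_backend(backend):
        assert backend["name"] in ("sqlite", "postgres")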
def _validate_if_using_arg_names(
- self,
- argnames: typing.Sequence[str],
- indirect: Union[bool, typing.Sequence[str]],
+ self, argnames: Sequence[str], indirect: Union[bool, Sequence[str]],
) -> None:
"""Check if all argnames are being used, by default values, or directly/indirectly.
else:
name = "fixture" if indirect else "argument"
fail(
- "In {}: function uses no {} '{}'".format(func_name, name, arg),
+ f"In {func_name}: function uses no {name} '{arg}'",
pytrace=False,
)
def _find_parametrized_scope(
- argnames: typing.Sequence[str],
- arg2fixturedefs: Mapping[str, typing.Sequence[fixtures.FixtureDef[object]]],
- indirect: Union[bool, typing.Sequence[str]],
+ argnames: Sequence[str],
+ arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]],
+ indirect: Union[bool, Sequence[str]],
) -> "fixtures._Scope":
"""Find the most appropriate scope for a parametrized call based on its arguments.
if generated_id is not None:
val = generated_id
except Exception as e:
- prefix = "{}: ".format(nodeid) if nodeid is not None else ""
+ prefix = f"{nodeid}: " if nodeid is not None else ""
msg = "error raised while trying to determine id of parameter '{}' at position {}"
msg = prefix + msg.format(argname, idx)
raise ValueError(msg) from e
elif config:
- hook_id = config.hook.pytest_make_parametrize_id(
+ hook_id: Optional[str] = config.hook.pytest_make_parametrize_id(
config=config, val=val, argname=argname
- ) # type: Optional[str]
+ )
if hook_id:
return hook_id
return str(val)
elif isinstance(getattr(val, "__name__", None), str):
# Name of a class, function, module, etc.
- name = getattr(val, "__name__") # type: str
+ name: str = getattr(val, "__name__")
return name
return str(argname) + str(idx)
test_id_counts = Counter(resolved_ids)
# Map the test ID to its next suffix.
- test_id_suffixes = defaultdict(int) # type: Dict[str, int]
+ test_id_suffixes: Dict[str, int] = defaultdict(int)
# Suffix non-unique IDs to make them unique.
for index, test_id in enumerate(resolved_ids):
return
if verbose > 0:
bestrel = get_best_relpath(fixture_def.func)
- funcargspec = "{} -- {}".format(argname, bestrel)
+ funcargspec = f"{argname} -- {bestrel}"
else:
funcargspec = argname
tw.line(funcargspec, green=True)
def write_item(item: nodes.Item) -> None:
# Not all items have _fixtureinfo attribute.
- info = getattr(item, "_fixtureinfo", None) # type: Optional[FuncFixtureInfo]
+ info: Optional[FuncFixtureInfo] = getattr(item, "_fixtureinfo", None)
if info is None or not info.name2fixturedefs:
# This test item does not use any fixtures.
return
tw.line()
- tw.sep("-", "fixtures used by {}".format(item.name))
+ tw.sep("-", f"fixtures used by {item.name}")
# TODO: Fix this type ignore.
tw.sep("-", "({})".format(get_best_relpath(item.function))) # type: ignore[attr-defined]
# dict key not used in loop but needed for sorting.
fm = session._fixturemanager
available = []
- seen = set() # type: Set[Tuple[str, str]]
+ seen: Set[Tuple[str, str]] = set()
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
- tw.sep("-", "fixtures defined from {}".format(module))
+ tw.sep("-", f"fixtures defined from {module}")
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if doc:
write_docstring(tw, doc)
else:
- tw.line(" {}: no docstring available".format(loc), red=True)
+ tw.line(f" {loc}: no docstring available", red=True)
tw.line()
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self, self.obj, self.cls, funcargs=True
)
- self._fixtureinfo = fixtureinfo # type: FuncFixtureInfo
+ self._fixtureinfo: FuncFixtureInfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
return super().from_parent(parent=parent, **kw)
def _initrequest(self) -> None:
- self.funcargs = {} # type: Dict[str, object]
- self._request = fixtures.FixtureRequest(self)
+ self.funcargs: Dict[str, object] = {}
+ self._request = fixtures.FixtureRequest(self, _ispytest=True)
@property
def function(self):
def _prunetraceback(self, excinfo: ExceptionInfo[BaseException]) -> None:
if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
- code = _pytest._code.Code(get_real_func(self.obj))
+ code = _pytest._code.Code.from_function(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
class FunctionDefinition(Function):
- """Internal hack until we get actual definition nodes instead of the
- crappy metafunc hack."""
+ """
+ This class is a stopgap solution until we evolve to have actual function
+ definition nodes and manage to get rid of ``metafunc``.
+ """
def runtest(self) -> None:
- raise RuntimeError("function definitions are not supposed to be used")
+ raise RuntimeError("function definitions are not supposed to be run as tests")
setup = runtest
from collections.abc import Mapping
from collections.abc import Sized
from decimal import Decimal
-from numbers import Number
+from numbers import Complex
from types import TracebackType
from typing import Any
from typing import Callable
from typing import cast
from typing import Generic
from typing import Optional
+from typing import overload
from typing import Pattern
from typing import Tuple
+from typing import Type
from typing import TypeVar
from typing import Union
import _pytest._code
from _pytest.compat import final
-from _pytest.compat import overload
from _pytest.compat import STRING_TYPES
-from _pytest.compat import TYPE_CHECKING
from _pytest.outcomes import fail
-if TYPE_CHECKING:
- from typing import Type
-
def _non_numeric_type_error(value, at: Optional[str]) -> TypeError:
- at_str = " at {}".format(at) if at else ""
+ at_str = f" at {at}" if at else ""
return TypeError(
"cannot make approximate comparisons to non-numeric values: {!r} {}".format(
value, at_str
def __repr__(self) -> str:
list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())
- return "approx({!r})".format(list_scalars)
+ return f"approx({list_scalars!r})"
def __eq__(self, actual) -> bool:
import numpy as np
try:
actual = np.asarray(actual)
except Exception as e:
- raise TypeError(
- "cannot compare '{}' to numpy.ndarray".format(actual)
- ) from e
+ raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e
if not np.isscalar(actual) and actual.shape != self.expected.shape:
return False
)
def __eq__(self, actual) -> bool:
- if set(actual.keys()) != set(self.expected.keys()):
+ try:
+ if set(actual.keys()) != set(self.expected.keys()):
+ return False
+ except AttributeError:
return False
return ApproxBase.__eq__(self, actual)
if isinstance(value, type(self.expected)):
msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}"
raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
- elif not isinstance(value, Number):
- raise _non_numeric_type_error(self.expected, at="key={!r}".format(key))
class ApproxSequencelike(ApproxBase):
)
def __eq__(self, actual) -> bool:
- if len(actual) != len(self.expected):
+ try:
+ if len(actual) != len(self.expected):
+ return False
+ except TypeError:
return False
return ApproxBase.__eq__(self, actual)
if isinstance(x, type(self.expected)):
msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}"
raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))
- elif not isinstance(x, Number):
- raise _non_numeric_type_error(
- self.expected, at="index {}".format(index)
- )
class ApproxScalar(ApproxBase):
# Using Real should be better than this Union, but not possible yet:
# https://github.com/python/typeshed/pull/3108
- DEFAULT_ABSOLUTE_TOLERANCE = 1e-12 # type: Union[float, Decimal]
- DEFAULT_RELATIVE_TOLERANCE = 1e-6 # type: Union[float, Decimal]
+ DEFAULT_ABSOLUTE_TOLERANCE: Union[float, Decimal] = 1e-12
+ DEFAULT_RELATIVE_TOLERANCE: Union[float, Decimal] = 1e-6
def __repr__(self) -> str:
"""Return a string communicating both the expected value and the
For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠±180°``.
"""
- # Infinities aren't compared using tolerances, so don't show a
- # tolerance. Need to call abs to handle complex numbers, e.g. (inf + 1j).
- if math.isinf(abs(self.expected)):
+ # Don't show a tolerance for values that aren't compared using
+ # tolerances, i.e. non-numerics and infinities. Need to call abs to
+ # handle complex numbers, e.g. (inf + 1j).
+ if (not isinstance(self.expected, (Complex, Decimal))) or math.isinf(
+ abs(self.expected) # type: ignore[arg-type]
+ ):
return str(self.expected)
# If a sensible tolerance can't be calculated, self.tolerance will
# raise a ValueError. In this case, display '???'.
try:
- vetted_tolerance = "{:.1e}".format(self.tolerance)
- if isinstance(self.expected, complex) and not math.isinf(self.tolerance):
+ vetted_tolerance = f"{self.tolerance:.1e}"
+ if (
+ isinstance(self.expected, Complex)
+ and self.expected.imag
+ and not math.isinf(self.tolerance)
+ ):
vetted_tolerance += " ∠±180°"
except ValueError:
vetted_tolerance = "???"
- return "{} ± {}".format(self.expected, vetted_tolerance)
+ return f"{self.expected} ± {vetted_tolerance}"
def __eq__(self, actual) -> bool:
"""Return whether the given value is equal to the expected value
if actual == self.expected:
return True
+ # If either type is non-numeric, fall back to strict equality.
+ # NB: we need Complex, rather than just Number, to ensure that __abs__,
+ # __sub__, and __float__ are defined.
+ if not (
+ isinstance(self.expected, (Complex, Decimal))
+ and isinstance(actual, (Complex, Decimal))
+ ):
+ return False
+
# Allow the user to control whether NaNs are considered equal to each
# other or not. The abs() calls are for compatibility with complex
# numbers.
- if math.isnan(abs(self.expected)):
- return self.nan_ok and math.isnan(abs(actual))
+ if math.isnan(abs(self.expected)): # type: ignore[arg-type]
+ return self.nan_ok and math.isnan(abs(actual)) # type: ignore[arg-type]
# Infinity shouldn't be approximately equal to anything but itself, but
# if there's a relative tolerance, it will be infinite and infinity
# case would have been short circuited above, so here we can just
# return false if the expected value is infinite. The abs() call is
# for compatibility with complex numbers.
- if math.isinf(abs(self.expected)):
+ if math.isinf(abs(self.expected)): # type: ignore[arg-type]
return False
# Return true if the two numbers are within the tolerance.
- result = abs(self.expected - actual) <= self.tolerance # type: bool
+ result: bool = abs(self.expected - actual) <= self.tolerance
return result
# Ignore type because of https://github.com/python/mypy/issues/4266.
if absolute_tolerance < 0:
raise ValueError(
- "absolute tolerance can't be negative: {}".format(absolute_tolerance)
+ f"absolute tolerance can't be negative: {absolute_tolerance}"
)
if math.isnan(absolute_tolerance):
raise ValueError("absolute tolerance can't be NaN.")
if relative_tolerance < 0:
raise ValueError(
- "relative tolerance can't be negative: {}".format(absolute_tolerance)
+ f"relative tolerance can't be negative: {absolute_tolerance}"
)
if math.isnan(relative_tolerance):
raise ValueError("relative tolerance can't be NaN.")
>>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
True
+ You can also use ``approx`` to compare nonnumeric types, or dicts and
+ sequences containing nonnumeric types, in which case it falls back to
+ strict equality. This can be useful for comparing dicts and sequences that
+ can contain optional values::
+
+ >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None})
+ True
+ >>> [None, 1.0000005] == approx([None, 1])
+ True
+ >>> ["foo", 1.0000005] == approx([None, 1])
+ False
+
If you're thinking about using ``approx``, then you might want to know how
it compares to other good ways of comparing floating-point numbers. All of
these algorithms are based on relative and absolute tolerances and should
both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
``b`` is a "reference value"). You have to specify an absolute tolerance
if you want to compare to ``0.0`` because there is no tolerance by
- default. Only available in python>=3.5. `More information...`__
+ default. `More information...`__
__ https://docs.python.org/3/library/math.html#math.isclose
think of ``b`` as the reference value. Support for comparing sequences
is provided by ``numpy.allclose``. `More information...`__
- __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
+ __ https://numpy.org/doc/stable/reference/generated/numpy.isclose.html
- ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
are within an absolute tolerance of ``1e-7``. No relative tolerance is
follows a fixed behavior. `More information...`__
__ https://docs.python.org/3/reference/datamodel.html#object.__ge__
+
+ .. versionchanged:: 3.7.1
+ ``approx`` raises ``TypeError`` when it encounters a dict value or
+ sequence element of nonnumeric type.
+
+ .. versionchanged:: 6.1.0
+ ``approx`` falls back to strict equality for nonnumeric types instead
+ of raising ``TypeError``.
"""
# Delegate the comparison to a class that knows how to deal with the type
__tracebackhide__ = True
if isinstance(expected, Decimal):
- cls = ApproxDecimal # type: Type[ApproxBase]
- elif isinstance(expected, Number):
- cls = ApproxScalar
+ cls: Type[ApproxBase] = ApproxDecimal
elif isinstance(expected, Mapping):
cls = ApproxMapping
elif _is_numpy_array(expected):
):
cls = ApproxSequencelike
else:
- raise _non_numeric_type_error(expected, at=None)
+ cls = ApproxScalar
return cls(expected, rel, abs, nan_ok)
"""
import sys
- np = sys.modules.get("numpy") # type: Any
+ np: Any = sys.modules.get("numpy")
if np is not None:
return isinstance(obj, np.ndarray)
return False
@overload
def raises(
- expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
+ expected_exception: Union[Type[_E], Tuple[Type[_E], ...]],
*,
- match: "Optional[Union[str, Pattern[str]]]" = ...
+ match: Optional[Union[str, Pattern[str]]] = ...,
) -> "RaisesContext[_E]":
...
-@overload # noqa: F811
-def raises( # noqa: F811
- expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
+@overload
+def raises(
+ expected_exception: Union[Type[_E], Tuple[Type[_E], ...]],
func: Callable[..., Any],
*args: Any,
- **kwargs: Any
+ **kwargs: Any,
) -> _pytest._code.ExceptionInfo[_E]:
...
-def raises( # noqa: F811
- expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
- *args: Any,
- **kwargs: Any
+def raises(
+ expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], *args: Any, **kwargs: Any
) -> Union["RaisesContext[_E]", _pytest._code.ExceptionInfo[_E]]:
r"""Assert that a code block/function call raises ``expected_exception``
or raise a failure exception otherwise.
Use ``pytest.raises`` as a context manager, which will capture the exception of the given
type::
- >>> with raises(ZeroDivisionError):
+ >>> import pytest
+ >>> with pytest.raises(ZeroDivisionError):
... 1/0
If the code block does not raise the expected exception (``ZeroDivisionError`` in the example
You can also use the keyword argument ``match`` to assert that the
exception matches a text or regex::
- >>> with raises(ValueError, match='must be 0 or None'):
+ >>> with pytest.raises(ValueError, match='must be 0 or None'):
... raise ValueError("value must be 0 or None")
- >>> with raises(ValueError, match=r'must be \d+$'):
+ >>> with pytest.raises(ValueError, match=r'must be \d+$'):
... raise ValueError("value must be 42")
The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
details of the captured exception::
- >>> with raises(ValueError) as exc_info:
+ >>> with pytest.raises(ValueError) as exc_info:
... raise ValueError("value must be 42")
>>> assert exc_info.type is ValueError
>>> assert exc_info.value.args[0] == "value must be 42"
not be executed. For example::
>>> value = 15
- >>> with raises(ValueError) as exc_info:
+ >>> with pytest.raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
... assert exc_info.type is ValueError # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
- >>> with raises(ValueError) as exc_info:
+ >>> with pytest.raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
...
__tracebackhide__ = True
if isinstance(expected_exception, type):
- excepted_exceptions = (expected_exception,) # type: Tuple[Type[_E], ...]
+ excepted_exceptions: Tuple[Type[_E], ...] = (expected_exception,)
else:
excepted_exceptions = expected_exception
for exc in excepted_exceptions:
not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__
raise TypeError(msg.format(not_a))
- message = "DID NOT RAISE {}".format(expected_exception)
+ message = f"DID NOT RAISE {expected_exception}"
if not args:
- match = kwargs.pop("match", None) # type: Optional[Union[str, Pattern[str]]]
+ match: Optional[Union[str, Pattern[str]]] = kwargs.pop("match", None)
if kwargs:
msg = "Unexpected keyword arguments passed to pytest.raises: "
msg += ", ".join(sorted(kwargs))
class RaisesContext(Generic[_E]):
def __init__(
self,
- expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
+ expected_exception: Union[Type[_E], Tuple[Type[_E], ...]],
message: str,
- match_expr: Optional[Union[str, "Pattern[str]"]] = None,
+ match_expr: Optional[Union[str, Pattern[str]]] = None,
) -> None:
self.expected_exception = expected_exception
self.message = message
self.match_expr = match_expr
- self.excinfo = None # type: Optional[_pytest._code.ExceptionInfo[_E]]
+ self.excinfo: Optional[_pytest._code.ExceptionInfo[_E]] = None
def __enter__(self) -> _pytest._code.ExceptionInfo[_E]:
self.excinfo = _pytest._code.ExceptionInfo.for_later()
def __exit__(
self,
- exc_type: Optional["Type[BaseException]"],
+ exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> bool:
if not issubclass(exc_type, self.expected_exception):
return False
# Cast to narrow the exception type now that it's verified.
- exc_info = cast(
- Tuple["Type[_E]", _E, TracebackType], (exc_type, exc_val, exc_tb)
- )
+ exc_info = cast(Tuple[Type[_E], _E, TracebackType], (exc_type, exc_val, exc_tb))
self.excinfo.fill_unfilled(exc_info)
if self.match_expr is not None:
self.excinfo.match(self.match_expr)
from typing import Iterator
from typing import List
from typing import Optional
+from typing import overload
from typing import Pattern
from typing import Tuple
+from typing import Type
from typing import TypeVar
from typing import Union
from _pytest.compat import final
-from _pytest.compat import overload
-from _pytest.compat import TYPE_CHECKING
+from _pytest.deprecated import check_ispytest
from _pytest.fixtures import fixture
from _pytest.outcomes import fail
-if TYPE_CHECKING:
- from typing import Type
-
T = TypeVar("T")
See http://docs.python.org/library/warnings.html for information
on warning categories.
"""
- wrec = WarningsRecorder()
+ wrec = WarningsRecorder(_ispytest=True)
with wrec:
warnings.simplefilter("default")
yield wrec
@overload
def deprecated_call(
- *, match: Optional[Union[str, "Pattern[str]"]] = ...
+ *, match: Optional[Union[str, Pattern[str]]] = ...
) -> "WarningsRecorder":
...
-@overload # noqa: F811
-def deprecated_call( # noqa: F811
- func: Callable[..., T], *args: Any, **kwargs: Any
-) -> T:
+@overload
+def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T:
...
-def deprecated_call( # noqa: F811
+def deprecated_call(
func: Optional[Callable[..., Any]] = None, *args: Any, **kwargs: Any
) -> Union["WarningsRecorder", Any]:
"""Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning``.
... warnings.warn('use v3 of this api', DeprecationWarning)
... return 200
- >>> with deprecated_call():
+ >>> import pytest
+ >>> with pytest.deprecated_call():
... assert api_call_v2() == 200
It can also be used by passing a function and ``*args`` and ``**kwargs``,
@overload
def warns(
- expected_warning: Optional[Union["Type[Warning]", Tuple["Type[Warning]", ...]]],
+ expected_warning: Optional[Union[Type[Warning], Tuple[Type[Warning], ...]]],
*,
- match: "Optional[Union[str, Pattern[str]]]" = ...
+ match: Optional[Union[str, Pattern[str]]] = ...,
) -> "WarningsChecker":
...
-@overload # noqa: F811
-def warns( # noqa: F811
- expected_warning: Optional[Union["Type[Warning]", Tuple["Type[Warning]", ...]]],
+@overload
+def warns(
+ expected_warning: Optional[Union[Type[Warning], Tuple[Type[Warning], ...]]],
func: Callable[..., T],
*args: Any,
- **kwargs: Any
+ **kwargs: Any,
) -> T:
...
-def warns( # noqa: F811
- expected_warning: Optional[Union["Type[Warning]", Tuple["Type[Warning]", ...]]],
+def warns(
+ expected_warning: Optional[Union[Type[Warning], Tuple[Type[Warning], ...]]],
*args: Any,
- match: Optional[Union[str, "Pattern[str]"]] = None,
- **kwargs: Any
+ match: Optional[Union[str, Pattern[str]]] = None,
+ **kwargs: Any,
) -> Union["WarningsChecker", Any]:
r"""Assert that code raises a particular class of warning.
one for each warning raised.
This function can be used as a context manager, or any of the other ways
- ``pytest.raises`` can be used::
+ :func:`pytest.raises` can be used::
- >>> with warns(RuntimeWarning):
+ >>> import pytest
+ >>> with pytest.warns(RuntimeWarning):
... warnings.warn("my warning", RuntimeWarning)
In the context manager form you may use the keyword argument ``match`` to assert
that the warning matches a text or regex::
- >>> with warns(UserWarning, match='must be 0 or None'):
+ >>> with pytest.warns(UserWarning, match='must be 0 or None'):
... warnings.warn("value must be 0 or None", UserWarning)
- >>> with warns(UserWarning, match=r'must be \d+$'):
+ >>> with pytest.warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("value must be 42", UserWarning)
- >>> with warns(UserWarning, match=r'must be \d+$'):
+ >>> with pytest.warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("this is not here", UserWarning)
Traceback (most recent call last):
...
msg += ", ".join(sorted(kwargs))
msg += "\nUse context-manager form instead?"
raise TypeError(msg)
- return WarningsChecker(expected_warning, match_expr=match)
+ return WarningsChecker(expected_warning, match_expr=match, _ispytest=True)
else:
func = args[0]
if not callable(func):
raise TypeError(
"{!r} object (type: {}) must be callable".format(func, type(func))
)
- with WarningsChecker(expected_warning):
+ with WarningsChecker(expected_warning, _ispytest=True):
return func(*args[1:], **kwargs)
Adapted from `warnings.catch_warnings`.
"""
- def __init__(self) -> None:
+ def __init__(self, *, _ispytest: bool = False) -> None:
+ check_ispytest(_ispytest)
# Type ignored due to the way typeshed handles warnings.catch_warnings.
super().__init__(record=True) # type: ignore[call-arg]
self._entered = False
- self._list = [] # type: List[warnings.WarningMessage]
+ self._list: List[warnings.WarningMessage] = []
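The ``_ispytest`` keyword threaded through these constructors marks them as private: code that instantiates them directly without passing ``_ispytest=True`` gets a deprecation warning. A minimal sketch of the guard, assuming it lives in ``_pytest.deprecated`` as imported above (the exact warning class and message are not shown in this diff):

    from warnings import warn

    def check_ispytest(ispytest: bool) -> None:
        # Warn callers that did not pass _ispytest=True, i.e. external code
        # using a private pytest constructor.
        if not ispytest:
            warn("A private pytest class or function was used.", stacklevel=3)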
@property
def list(self) -> List["warnings.WarningMessage"]:
"""The number of recorded warnings."""
return len(self._list)
- def pop(self, cls: "Type[Warning]" = Warning) -> "warnings.WarningMessage":
+ def pop(self, cls: Type[Warning] = Warning) -> "warnings.WarningMessage":
"""Pop the first recorded warning, raise exception if not exists."""
for i, w in enumerate(self._list):
if issubclass(w.category, cls):
def __exit__(
self,
- exc_type: Optional["Type[BaseException]"],
+ exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
def __init__(
self,
expected_warning: Optional[
- Union["Type[Warning]", Tuple["Type[Warning]", ...]]
+ Union[Type[Warning], Tuple[Type[Warning], ...]]
] = None,
- match_expr: Optional[Union[str, "Pattern[str]"]] = None,
+ match_expr: Optional[Union[str, Pattern[str]]] = None,
+ *,
+ _ispytest: bool = False,
) -> None:
- super().__init__()
+ check_ispytest(_ispytest)
+ super().__init__(_ispytest=True)
msg = "exceptions must be derived from Warning, not %s"
if expected_warning is None:
def __exit__(
self,
- exc_type: Optional["Type[BaseException]"],
+ exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
from io import StringIO
+from pathlib import Path
from pprint import pprint
from typing import Any
from typing import cast
from typing import List
from typing import Optional
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from _pytest._code.code import TerminalRepr
from _pytest._io import TerminalWriter
from _pytest.compat import final
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.nodes import Collector
from _pytest.nodes import Item
from _pytest.outcomes import skip
-from _pytest.pathlib import Path
if TYPE_CHECKING:
from typing import NoReturn
- from typing_extensions import Type
from typing_extensions import Literal
from _pytest.runner import CallInfo
class BaseReport:
- when = None # type: Optional[str]
- location = None # type: Optional[Tuple[str, Optional[int], str]]
- longrepr = (
- None
- ) # type: Union[None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr]
- sections = [] # type: List[Tuple[str, str]]
- nodeid = None # type: str
+ when: Optional[str]
+ location: Optional[Tuple[str, Optional[int], str]]
+ longrepr: Union[
+ None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr
+ ]
+ sections: List[Tuple[str, str]]
+ nodeid: str
def __init__(self, **kw: Any) -> None:
self.__dict__.update(kw)
return _report_to_json(self)
@classmethod
- def _from_json(cls: "Type[_R]", reportdict: Dict[str, object]) -> _R:
+ def _from_json(cls: Type[_R], reportdict: Dict[str, object]) -> _R:
"""Create either a TestReport or CollectReport, depending on the calling class.
It is the caller's responsibility to know which class to pass here.
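A hedged sketch of how these private helpers round-trip a report, the mechanism plugins such as pytest-xdist rely on to ship reports between processes (``report`` here stands for any existing ``TestReport``):

    data = report._to_json()              # plain, JSON-serializable dict
    clone = TestReport._from_json(data)   # rebuilt on the receiving side
    assert clone.nodeid == report.nodeid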
def _report_unserialization_failure(
- type_name: str, report_class: "Type[BaseReport]", reportdict
+ type_name: str, report_class: Type[BaseReport], reportdict
) -> "NoReturn":
url = "https://github.com/pytest-dev/pytest/issues"
stream = StringIO()
sections: Iterable[Tuple[str, str]] = (),
duration: float = 0,
user_properties: Optional[Iterable[Tuple[str, object]]] = None,
- **extra
+ **extra,
) -> None:
#: Normalized collection nodeid.
self.nodeid = nodeid
#: A (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
- self.location = location # type: Tuple[str, Optional[int], str]
+ self.location: Tuple[str, Optional[int], str] = location
#: A name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
excinfo = call.excinfo
sections = []
if not call.excinfo:
- outcome = "passed" # type: Literal["passed", "failed", "skipped"]
- longrepr = (
- None
- ) # type: Union[None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr]
+ outcome: Literal["passed", "failed", "skipped"] = "passed"
+ longrepr: Union[
+ None,
+ ExceptionInfo[BaseException],
+ Tuple[str, int, str],
+ str,
+ TerminalRepr,
+ ] = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
excinfo, style=item.config.getoption("tbstyle", "auto")
)
for rwhen, key, content in item._report_sections:
- sections.append(("Captured {} {}".format(key, rwhen), content))
+ sections.append((f"Captured {key} {rwhen}", content))
return cls(
item.nodeid,
item.location,
longrepr,
result: Optional[List[Union[Item, Collector]]],
sections: Iterable[Tuple[str, str]] = (),
- **extra
+ **extra,
) -> None:
#: Normalized collection nodeid.
self.nodeid = nodeid
assert rep.longrepr is not None
# TODO: Investigate whether the duck typing is really necessary here.
longrepr = cast(ExceptionRepr, rep.longrepr)
- result = {
+ result: Dict[str, Any] = {
"reprcrash": serialize_repr_crash(longrepr.reprcrash),
"reprtraceback": serialize_repr_traceback(longrepr.reprtraceback),
"sections": longrepr.sections,
- } # type: Dict[str, Any]
+ }
if isinstance(longrepr, ExceptionChainRepr):
result["chain"] = []
for repr_traceback, repr_crash, description in longrepr.chain:
if data["reprlocals"]:
reprlocals = ReprLocals(data["reprlocals"]["lines"])
- reprentry = ReprEntry(
+ reprentry: Union[ReprEntry, ReprEntryNative] = ReprEntry(
lines=data["lines"],
reprfuncargs=reprfuncargs,
reprlocals=reprlocals,
reprfileloc=reprfileloc,
style=data["style"],
- ) # type: Union[ReprEntry, ReprEntryNative]
+ )
elif entry_type == "ReprEntryNative":
reprentry = ReprEntryNative(data["lines"])
else:
description,
)
)
- exception_info = ExceptionChainRepr(
- chain
- ) # type: Union[ExceptionChainRepr,ReprExceptionInfo]
+ exception_info: Union[
+ ExceptionChainRepr, ReprExceptionInfo
+ ] = ExceptionChainRepr(chain)
else:
exception_info = ReprExceptionInfo(reprtraceback, reprcrash)
from typing import List
from typing import Optional
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import TerminalRepr
from _pytest.compat import final
-from _pytest.compat import TYPE_CHECKING
from _pytest.config.argparsing import Parser
from _pytest.nodes import Collector
from _pytest.nodes import Item
from _pytest.outcomes import TEST_OUTCOME
if TYPE_CHECKING:
- from typing import Type
from typing_extensions import Literal
from _pytest.main import Session
dlist.append(rep)
if not dlist:
return
- dlist.sort(key=lambda x: x.duration)
- dlist.reverse()
+ dlist.sort(key=lambda x: x.duration, reverse=True) # type: ignore[no-any-return]
if not durations:
tr.write_sep("=", "slowest durations")
else:
% (len(dlist) - i, durations_min)
)
break
- tr.write_line("{:02.2f}s {:<8} {}".format(rep.duration, rep.when, rep.nodeid))
+ tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}")
def pytest_sessionstart(session: "Session") -> None:
"""
var_name = "PYTEST_CURRENT_TEST"
if when:
- value = "{} ({})".format(item.nodeid, when)
+ value = f"{item.nodeid} ({when})"
# don't allow null bytes on environment variables (see #2644, #2957)
value = value.replace("\x00", "(null)")
os.environ[var_name] = value
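For illustration, while ``test_example`` in ``test_mod.py`` is in its ``call`` phase, the variable (inherited by child processes) looks like this:

    import os

    # Set by pytest for the duration of each phase; "(null)" replaces any
    # null bytes, as noted above.
    os.environ.get("PYTEST_CURRENT_TEST")  # "test_mod.py::test_example (call)"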
) -> TestReport:
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
- report = hook.pytest_runtest_makereport(item=item, call=call) # type: TestReport
+ report: TestReport = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
item: Item, when: "Literal['setup', 'call', 'teardown']", **kwds
) -> "CallInfo[None]":
if when == "setup":
- ihook = item.ihook.pytest_runtest_setup # type: Callable[..., None]
+ ihook: Callable[..., None] = item.ihook.pytest_runtest_setup
elif when == "call":
ihook = item.ihook.pytest_runtest_call
elif when == "teardown":
ihook = item.ihook.pytest_runtest_teardown
else:
- assert False, "Unhandled runtest hook case: {}".format(when)
- reraise = (Exit,) # type: Tuple[Type[BaseException], ...]
+ assert False, f"Unhandled runtest hook case: {when}"
+ reraise: Tuple[Type[BaseException], ...] = (Exit,)
if not item.config.getoption("usepdb", False):
reraise += (KeyboardInterrupt,)
return CallInfo.from_call(
@property
def result(self) -> TResult:
if self.excinfo is not None:
- raise AttributeError("{!r} has no valid result".format(self))
+ raise AttributeError(f"{self!r} has no valid result")
# The cast is safe because an exception wasn't raised, hence
# _result has the expected function return type (which may be
# None, that's why a cast and not an assert).
cls,
func: "Callable[[], TResult]",
when: "Literal['collect', 'setup', 'call', 'teardown']",
- reraise: "Optional[Union[Type[BaseException], Tuple[Type[BaseException], ...]]]" = None,
+ reraise: Optional[
+ Union[Type[BaseException], Tuple[Type[BaseException], ...]]
+ ] = None,
) -> "CallInfo[TResult]":
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
- result = func() # type: Optional[TResult]
+ result: Optional[TResult] = func()
except BaseException:
excinfo = ExceptionInfo.from_current()
if reraise is not None and isinstance(excinfo.value, reraise):
def __repr__(self) -> str:
if self.excinfo is None:
- return "<CallInfo when={!r} result: {!r}>".format(self.when, self._result)
- return "<CallInfo when={!r} excinfo={!r}>".format(self.when, self.excinfo)
+ return f"<CallInfo when={self.when!r} result: {self._result!r}>"
+ return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
def pytest_make_collect_report(collector: Collector) -> CollectReport:
call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
- longrepr = None # type: Union[None, Tuple[str, int, str], str, TerminalRepr]
+ longrepr: Union[None, Tuple[str, int, str], str, TerminalRepr] = None
if not call.excinfo:
- outcome = "passed" # type: Literal["passed", "skipped", "failed"]
+ outcome: Literal["passed", "skipped", "failed"] = "passed"
else:
skip_exceptions = [Skipped]
unittest = sys.modules.get("unittest")
"""Shared state for setting up/tearing down test items or collectors."""
def __init__(self):
- self.stack = [] # type: List[Node]
- self._finalizers = {} # type: Dict[Node, List[Callable[[], object]]]
+ self.stack: List[Node] = []
+ self._finalizers: Dict[Node, List[Callable[[], object]]] = {}
def addfinalizer(self, finalizer: Callable[[], object], colitem) -> None:
"""Attach a finalizer to the given colitem."""
def collect_one_node(collector: Collector) -> CollectReport:
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
- rep = ihook.pytest_make_collect_report(collector=collector) # type: CollectReport
+ rep: CollectReport = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
import platform
import sys
import traceback
+from collections.abc import Mapping
from typing import Generator
from typing import Optional
from typing import Tuple
+from typing import Type
import attr
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.runner import CallInfo
from _pytest.store import StoreKey
-if TYPE_CHECKING:
- from typing import Type
-
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("general")
"platform": platform,
"config": item.config,
}
+ for dictionary in reversed(
+ item.ihook.pytest_markeval_namespace(config=item.config)
+ ):
+ if not isinstance(dictionary, Mapping):
+ raise ValueError(
+ "pytest_markeval_namespace() needs to return a dict, got {!r}".format(
+ dictionary
+ )
+ )
+ globals_.update(dictionary)
if hasattr(item, "obj"):
globals_.update(item.obj.__globals__) # type: ignore[attr-defined]
try:
- filename = "<{} condition>".format(mark.name)
+ filename = f"<{mark.name} condition>"
condition_code = compile(condition, filename, "eval")
result = eval(condition_code, globals_)
except SyntaxError as exc:
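To make the new ``pytest_markeval_namespace`` hook concrete, here is a hypothetical ``conftest.py`` (the hook name comes from the code above; the names it injects are made up) that adds extra globals to ``skipif``/``xfail`` condition strings:

    # conftest.py
    def pytest_markeval_namespace(config):
        # Each returned mapping is merged into the condition namespace.
        return {"db_available": False}

    # A test module can then write:
    #   @pytest.mark.skipif("not db_available", reason="needs a database")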
reason = attr.ib(type=str)
run = attr.ib(type=bool)
strict = attr.ib(type=bool)
- raises = attr.ib(type=Optional[Tuple["Type[BaseException]", ...]])
+ raises = attr.ib(type=Optional[Tuple[Type[BaseException], ...]])
def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
if unexpectedsuccess_key in item._store and rep.when == "call":
reason = item._store[unexpectedsuccess_key]
if reason:
- rep.longrepr = "Unexpected success: {}".format(reason)
+ rep.longrepr = f"Unexpected success: {reason}"
else:
rep.longrepr = "Unexpected success"
rep.outcome = "failed"
from typing import List
from typing import Optional
+from typing import TYPE_CHECKING
import pytest
from _pytest import nodes
from _pytest.main import Session
from _pytest.reports import TestReport
+if TYPE_CHECKING:
+ from _pytest.cacheprovider import Cache
+
+STEPWISE_CACHE_DIR = "cache/stepwise"
+
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("general")
"--sw",
"--stepwise",
action="store_true",
+ default=False,
dest="stepwise",
help="exit on test failure and continue from last failing test next time",
)
group.addoption(
+ "--sw-skip",
"--stepwise-skip",
action="store_true",
+ default=False,
dest="stepwise_skip",
help="ignore the first failing test but stop on the next failing test",
)
@pytest.hookimpl
def pytest_configure(config: Config) -> None:
- config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin")
+ # We should always have a cache as cache provider plugin uses tryfirst=True
+ if config.getoption("stepwise"):
+ config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin")
+
+
+def pytest_sessionfinish(session: Session) -> None:
+ if not session.config.getoption("stepwise"):
+ assert session.config.cache is not None
+ # Clear the list of failing tests if the plugin is not active.
+ session.config.cache.set(STEPWISE_CACHE_DIR, [])
class StepwisePlugin:
def __init__(self, config: Config) -> None:
self.config = config
- self.active = config.getvalue("stepwise")
- self.session = None # type: Optional[Session]
+ self.session: Optional[Session] = None
self.report_status = ""
-
- if self.active:
- assert config.cache is not None
- self.lastfailed = config.cache.get("cache/stepwise", None)
- self.skip = config.getvalue("stepwise_skip")
+ assert config.cache is not None
+ self.cache: Cache = config.cache
+ self.lastfailed: Optional[str] = self.cache.get(STEPWISE_CACHE_DIR, None)
+ self.skip: bool = config.getoption("stepwise_skip")
def pytest_sessionstart(self, session: Session) -> None:
self.session = session
def pytest_collection_modifyitems(
- self, session: Session, config: Config, items: List[nodes.Item]
+ self, config: Config, items: List[nodes.Item]
) -> None:
- if not self.active:
- return
if not self.lastfailed:
self.report_status = "no previously failed tests, not skipping."
return
- already_passed = []
- found = False
-
- # Make a list of all tests that have been run before the last failing one.
- for item in items:
+ # check all item nodes until we find a match on last failed
+ failed_index = None
+ for index, item in enumerate(items):
if item.nodeid == self.lastfailed:
- found = True
+ failed_index = index
break
- else:
- already_passed.append(item)
# If the previously failed test was not found among the test items,
# do not skip any tests.
- if not found:
+ if failed_index is None:
self.report_status = "previously failed test not found, not skipping."
- already_passed = []
else:
- self.report_status = "skipping {} already passed items.".format(
- len(already_passed)
- )
-
- for item in already_passed:
- items.remove(item)
-
- config.hook.pytest_deselected(items=already_passed)
+ self.report_status = f"skipping {failed_index} already passed items."
+ deselected = items[:failed_index]
+ del items[:failed_index]
+ config.hook.pytest_deselected(items=deselected)
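The index-based deselection above can be traced with plain lists standing in for ``Item`` nodes (illustrative only):

    items = ["test_a", "test_b", "test_c", "test_d"]  # stand-ins for Items
    failed_index = 2                                  # lastfailed == "test_c"
    deselected = items[:failed_index]   # ["test_a", "test_b"] get deselected
    del items[:failed_index]            # ["test_c", "test_d"] remain to run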
def pytest_runtest_logreport(self, report: TestReport) -> None:
- if not self.active:
- return
-
if report.failed:
if self.skip:
# Remove test from the failed ones (if it exists) and unset the skip option
self.lastfailed = None
def pytest_report_collectionfinish(self) -> Optional[str]:
- if self.active and self.config.getoption("verbose") >= 0 and self.report_status:
- return "stepwise: %s" % self.report_status
+ if self.config.getoption("verbose") >= 0 and self.report_status:
+ return f"stepwise: {self.report_status}"
return None
- def pytest_sessionfinish(self, session: Session) -> None:
- assert self.config.cache is not None
- if self.active:
- self.config.cache.set("cache/stepwise", self.lastfailed)
- else:
- # Clear the list of failing tests if the plugin is not active.
- self.config.cache.set("cache/stepwise", [])
+ def pytest_sessionfinish(self) -> None:
+ self.cache.set(STEPWISE_CACHE_DIR, self.lastfailed)
__slots__ = ("_store",)
def __init__(self) -> None:
- self._store = {} # type: Dict[StoreKey[Any], object]
+ self._store: Dict[StoreKey[Any], object] = {}
def __setitem__(self, key: StoreKey[T], value: T) -> None:
"""Set a value for key."""
import platform
import sys
import warnings
+from collections import Counter
from functools import partial
+from pathlib import Path
from typing import Any
from typing import Callable
+from typing import cast
from typing import Dict
from typing import Generator
from typing import List
from typing import Set
from typing import TextIO
from typing import Tuple
+from typing import TYPE_CHECKING
from typing import Union
import attr
import pluggy
import py
-import pytest
+import _pytest._version
from _pytest import nodes
from _pytest import timing
from _pytest._code import ExceptionInfo
from _pytest._code.code import ExceptionRepr
from _pytest._io.wcwidth import wcswidth
from _pytest.compat import final
-from _pytest.compat import order_preserving_dict
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import _PluggyPlugin
from _pytest.config import Config
from _pytest.config import ExitCode
+from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.nodes import Item
from _pytest.nodes import Node
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
-from _pytest.pathlib import Path
from _pytest.reports import BaseReport
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
def getreportopt(config: Config) -> str:
- reportchars = config.option.reportchars # type: str
+ reportchars: str = config.option.reportchars
old_aliases = {"F", "S"}
reportopts = ""
return reportopts
-@pytest.hookimpl(trylast=True) # after _pytest.runner
+@hookimpl(trylast=True) # after _pytest.runner
def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]:
letter = "F"
if report.passed:
elif report.skipped:
letter = "s"
- outcome = report.outcome # type: str
+ outcome: str = report.outcome
if report.when in ("collect", "setup", "teardown") and outcome == "failed":
outcome = "error"
letter = "E"
relpath = bestrelpath(
config.invocation_params.dir, absolutepath(filename)
)
- return "{}:{}".format(relpath, linenum)
+ return f"{relpath}:{linenum}"
else:
return str(self.fslocation)
return None
self.config = config
self._numcollected = 0
- self._session = None # type: Optional[Session]
- self._showfspath = None # type: Optional[bool]
+ self._session: Optional[Session] = None
+ self._showfspath: Optional[bool] = None
- self.stats = {} # type: Dict[str, List[Any]]
- self._main_color = None # type: Optional[str]
- self._known_types = None # type: Optional[List[str]]
+ self.stats: Dict[str, List[Any]] = {}
+ self._main_color: Optional[str] = None
+ self._known_types: Optional[List[str]] = None
self.startdir = config.invocation_dir
self.startpath = config.invocation_params.dir
if file is None:
file = sys.stdout
self._tw = _pytest.config.create_terminal_writer(config, file)
self._screen_width = self._tw.fullwidth
- self.currentfspath = None # type: Union[None, Path, str, int]
+ self.currentfspath: Union[None, Path, str, int] = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
- self._progress_nodeids_reported = set() # type: Set[str]
+ self._progress_nodeids_reported: Set[str] = set()
self._show_progress_info = self._determine_show_progress_info()
- self._collect_report_last_write = None # type: Optional[float]
- self._already_displayed_warnings = None # type: Optional[int]
- self._keyboardinterrupt_memo = None # type: Optional[ExceptionRepr]
+ self._collect_report_last_write: Optional[float] = None
+ self._already_displayed_warnings: Optional[int] = None
+ self._keyboardinterrupt_memo: Optional[ExceptionRepr] = None
def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]":
"""Return whether we should display progress information based on the current config."""
# do not show progress if we are showing fixture setup/teardown
if self.config.getoption("setupshow", False):
return False
- cfg = self.config.getini("console_output_style") # type: str
+ cfg: str = self.config.getini("console_output_style")
if cfg == "progress":
return "progress"
elif cfg == "count":
@property
def verbosity(self) -> int:
- verbosity = self.config.option.verbose # type: int
+ verbosity: int = self.config.option.verbose
return verbosity
@property
sep: str,
title: Optional[str] = None,
fullwidth: Optional[int] = None,
- **markup: bool
+ **markup: bool,
) -> None:
self.ensure_newline()
self._tw.sep(sep, title, fullwidth, **markup)
def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
if self.config.option.traceconfig:
- msg = "PLUGIN registered: {}".format(plugin)
+ msg = f"PLUGIN registered: {plugin}"
# XXX This event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line.
def pytest_runtest_logreport(self, report: TestReport) -> None:
self._tests_ran = True
rep = report
- res = self.config.hook.pytest_report_teststatus(
- report=rep, config=self.config
- ) # type: Tuple[str, str, Union[str, Tuple[str, Mapping[str, bool]]]]
+ res: Tuple[
+ str, str, Union[str, Tuple[str, Mapping[str, bool]]]
+ ] = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
category, letter, word = res
if not isinstance(word, tuple):
markup = None
line = self._locationline(rep.nodeid, *rep.location)
if not running_xdist:
self.write_ensure_prefix(line, word, **markup)
+ if rep.skipped or hasattr(report, "wasxfail"):
+ available_width = (
+ (self._tw.fullwidth - self._tw.width_of_current_line)
+ - len(" [100%]")
+ - 1
+ )
+ reason = _get_raw_skip_reason(rep)
+ reason_ = _format_trimmed(" ({})", reason, available_width)
+ if reason and reason_ is not None:
+ self._tw.write(reason_)
if self._show_progress_info:
self._write_progress_information_filling_space()
else:
if collected:
progress = self._progress_nodeids_reported
counter_format = "{{:{}d}}".format(len(str(collected)))
- format_string = " [{}/{{}}]".format(counter_format)
+ format_string = f" [{counter_format}/{{}}]"
return format_string.format(len(progress), collected)
- return " [ {} / {} ]".format(collected, collected)
+ return f" [ {collected} / {collected} ]"
else:
if collected:
return " [{:3d}%]".format(
self._add_stats("error", [report])
elif report.skipped:
self._add_stats("skipped", [report])
- items = [x for x in report.result if isinstance(x, pytest.Item)]
+ items = [x for x in report.result if isinstance(x, Item)]
self._numcollected += len(items)
if self.isatty:
self.report_collect()
else:
self.write_line(line)
- @pytest.hookimpl(trylast=True)
+ @hookimpl(trylast=True)
def pytest_sessionstart(self, session: "Session") -> None:
self._session = session
self._sessionstarttime = timing.time()
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
if not self.no_header:
- msg = "platform {} -- Python {}".format(sys.platform, verinfo)
+ msg = f"platform {sys.platform} -- Python {verinfo}"
pypy_version_info = getattr(sys, "pypy_version_info", None)
if pypy_version_info:
verinfo = ".".join(map(str, pypy_version_info[:3]))
msg += "[pypy-{}-{}]".format(verinfo, pypy_version_info[3])
msg += ", pytest-{}, py-{}, pluggy-{}".format(
- pytest.__version__, py.__version__, pluggy.__version__
+ _pytest._version.version, py.__version__, pluggy.__version__
)
if (
self.verbosity > 0
if config.inipath:
line += ", configfile: " + bestrelpath(config.rootpath, config.inipath)
- testpaths = config.getini("testpaths") # type: List[str]
- if testpaths and config.args == testpaths:
+ testpaths: List[str] = config.getini("testpaths")
+ if config.invocation_params.dir == config.rootpath and config.args == testpaths:
line += ", testpaths: {}".format(", ".join(testpaths))
result = [line]
# because later versions are going to get rid of them anyway.
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
- counts = {} # type: Dict[str, int]
- for item in items:
- name = item.nodeid.split("::", 1)[0]
- counts[name] = counts.get(name, 0) + 1
+ counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
self._tw.line(item.nodeid)
return
- stack = [] # type: List[Node]
+ stack: List[Node] = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
if col.name == "()": # Skip Instances.
continue
indent = (len(stack) - 1) * " "
- self._tw.line("{}{}".format(indent, col))
+ self._tw.line(f"{indent}{col}")
if self.config.option.verbose >= 1:
obj = getattr(col, "obj", None)
doc = inspect.getdoc(obj) if obj else None
for line in doc.splitlines():
self._tw.line("{}{}".format(indent + " ", line))
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_sessionfinish(
self, session: "Session", exitstatus: Union[int, ExitCode]
):
self.write_sep("!", str(session.shouldstop), red=True)
self.summary_stats()
- @pytest.hookimpl(hookwrapper=True)
+ @hookimpl(hookwrapper=True)
def pytest_terminal_summary(self) -> Generator[None, None, None]:
self.summary_errors()
self.summary_failures()
def summary_warnings(self) -> None:
if self.hasopt("w"):
- all_warnings = self.stats.get(
- "warnings"
- ) # type: Optional[List[WarningReport]]
+ all_warnings: Optional[List[WarningReport]] = self.stats.get("warnings")
if not all_warnings:
return
if not warning_reports:
return
- reports_grouped_by_message = (
- order_preserving_dict()
- ) # type: Dict[str, List[WarningReport]]
+ reports_grouped_by_message: Dict[str, List[WarningReport]] = {}
for wr in warning_reports:
reports_grouped_by_message.setdefault(wr.message, []).append(wr)
if len(locations) < 10:
return "\n".join(map(str, locations))
- counts_by_filename = order_preserving_dict() # type: Dict[str, int]
- for loc in locations:
- key = str(loc).split("::", 1)[0]
- counts_by_filename[key] = counts_by_filename.get(key, 0) + 1
+ counts_by_filename = Counter(
+ str(loc).split("::", 1)[0] for loc in locations
+ )
return "\n".join(
"{}: {} warning{}".format(k, v, "s" if v > 1 else "")
for k, v in counts_by_filename.items()
def summary_passes(self) -> None:
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
- reports = self.getreports("passed") # type: List[TestReport]
+ reports: List[TestReport] = self.getreports("passed")
if not reports:
return
self.write_sep("=", "PASSES")
def summary_failures(self) -> None:
if self.config.option.tbstyle != "no":
- reports = self.getreports("failed") # type: List[BaseReport]
+ reports: List[BaseReport] = self.getreports("failed")
if not reports:
return
self.write_sep("=", "FAILURES")
def summary_errors(self) -> None:
if self.config.option.tbstyle != "no":
- reports = self.getreports("error") # type: List[BaseReport]
+ reports: List[BaseReport] = self.getreports("error")
if not reports:
return
self.write_sep("=", "ERRORS")
if rep.when == "collect":
msg = "ERROR collecting " + msg
else:
- msg = "ERROR at {} of {}".format(rep.when, msg)
+ msg = f"ERROR at {rep.when} of {msg}"
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
for rep in xfailed:
verbose_word = rep._get_verbose_word(self.config)
pos = _get_pos(self.config, rep)
- lines.append("{} {}".format(verbose_word, pos))
+ lines.append(f"{verbose_word} {pos}")
reason = rep.wasxfail
if reason:
lines.append(" " + str(reason))
verbose_word = rep._get_verbose_word(self.config)
pos = _get_pos(self.config, rep)
reason = rep.wasxfail
- lines.append("{} {} {}".format(verbose_word, pos, reason))
+ lines.append(f"{verbose_word} {pos} {reason}")
def show_skipped(lines: List[str]) -> None:
- skipped = self.stats.get("skipped", []) # type: List[CollectReport]
+ skipped: List[CollectReport] = self.stats.get("skipped", [])
fskips = _folded_skips(self.startpath, skipped) if skipped else []
if not fskips:
return
else:
lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason))
- REPORTCHAR_ACTIONS = {
+ REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = {
"x": show_xfailed,
"X": show_xpassed,
"f": partial(show_simple, "failed"),
"s": show_skipped,
"p": partial(show_simple, "passed"),
"E": partial(show_simple, "error"),
- } # type: Mapping[str, Callable[[List[str]], None]]
+ }
- lines = [] # type: List[str]
+ lines: List[str] = []
for char in self.reportchars:
action = REPORTCHAR_ACTIONS.get(char)
if action: # skipping e.g. "P" (passed with output) here.
return main_color
def _set_main_color(self) -> None:
- unknown_types = [] # type: List[str]
+ unknown_types: List[str] = []
for found_type in self.stats.keys():
if found_type: # setup/teardown reports have an empty key, ignore them
if found_type not in KNOWN_TYPES and found_type not in unknown_types:
self._main_color = self._determine_main_color(bool(unknown_types))
def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
- main_color, known_types = self._get_main_color()
+ """
+ Build the parts used in the last summary stats line.
+
+ The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs ===".
+
+ This function builds a list of the "parts" that make up the text in that line, in
+ the example above it would be:
+
+ [
+ ("12 passed", {"green": True}),
+ ("2 errors", {"red": True}),
+ ]
+
+ That last dict for each line is a "markup dictionary", used by TerminalWriter to
+ color output.
+
+ The final color of the line is also determined by this function, and is the second
+ element of the returned tuple.
+ """
+ if self.config.getoption("collectonly"):
+ return self._build_collect_only_summary_stats_line()
+ else:
+ return self._build_normal_summary_stats_line()
+
+ def _get_reports_to_display(self, key: str) -> List[Any]:
+ """Get test/collection reports for the given status key, such as `passed` or `error`."""
+ reports = self.stats.get(key, [])
+ return [x for x in reports if getattr(x, "count_towards_summary", True)]
+ def _build_normal_summary_stats_line(
+ self,
+ ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
+ main_color, known_types = self._get_main_color()
parts = []
+
for key in known_types:
- reports = self.stats.get(key, None)
+ reports = self._get_reports_to_display(key)
if reports:
- count = sum(
- 1 for rep in reports if getattr(rep, "count_towards_summary", True)
- )
+ count = len(reports)
color = _color_for_type.get(key, _color_for_type_default)
markup = {color: True, "bold": color == main_color}
- parts.append(("%d %s" % _make_plural(count, key), markup))
+ parts.append(("%d %s" % pluralize(count, key), markup))
if not parts:
parts = [("no tests ran", {_color_for_type_default: True})]
return parts, main_color
+ def _build_collect_only_summary_stats_line(
+ self,
+ ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
+ deselected = len(self._get_reports_to_display("deselected"))
+ errors = len(self._get_reports_to_display("error"))
+
+ if self._numcollected == 0:
+ parts = [("no tests collected", {"yellow": True})]
+ main_color = "yellow"
+
+ elif deselected == 0:
+ main_color = "green"
+ collected_output = "%d %s collected" % pluralize(self._numcollected, "test")
+ parts = [(collected_output, {main_color: True})]
+ else:
+ all_tests_were_deselected = self._numcollected == deselected
+ if all_tests_were_deselected:
+ main_color = "yellow"
+ collected_output = f"no tests collected ({deselected} deselected)"
+ else:
+ main_color = "green"
+ selected = self._numcollected - deselected
+ collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"
+
+ parts = [(collected_output, {main_color: True})]
+
+ if errors:
+ main_color = _color_for_type["error"]
+ parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})]
+
+ return parts, main_color
+
def _get_pos(config: Config, rep: BaseReport):
nodeid = config.cwd_relative_nodeid(rep.nodeid)
return nodeid
+def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]:
+ """Format msg into format, ellipsizing it if doesn't fit in available_width.
+
+ Returns None if even the ellipsis can't fit.
+ """
+ # Only use the first line.
+ i = msg.find("\n")
+ if i != -1:
+ msg = msg[:i]
+
+ ellipsis = "..."
+ format_width = wcswidth(format.format(""))
+ if format_width + len(ellipsis) > available_width:
+ return None
+
+ if format_width + wcswidth(msg) > available_width:
+ available_width -= len(ellipsis)
+ msg = msg[:available_width]
+ while format_width + wcswidth(msg) > available_width:
+ msg = msg[:-1]
+ msg += ellipsis
+
+ return format.format(msg)
+
+
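Worked examples of ``_format_trimmed``, following the logic above (widths computed with ``wcswidth``):

    _format_trimmed(" ({})", "short", 80)       # -> " (short)"    (fits as-is)
    _format_trimmed(" ({})", "a very long reason", 12)
                                                # -> " (a very...)" (ellipsized)
    _format_trimmed(" ({})", "anything", 3)     # -> None (not even "..." fits)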
def _get_line_with_reprcrash_message(
config: Config, rep: BaseReport, termwidth: int
) -> str:
verbose_word = rep._get_verbose_word(config)
pos = _get_pos(config, rep)
- line = "{} {}".format(verbose_word, pos)
- len_line = wcswidth(line)
- ellipsis, len_ellipsis = "...", 3
- if len_line > termwidth - len_ellipsis:
- # No space for an additional message.
- return line
+ line = f"{verbose_word} {pos}"
+ line_width = wcswidth(line)
try:
# Type ignored intentionally -- possible AttributeError expected.
except AttributeError:
pass
else:
- # Only use the first line.
- i = msg.find("\n")
- if i != -1:
- msg = msg[:i]
- len_msg = wcswidth(msg)
-
- sep, len_sep = " - ", 3
- max_len_msg = termwidth - len_line - len_sep
- if max_len_msg >= len_ellipsis:
- if len_msg > max_len_msg:
- max_len_msg -= len_ellipsis
- msg = msg[:max_len_msg]
- while wcswidth(msg) > max_len_msg:
- msg = msg[:-1]
- msg += ellipsis
- line += sep + msg
+ available_width = termwidth - line_width
+ msg = _format_trimmed(" - {}", msg, available_width)
+ if msg is not None:
+ line += msg
+
return line
def _folded_skips(
startpath: Path, skipped: Sequence[CollectReport],
) -> List[Tuple[int, str, Optional[int], str]]:
- d = {} # type: Dict[Tuple[str, Optional[int], str], List[CollectReport]]
+ d: Dict[Tuple[str, Optional[int], str], List[CollectReport]] = {}
for event in skipped:
assert event.longrepr is not None
assert isinstance(event.longrepr, tuple), (event, event.longrepr)
and "skip" in keywords
and "pytestmark" not in keywords
):
- key = (fspath, None, reason) # type: Tuple[str, Optional[int], str]
+ key: Tuple[str, Optional[int], str] = (fspath, None, reason)
else:
key = (fspath, lineno, reason)
d.setdefault(key, []).append(event)
- values = [] # type: List[Tuple[int, str, Optional[int], str]]
+ values: List[Tuple[int, str, Optional[int], str]] = []
for key, events in d.items():
values.append((len(events), *key))
return values
_color_for_type_default = "yellow"
-def _make_plural(count: int, noun: str) -> Tuple[int, str]:
+def pluralize(count: int, noun: str) -> Tuple[int, str]:
# No need to pluralize words such as `failed` or `passed`.
- if noun not in ["error", "warnings"]:
+ if noun not in ["error", "warnings", "test"]:
return count, noun
# The `warnings` key is plural. To avoid API breakage, we keep it that way but
def _plugin_nameversions(plugininfo) -> List[str]:
- values = [] # type: List[str]
+ values: List[str] = []
for plugin, dist in plugininfo:
# Gets us name and version!
name = "{dist.project_name}-{dist.version}".format(dist=dist)
def format_session_duration(seconds: float) -> str:
"""Format the given seconds in a human readable manner to show in the final summary."""
if seconds < 60:
- return "{:.2f}s".format(seconds)
+ return f"{seconds:.2f}s"
else:
dt = datetime.timedelta(seconds=int(seconds))
- return "{:.2f}s ({})".format(seconds, dt)
+ return f"{seconds:.2f}s ({dt})"
+
+
+def _get_raw_skip_reason(report: TestReport) -> str:
+ """Get the reason string of a skip/xfail/xpass test report.
+
+ The string is just the part given by the user.
+ """
+ if hasattr(report, "wasxfail"):
+ reason = cast(str, report.wasxfail)
+ if reason.startswith("reason: "):
+ reason = reason[len("reason: ") :]
+ return reason
+ else:
+ assert report.skipped
+ assert isinstance(report.longrepr, tuple)
+ _, _, reason = report.longrepr
+ if reason.startswith("Skipped: "):
+ reason = reason[len("Skipped: ") :]
+ return reason
--- /dev/null
+import threading
+import traceback
+import warnings
+from types import TracebackType
+from typing import Any
+from typing import Callable
+from typing import Generator
+from typing import Optional
+from typing import Type
+
+import pytest
+
+
+# Copied from cpython/Lib/test/support/threading_helper.py, with modifications.
+class catch_threading_exception:
+ """Context manager catching threading.Thread exception using
+ threading.excepthook.
+
+ Storing exc_value using a custom hook can create a reference cycle. The
+ reference cycle is broken explicitly when the context manager exits.
+
+ Storing thread using a custom hook can resurrect it if it is set to an
+ object which is being finalized. Exiting the context manager clears the
+ stored object.
+
+ Usage:
+ with catch_threading_exception() as cm:
+ # code spawning a thread which raises an exception
+ ...
+ # check the thread exception: use cm.args
+ ...
+ # cm.args attribute no longer exists at this point
+ # (to break a reference cycle)
+ """
+
+ def __init__(self) -> None:
+ # See https://github.com/python/typeshed/issues/4767 regarding the underscore.
+ self.args: Optional["threading._ExceptHookArgs"] = None
+ self._old_hook: Optional[Callable[["threading._ExceptHookArgs"], Any]] = None
+
+ def _hook(self, args: "threading._ExceptHookArgs") -> None:
+ self.args = args
+
+ def __enter__(self) -> "catch_threading_exception":
+ self._old_hook = threading.excepthook
+ threading.excepthook = self._hook
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ assert self._old_hook is not None
+ threading.excepthook = self._old_hook
+ self._old_hook = None
+ del self.args
+
+
+def thread_exception_runtest_hook() -> Generator[None, None, None]:
+ with catch_threading_exception() as cm:
+ yield
+ if cm.args:
+ if cm.args.thread is not None:
+ thread_name = cm.args.thread.name
+ else:
+ thread_name = "<unknown>"
+ msg = f"Exception in thread {thread_name}\n\n"
+ msg += "".join(
+ traceback.format_exception(
+ cm.args.exc_type, cm.args.exc_value, cm.args.exc_traceback,
+ )
+ )
+ warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg))
+
+
+@pytest.hookimpl(hookwrapper=True, trylast=True)
+def pytest_runtest_setup() -> Generator[None, None, None]:
+ yield from thread_exception_runtest_hook()
+
+
+@pytest.hookimpl(hookwrapper=True, tryfirst=True)
+def pytest_runtest_call() -> Generator[None, None, None]:
+ yield from thread_exception_runtest_hook()
+
+
+@pytest.hookimpl(hookwrapper=True, tryfirst=True)
+def pytest_runtest_teardown() -> Generator[None, None, None]:
+ yield from thread_exception_runtest_hook()
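An illustrative test that this new plugin surfaces: the thread's exception used to vanish into stderr, and is now reported as a ``PytestUnhandledThreadExceptionWarning`` during the phase in which it occurred:

    import threading

    def test_spawns_failing_thread():
        t = threading.Thread(target=lambda: 1 / 0)  # ZeroDivisionError in thread
        t.start()
        t.join()   # the hook above records it and warns after the test body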
We intentionally grab some "time" functions internally to avoid tests mocking "time" to affect
pytest runtime information (issue #185).
-Fixture "mock_timinig" also interacts with this module for pytest's own tests.
+Fixture "mock_timing" also interacts with this module for pytest's own tests.
"""
from time import perf_counter
from time import sleep
import os
import re
import tempfile
+from pathlib import Path
from typing import Optional
import attr
import py
-import pytest
from .pathlib import ensure_reset_dir
from .pathlib import LOCK_TIMEOUT
from .pathlib import make_numbered_dir
from .pathlib import make_numbered_dir_with_cleanup
-from .pathlib import Path
from _pytest.compat import final
from _pytest.config import Config
+from _pytest.deprecated import check_ispytest
+from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureRequest
from _pytest.monkeypatch import MonkeyPatch
@final
-@attr.s
+@attr.s(init=False)
class TempPathFactory:
"""Factory for temporary directories under the common base temp directory.
The base directory can be configured using the ``--basetemp`` option.
"""
- _given_basetemp = attr.ib(
- type=Optional[Path],
- # Use os.path.abspath() to get absolute path instead of resolve() as it
- # does not work the same in all platforms (see #4427).
- # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012).
- # Ignore type because of https://github.com/python/mypy/issues/6172.
- converter=attr.converters.optional(
- lambda p: Path(os.path.abspath(str(p))) # type: ignore
- ),
- )
+ _given_basetemp = attr.ib(type=Optional[Path])
_trace = attr.ib()
- _basetemp = attr.ib(type=Optional[Path], default=None)
+ _basetemp = attr.ib(type=Optional[Path])
+
+ def __init__(
+ self,
+ given_basetemp: Optional[Path],
+ trace,
+ basetemp: Optional[Path] = None,
+ *,
+ _ispytest: bool = False,
+ ) -> None:
+ check_ispytest(_ispytest)
+ if given_basetemp is None:
+ self._given_basetemp = None
+ else:
+ # Use os.path.abspath() to get absolute path instead of resolve() as it
+ # does not work the same in all platforms (see #4427).
+ # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012).
+ self._given_basetemp = Path(os.path.abspath(str(given_basetemp)))
+ self._trace = trace
+ self._basetemp = basetemp
@classmethod
- def from_config(cls, config: Config) -> "TempPathFactory":
- """Create a factory according to pytest configuration."""
+ def from_config(
+ cls, config: Config, *, _ispytest: bool = False,
+ ) -> "TempPathFactory":
+ """Create a factory according to pytest configuration.
+
+ :meta private:
+ """
+ check_ispytest(_ispytest)
return cls(
- given_basetemp=config.option.basetemp, trace=config.trace.get("tmpdir")
+ given_basetemp=config.option.basetemp,
+ trace=config.trace.get("tmpdir"),
+ _ispytest=True,
)
def _ensure_relative_to_basetemp(self, basename: str) -> str:
basename = os.path.normpath(basename)
if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp():
- raise ValueError(
- "{} is not a normalized and relative path".format(basename)
- )
+ raise ValueError(f"{basename} is not a normalized and relative path")
return basename
def mktemp(self, basename: str, numbered: bool = True) -> Path:
user = get_user() or "unknown"
# use a sub-directory in the temproot to speed-up
# make_numbered_dir() call
- rootdir = temproot.joinpath("pytest-of-{}".format(user))
+ rootdir = temproot.joinpath(f"pytest-of-{user}")
rootdir.mkdir(exist_ok=True)
basetemp = make_numbered_dir_with_cleanup(
prefix="pytest-", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT
@final
-@attr.s
+@attr.s(init=False)
class TempdirFactory:
"""Backward comptibility wrapper that implements :class:``py.path.local``
for :class:``TempPathFactory``."""
_tmppath_factory = attr.ib(type=TempPathFactory)
+ def __init__(
+ self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False
+ ) -> None:
+ check_ispytest(_ispytest)
+ self._tmppath_factory = tmppath_factory
+
def mktemp(self, basename: str, numbered: bool = True) -> py.path.local:
"""Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object."""
return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve())
to the tmpdir_factory session fixture.
"""
mp = MonkeyPatch()
- tmppath_handler = TempPathFactory.from_config(config)
- t = TempdirFactory(tmppath_handler)
+ tmppath_handler = TempPathFactory.from_config(config, _ispytest=True)
+ t = TempdirFactory(tmppath_handler, _ispytest=True)
config._cleanup.append(mp.undo)
mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False)
mp.setattr(config, "_tmpdirhandler", t, raising=False)
-@pytest.fixture(scope="session")
+@fixture(scope="session")
def tmpdir_factory(request: FixtureRequest) -> TempdirFactory:
"""Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session."""
# Set dynamically by pytest_configure() above.
return request.config._tmpdirhandler # type: ignore
-@pytest.fixture(scope="session")
+@fixture(scope="session")
def tmp_path_factory(request: FixtureRequest) -> TempPathFactory:
"""Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session."""
# Set dynamically by pytest_configure() above.
return factory.mktemp(name, numbered=True)
-@pytest.fixture
+@fixture
def tmpdir(tmp_path: Path) -> py.path.local:
"""Return a temporary directory path object which is unique to each test
function invocation, created as a sub directory of the base temporary
directory.
+ By default, a new base temporary directory is created each test session,
+ and old bases are removed after 3 sessions, to aid in debugging. If
+ ``--basetemp`` is used then it is cleared each session. See :ref:`base
+ temporary directory`.
+
The returned object is a `py.path.local`_ path object.
.. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
return py.path.local(tmp_path)
-@pytest.fixture
+@fixture
def tmp_path(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Path:
"""Return a temporary directory path object which is unique to each test
function invocation, created as a sub directory of the base temporary
directory.
- The returned object is a :class:`pathlib.Path` object.
-
- .. note::
+ By default, a new base temporary directory is created each test session,
+ and old bases are removed after 3 sessions, to aid in debugging. If
+ ``--basetemp`` is used then it is cleared each session. See :ref:`base
+ temporary directory`.
- In python < 3.6 this is a pathlib2.Path.
+ The returned object is a :class:`pathlib.Path` object.
"""
return _mk_tmp(request, tmp_path_factory)
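Typical use of the fixture documented above:

    def test_create_file(tmp_path):
        d = tmp_path / "sub"
        d.mkdir()
        f = d / "hello.txt"
        f.write_text("content")
        assert f.read_text() == "content"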
from typing import List
from typing import Optional
from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
from typing import Union
import _pytest._code
import pytest
from _pytest.compat import getimfunc
from _pytest.compat import is_async_function
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import hookimpl
from _pytest.fixtures import FixtureRequest
from _pytest.nodes import Collector
if TYPE_CHECKING:
import unittest
- from typing import Type
from _pytest.fixtures import _Scope
except Exception:
return None
# Yes, so let's collect it.
- item = UnitTestCase.from_parent(collector, name=name, obj=obj) # type: UnitTestCase
+ item: UnitTestCase = UnitTestCase.from_parent(collector, name=name, obj=obj)
return item
"""Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding
teardown functions (#517)."""
class_fixture = _make_xunit_fixture(
- cls, "setUpClass", "tearDownClass", scope="class", pass_self=False
+ cls,
+ "setUpClass",
+ "tearDownClass",
+ "doClassCleanups",
+ scope="class",
+ pass_self=False,
)
if class_fixture:
cls.__pytest_class_setup = class_fixture # type: ignore[attr-defined]
method_fixture = _make_xunit_fixture(
- cls, "setup_method", "teardown_method", scope="function", pass_self=True
+ cls,
+ "setup_method",
+ "teardown_method",
+ None,
+ scope="function",
+ pass_self=True,
)
if method_fixture:
cls.__pytest_method_setup = method_fixture # type: ignore[attr-defined]
def _make_xunit_fixture(
- obj: type, setup_name: str, teardown_name: str, scope: "_Scope", pass_self: bool
+ obj: type,
+ setup_name: str,
+ teardown_name: str,
+ cleanup_name: Optional[str],
+ scope: "_Scope",
+ pass_self: bool,
):
setup = getattr(obj, setup_name, None)
teardown = getattr(obj, teardown_name, None)
if setup is None and teardown is None:
return None
- @pytest.fixture(scope=scope, autouse=True)
+ if cleanup_name:
+ cleanup = getattr(obj, cleanup_name, lambda *args: None)
+ else:
+
+ def cleanup(*args):
+ pass
+
+ @pytest.fixture(
+ scope=scope,
+ autouse=True,
+ # Use a unique name to speed up lookup.
+ name=f"unittest_{setup_name}_fixture_{obj.__qualname__}",
+ )
def fixture(self, request: FixtureRequest) -> Generator[None, None, None]:
if _is_skipped(self):
reason = self.__unittest_skip_why__
pytest.skip(reason)
if setup is not None:
- if pass_self:
- setup(self, request.function)
- else:
- setup()
+ try:
+ if pass_self:
+ setup(self, request.function)
+ else:
+ setup()
+ # unittest does not call the cleanup function for every BaseException, so we
+ # follow this here.
+ except Exception:
+ if pass_self:
+ cleanup(self)
+ else:
+ cleanup()
+
+ raise
yield
- if teardown is not None:
+ try:
+ if teardown is not None:
+ if pass_self:
+ teardown(self, request.function)
+ else:
+ teardown()
+ finally:
if pass_self:
- teardown(self, request.function)
+ cleanup(self)
else:
- teardown()
+ cleanup()
return fixture
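What the new ``doClassCleanups`` wiring enables, sketched with stdlib ``unittest`` (``addClassCleanup`` requires Python 3.8+): cleanups registered in ``setUpClass`` now run under pytest even when ``setUpClass`` itself fails:

    import unittest

    class MyTest(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            cls.addClassCleanup(lambda: print("class cleanup ran"))
            raise RuntimeError("boom")  # the cleanup above still executes

        def test_never_runs(self):
            pass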
class TestCaseFunction(Function):
nofuncargs = True
- _excinfo = None # type: Optional[List[_pytest._code.ExceptionInfo[BaseException]]]
- _testcase = None # type: Optional[unittest.TestCase]
+ _excinfo: Optional[List[_pytest._code.ExceptionInfo[BaseException]]] = None
+ _testcase: Optional["unittest.TestCase"] = None
def setup(self) -> None:
# A bound method to be called during teardown() if set (see 'runtest()').
- self._explicit_tearDown = None # type: Optional[Callable[[], None]]
+ self._explicit_tearDown: Optional[Callable[[], None]] = None
assert self.parent is not None
self._testcase = self.parent.obj(self.name) # type: ignore[attr-defined]
self._obj = getattr(self._testcase, self.name)
@hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules:
- ut = sys.modules["twisted.python.failure"] # type: Any
+ ut: Any = sys.modules["twisted.python.failure"]
Failure__init__ = ut.Failure.__init__
check_testcase_implements_trial_reporter()
--- /dev/null
+import sys
+import traceback
+import warnings
+from types import TracebackType
+from typing import Any
+from typing import Callable
+from typing import Generator
+from typing import Optional
+from typing import Type
+
+import pytest
+
+
+# Copied from cpython/Lib/test/support/__init__.py, with modifications.
+class catch_unraisable_exception:
+ """Context manager catching unraisable exception using sys.unraisablehook.
+
+ Storing the exception value (cm.unraisable.exc_value) creates a reference
+ cycle. The reference cycle is broken explicitly when the context manager
+ exits.
+
+ Storing the object (cm.unraisable.object) can resurrect it if it is set to
+ an object which is being finalized. Exiting the context manager clears the
+ stored object.
+
+ Usage:
+ with catch_unraisable_exception() as cm:
+ # code creating an "unraisable exception"
+ ...
+ # check the unraisable exception: use cm.unraisable
+ ...
+ # cm.unraisable attribute no longer exists at this point
+ # (to break a reference cycle)
+ """
+
+ def __init__(self) -> None:
+ self.unraisable: Optional["sys.UnraisableHookArgs"] = None
+ self._old_hook: Optional[Callable[["sys.UnraisableHookArgs"], Any]] = None
+
+ def _hook(self, unraisable: "sys.UnraisableHookArgs") -> None:
+ # Storing unraisable.object can resurrect an object which is being
+ # finalized. Storing unraisable.exc_value creates a reference cycle.
+ self.unraisable = unraisable
+
+ def __enter__(self) -> "catch_unraisable_exception":
+ self._old_hook = sys.unraisablehook
+ sys.unraisablehook = self._hook
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ assert self._old_hook is not None
+ sys.unraisablehook = self._old_hook
+ self._old_hook = None
+ del self.unraisable
+
+
+def unraisable_exception_runtest_hook() -> Generator[None, None, None]:
+ with catch_unraisable_exception() as cm:
+ yield
+ if cm.unraisable:
+ if cm.unraisable.err_msg is not None:
+ err_msg = cm.unraisable.err_msg
+ else:
+ err_msg = "Exception ignored in"
+ msg = f"{err_msg}: {cm.unraisable.object!r}\n\n"
+ msg += "".join(
+ traceback.format_exception(
+ cm.unraisable.exc_type,
+ cm.unraisable.exc_value,
+ cm.unraisable.exc_traceback,
+ )
+ )
+ warnings.warn(pytest.PytestUnraisableExceptionWarning(msg))
+
+
+@pytest.hookimpl(hookwrapper=True, tryfirst=True)
+def pytest_runtest_setup() -> Generator[None, None, None]:
+ yield from unraisable_exception_runtest_hook()
+
+
+@pytest.hookimpl(hookwrapper=True, tryfirst=True)
+def pytest_runtest_call() -> Generator[None, None, None]:
+ yield from unraisable_exception_runtest_hook()
+
+
+@pytest.hookimpl(hookwrapper=True, tryfirst=True)
+def pytest_runtest_teardown() -> Generator[None, None, None]:
+ yield from unraisable_exception_runtest_hook()
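End to end, these hookwrappers turn destructor errors into test-visible warnings. A hedged sketch of a test that would now surface one (BrokenDel is an illustrative name; the error:: filter promotes the warning to a test failure):

import pytest

class BrokenDel:
    def __del__(self) -> None:
        raise ValueError("del is broken")

@pytest.mark.filterwarnings("error::pytest.PytestUnraisableExceptionWarning")
def test_broken_del() -> None:
    obj = BrokenDel()
    # CPython runs __del__ immediately here; the raised ValueError goes to
    # sys.unraisablehook, which the context manager above has replaced.
    del obj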
from typing import Any
from typing import Generic
+from typing import Type
from typing import TypeVar
import attr
from _pytest.compat import final
-from _pytest.compat import TYPE_CHECKING
-
-if TYPE_CHECKING:
- from typing import Type # noqa: F401 (used in type string)
class PytestWarning(UserWarning):
__module__ = "pytest"
+@final
+class PytestUnraisableExceptionWarning(PytestWarning):
+ """An unraisable exception was reported.
+
+ Unraisable exceptions are exceptions raised in :meth:`__del__ <object.__del__>`
+ implementations and similar situations when the exception cannot be raised
+ as normal.
+ """
+
+ __module__ = "pytest"
+
+
+@final
+class PytestUnhandledThreadExceptionWarning(PytestWarning):
+ """An unhandled exception occurred in a :class:`~threading.Thread`.
+
+ Such exceptions don't propagate normally.
+ """
+
+ __module__ = "pytest"
+
+
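For the thread counterpart, a hedged sketch of a test that would trigger the new warning (the companion threadexception plugin that actually emits it is not part of this hunk):

import threading

def test_failing_thread() -> None:
    # The ZeroDivisionError never propagates to the main thread; it reaches
    # threading.excepthook, where pytest can report it as a
    # PytestUnhandledThreadExceptionWarning.
    t = threading.Thread(target=lambda: 1 / 0)
    t.start()
    t.join()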
_W = TypeVar("_W", bound=PytestWarning)
as opposed to a direct message.
"""
- category = attr.ib(type="Type[_W]")
+ category = attr.ib(type=Type["_W"])
template = attr.ib(type=str)
def format(self, **kwargs: Any) -> _W:
"""Return an instance of the warning category, formatted with given kwargs."""
return self.category(self.template.format(**kwargs))
-
-
-PYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple("testdir.copy_example")
from contextlib import contextmanager
from typing import Generator
from typing import Optional
+from typing import TYPE_CHECKING
import pytest
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import apply_warning_filters
from _pytest.config import Config
from _pytest.config import parse_warning_filter
from . import collect
from _pytest import __version__
from _pytest.assertion import register_assert_rewrite
+from _pytest.cacheprovider import Cache
+from _pytest.capture import CaptureFixture
from _pytest.config import cmdline
from _pytest.config import console_main
from _pytest.config import ExitCode
from _pytest.fixtures import _fillfuncargs
from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureLookupError
+from _pytest.fixtures import FixtureRequest
from _pytest.fixtures import yield_fixture
from _pytest.freeze_support import freeze_includes
+from _pytest.logging import LogCaptureFixture
from _pytest.main import Session
from _pytest.mark import MARK_GEN as mark
from _pytest.mark import param
+from _pytest.monkeypatch import MonkeyPatch
from _pytest.nodes import Collector
from _pytest.nodes import File
from _pytest.nodes import Item
from _pytest.outcomes import importorskip
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
+from _pytest.pytester import Pytester
+from _pytest.pytester import Testdir
from _pytest.python import Class
from _pytest.python import Function
from _pytest.python import Instance
from _pytest.python_api import approx
from _pytest.python_api import raises
from _pytest.recwarn import deprecated_call
+from _pytest.recwarn import WarningsRecorder
from _pytest.recwarn import warns
+from _pytest.tmpdir import TempdirFactory
+from _pytest.tmpdir import TempPathFactory
from _pytest.warning_types import PytestAssertRewriteWarning
from _pytest.warning_types import PytestCacheWarning
from _pytest.warning_types import PytestCollectionWarning
from _pytest.warning_types import PytestDeprecationWarning
from _pytest.warning_types import PytestExperimentalApiWarning
from _pytest.warning_types import PytestUnhandledCoroutineWarning
+from _pytest.warning_types import PytestUnhandledThreadExceptionWarning
from _pytest.warning_types import PytestUnknownMarkWarning
+from _pytest.warning_types import PytestUnraisableExceptionWarning
from _pytest.warning_types import PytestWarning
set_trace = __pytestPDB.set_trace
"__version__",
"_fillfuncargs",
"approx",
+ "Cache",
+ "CaptureFixture",
"Class",
"cmdline",
"collect",
"File",
"fixture",
"FixtureLookupError",
+ "FixtureRequest",
"freeze_includes",
"Function",
"hookimpl",
"importorskip",
"Instance",
"Item",
+ "LogCaptureFixture",
"main",
"mark",
"Module",
+ "MonkeyPatch",
"Package",
"param",
"PytestAssertRewriteWarning",
"PytestConfigWarning",
"PytestDeprecationWarning",
"PytestExperimentalApiWarning",
+ "Pytester",
"PytestUnhandledCoroutineWarning",
+ "PytestUnhandledThreadExceptionWarning",
"PytestUnknownMarkWarning",
+ "PytestUnraisableExceptionWarning",
"PytestWarning",
"raises",
"register_assert_rewrite",
"Session",
"set_trace",
"skip",
+ "TempPathFactory",
+ "Testdir",
+ "TempdirFactory",
"UsageError",
+ "WarningsRecorder",
"warns",
"xfail",
"yield_fixture",
from _pytest.compat import importlib_metadata
from _pytest.config import ExitCode
from _pytest.pathlib import symlink_or_skip
-from _pytest.pytester import Testdir
+from _pytest.pytester import Pytester
-def prepend_pythonpath(*dirs):
+def prepend_pythonpath(*dirs) -> str:
cur = os.getenv("PYTHONPATH")
if cur:
dirs += (cur,)
class TestGeneralUsage:
- def test_config_error(self, testdir):
- testdir.copy_example("conftest_usageerror/conftest.py")
- result = testdir.runpytest(testdir.tmpdir)
+ def test_config_error(self, pytester: Pytester) -> None:
+ pytester.copy_example("conftest_usageerror/conftest.py")
+ result = pytester.runpytest(pytester.path)
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(["*ERROR: hello"])
result.stdout.fnmatch_lines(["*pytest_unconfigure_called"])
- def test_root_conftest_syntax_error(self, testdir):
- testdir.makepyfile(conftest="raise SyntaxError\n")
- result = testdir.runpytest()
+ def test_root_conftest_syntax_error(self, pytester: Pytester) -> None:
+ pytester.makepyfile(conftest="raise SyntaxError\n")
+ result = pytester.runpytest()
result.stderr.fnmatch_lines(["*raise SyntaxError*"])
assert result.ret != 0
- def test_early_hook_error_issue38_1(self, testdir):
- testdir.makeconftest(
+ def test_early_hook_error_issue38_1(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_sessionstart():
0 / 0
"""
)
- result = testdir.runpytest(testdir.tmpdir)
+ result = pytester.runpytest(pytester.path)
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
- result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
+ result = pytester.runpytest(pytester.path, "--fulltrace")
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*def pytest_sessionstart():*", "*INTERNALERROR*0 / 0*"]
)
- def test_early_hook_configure_error_issue38(self, testdir):
- testdir.makeconftest(
+ def test_early_hook_configure_error_issue38(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_configure():
0 / 0
"""
)
- result = testdir.runpytest(testdir.tmpdir)
+ result = pytester.runpytest(pytester.path)
assert result.ret != 0
# here we get it on stderr
result.stderr.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
- def test_file_not_found(self, testdir):
- result = testdir.runpytest("asd")
+ def test_file_not_found(self, pytester: Pytester) -> None:
+ result = pytester.runpytest("asd")
assert result.ret != 0
result.stderr.fnmatch_lines(["ERROR: file or directory not found: asd"])
- def test_file_not_found_unconfigure_issue143(self, testdir):
- testdir.makeconftest(
+ def test_file_not_found_unconfigure_issue143(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_configure():
print("---configure")
print("---unconfigure")
"""
)
- result = testdir.runpytest("-s", "asd")
+ result = pytester.runpytest("-s", "asd")
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(["ERROR: file or directory not found: asd"])
result.stdout.fnmatch_lines(["*---configure", "*---unconfigure"])
- def test_config_preparse_plugin_option(self, testdir):
- testdir.makepyfile(
+ def test_config_preparse_plugin_option(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
pytest_xyz="""
def pytest_addoption(parser):
parser.addoption("--xyz", dest="xyz", action="store")
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
test_one="""
def test_option(pytestconfig):
assert pytestconfig.option.xyz == "123"
"""
)
- result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
+ result = pytester.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("load_cov_early", [True, False])
- def test_early_load_setuptools_name(self, testdir, monkeypatch, load_cov_early):
+ def test_early_load_setuptools_name(
+ self, pytester: Pytester, monkeypatch, load_cov_early
+ ) -> None:
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
- testdir.makepyfile(mytestplugin1_module="")
- testdir.makepyfile(mytestplugin2_module="")
- testdir.makepyfile(mycov_module="")
- testdir.syspathinsert()
+ pytester.makepyfile(mytestplugin1_module="")
+ pytester.makepyfile(mytestplugin2_module="")
+ pytester.makepyfile(mycov_module="")
+ pytester.syspathinsert()
loaded = []
monkeypatch.setattr(importlib_metadata, "distributions", my_dists)
params = ("-p", "mycov") if load_cov_early else ()
- testdir.runpytest_inprocess(*params)
+ pytester.runpytest_inprocess(*params)
if load_cov_early:
assert loaded == ["mycov", "myplugin1", "myplugin2"]
else:
assert loaded == ["myplugin1", "myplugin2", "mycov"]
@pytest.mark.parametrize("import_mode", ["prepend", "append", "importlib"])
- def test_assertion_rewrite(self, testdir, import_mode):
- p = testdir.makepyfile(
+ def test_assertion_rewrite(self, pytester: Pytester, import_mode) -> None:
+ p = pytester.makepyfile(
"""
def test_this():
x = 0
assert x
"""
)
- result = testdir.runpytest(p, "--import-mode={}".format(import_mode))
+ result = pytester.runpytest(p, f"--import-mode={import_mode}")
result.stdout.fnmatch_lines(["> assert x", "E assert 0"])
assert result.ret == 1
- def test_nested_import_error(self, testdir):
- p = testdir.makepyfile(
+ def test_nested_import_error(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import import_fails
def test_this():
assert import_fails.a == 1
"""
)
- testdir.makepyfile(import_fails="import does_not_work")
- result = testdir.runpytest(p)
+ pytester.makepyfile(import_fails="import does_not_work")
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*",
)
assert result.ret == 2
- def test_not_collectable_arguments(self, testdir):
- p1 = testdir.makepyfile("")
- p2 = testdir.makefile(".pyc", "123")
- result = testdir.runpytest(p1, p2)
+ def test_not_collectable_arguments(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("")
+ p2 = pytester.makefile(".pyc", "123")
+ result = pytester.runpytest(p1, p2)
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(
[
- "ERROR: not found: {}".format(p2),
+ f"ERROR: not found: {p2}",
"(no name {!r} in any of [[][]])".format(str(p2)),
"",
]
)
@pytest.mark.filterwarnings("default")
- def test_better_reporting_on_conftest_load_failure(self, testdir):
+ def test_better_reporting_on_conftest_load_failure(
+ self, pytester: Pytester
+ ) -> None:
"""Show a user-friendly traceback on conftest import failures (#486, #3332)"""
- testdir.makepyfile("")
- conftest = testdir.makeconftest(
+ pytester.makepyfile("")
+ conftest = pytester.makeconftest(
"""
def foo():
import qwerty
foo()
"""
)
- result = testdir.runpytest("--help")
+ result = pytester.runpytest("--help")
result.stdout.fnmatch_lines(
"""
*--version*
*warning*conftest.py*
"""
)
- result = testdir.runpytest()
- exc_name = (
- "ModuleNotFoundError" if sys.version_info >= (3, 6) else "ImportError"
- )
+ result = pytester.runpytest()
assert result.stdout.lines == []
assert result.stderr.lines == [
- "ImportError while loading conftest '{}'.".format(conftest),
+ f"ImportError while loading conftest '{conftest}'.",
"conftest.py:3: in <module>",
" foo()",
"conftest.py:2: in foo",
" import qwerty",
- "E {}: No module named 'qwerty'".format(exc_name),
+ "E ModuleNotFoundError: No module named 'qwerty'",
]
- def test_early_skip(self, testdir):
- testdir.mkdir("xyz")
- testdir.makeconftest(
+ def test_early_skip(self, pytester: Pytester) -> None:
+ pytester.mkdir("xyz")
+ pytester.makeconftest(
"""
import pytest
def pytest_collect_file():
pytest.skip("early")
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*1 skip*"])
- def test_issue88_initial_file_multinodes(self, testdir):
- testdir.copy_example("issue88_initial_file_multinodes")
- p = testdir.makepyfile("def test_hello(): pass")
- result = testdir.runpytest(p, "--collect-only")
+ def test_issue88_initial_file_multinodes(self, pytester: Pytester) -> None:
+ pytester.copy_example("issue88_initial_file_multinodes")
+ p = pytester.makepyfile("def test_hello(): pass")
+ result = pytester.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines(["*MyFile*test_issue88*", "*Module*test_issue88*"])
- def test_issue93_initialnode_importing_capturing(self, testdir):
- testdir.makeconftest(
+ def test_issue93_initialnode_importing_capturing(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import sys
print("should not be seen")
sys.stderr.write("stder42\\n")
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.no_fnmatch_line("*should not be seen*")
assert "stderr42" not in result.stderr.str()
- def test_conftest_printing_shows_if_error(self, testdir):
- testdir.makeconftest(
+ def test_conftest_printing_shows_if_error(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
print("should be seen")
assert 0
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret != 0
assert "should be seen" in result.stdout.str()
- def test_issue109_sibling_conftests_not_loaded(self, testdir):
- sub1 = testdir.mkdir("sub1")
- sub2 = testdir.mkdir("sub2")
- sub1.join("conftest.py").write("assert 0")
- result = testdir.runpytest(sub2)
+ def test_issue109_sibling_conftests_not_loaded(self, pytester: Pytester) -> None:
+ sub1 = pytester.mkdir("sub1")
+ sub2 = pytester.mkdir("sub2")
+ sub1.joinpath("conftest.py").write_text("assert 0")
+ result = pytester.runpytest(sub2)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
- sub2.ensure("__init__.py")
- p = sub2.ensure("test_hello.py")
- result = testdir.runpytest(p)
+ sub2.joinpath("__init__.py").touch()
+ p = sub2.joinpath("test_hello.py")
+ p.touch()
+ result = pytester.runpytest(p)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
- result = testdir.runpytest(sub1)
+ result = pytester.runpytest(sub1)
assert result.ret == ExitCode.USAGE_ERROR
- def test_directory_skipped(self, testdir):
- testdir.makeconftest(
+ def test_directory_skipped(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
def pytest_ignore_collect():
pytest.skip("intentional")
"""
)
- testdir.makepyfile("def test_hello(): pass")
- result = testdir.runpytest()
+ pytester.makepyfile("def test_hello(): pass")
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*1 skipped*"])
- def test_multiple_items_per_collector_byid(self, testdir):
- c = testdir.makeconftest(
+ def test_multiple_items_per_collector_byid(self, pytester: Pytester) -> None:
+ c = pytester.makeconftest(
"""
import pytest
class MyItem(pytest.Item):
return MyCollector.from_parent(fspath=path, parent=parent)
"""
)
- result = testdir.runpytest(c.basename + "::" + "xyz")
+ result = pytester.runpytest(c.name + "::" + "xyz")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 pass*"])
- def test_skip_on_generated_funcarg_id(self, testdir):
- testdir.makeconftest(
+ def test_skip_on_generated_funcarg_id(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
def pytest_generate_tests(metafunc):
assert 0
"""
)
- p = testdir.makepyfile("""def test_func(x): pass""")
- res = testdir.runpytest(p)
+ p = pytester.makepyfile("""def test_func(x): pass""")
+ res = pytester.runpytest(p)
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 skipped*"])
- def test_direct_addressing_selects(self, testdir):
- p = testdir.makepyfile(
+ def test_direct_addressing_selects(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def pytest_generate_tests(metafunc):
metafunc.parametrize('i', [1, 2], ids=["1", "2"])
pass
"""
)
- res = testdir.runpytest(p.basename + "::" + "test_func[1]")
+ res = pytester.runpytest(p.name + "::" + "test_func[1]")
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
- def test_direct_addressing_notfound(self, testdir):
- p = testdir.makepyfile(
+ def test_direct_addressing_notfound(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def test_func():
pass
"""
)
- res = testdir.runpytest(p.basename + "::" + "test_notfound")
+ res = pytester.runpytest(p.name + "::" + "test_notfound")
assert res.ret
res.stderr.fnmatch_lines(["*ERROR*not found*"])
- def test_docstring_on_hookspec(self):
+ def test_docstring_on_hookspec(self) -> None:
from _pytest import hookspec
for name, value in vars(hookspec).items():
if name.startswith("pytest_"):
assert value.__doc__, "no docstring for %s" % name
- def test_initialization_error_issue49(self, testdir):
- testdir.makeconftest(
+ def test_initialization_error_issue49(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_configure():
x
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 3 # internal error
result.stderr.fnmatch_lines(["INTERNAL*pytest_configure*", "INTERNAL*x*"])
assert "sessionstarttime" not in result.stderr.str()
@pytest.mark.parametrize("lookfor", ["test_fun.py::test_a"])
- def test_issue134_report_error_when_collecting_member(self, testdir, lookfor):
- testdir.makepyfile(
+ def test_issue134_report_error_when_collecting_member(
+ self, pytester: Pytester, lookfor
+ ) -> None:
+ pytester.makepyfile(
test_fun="""
def test_a():
pass
def"""
)
- result = testdir.runpytest(lookfor)
+ result = pytester.runpytest(lookfor)
result.stdout.fnmatch_lines(["*SyntaxError*"])
if "::" in lookfor:
result.stderr.fnmatch_lines(["*ERROR*"])
assert result.ret == 4 # usage error only if item not found
- def test_report_all_failed_collections_initargs(self, testdir):
- testdir.makeconftest(
+ def test_report_all_failed_collections_initargs(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
from _pytest.config import ExitCode
print("pytest_sessionfinish_called")
"""
)
- testdir.makepyfile(test_a="def", test_b="def")
- result = testdir.runpytest("test_a.py::a", "test_b.py::b")
+ pytester.makepyfile(test_a="def", test_b="def")
+ result = pytester.runpytest("test_a.py::a", "test_b.py::b")
result.stderr.fnmatch_lines(["*ERROR*test_a.py::a*", "*ERROR*test_b.py::b*"])
result.stdout.fnmatch_lines(["pytest_sessionfinish_called"])
assert result.ret == ExitCode.USAGE_ERROR
- def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
+ def test_namespace_import_doesnt_confuse_import_hook(
+ self, pytester: Pytester
+ ) -> None:
"""Ref #383.
Python 3.3's namespace packages messed with our import hooks.
Importing a module that didn't exist, even if the ImportError was
gracefully handled, would make our test crash.
"""
- testdir.mkdir("not_a_package")
- p = testdir.makepyfile(
+ pytester.mkdir("not_a_package")
+ p = pytester.makepyfile(
"""
try:
from not_a_package import doesnt_exist
pass
"""
)
- res = testdir.runpytest(p.basename)
+ res = pytester.runpytest(p.name)
assert res.ret == 0
- def test_unknown_option(self, testdir):
- result = testdir.runpytest("--qwlkej")
+ def test_unknown_option(self, pytester: Pytester) -> None:
+ result = pytester.runpytest("--qwlkej")
result.stderr.fnmatch_lines(
"""
*unrecognized*
"""
)
- def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
+ def test_getsourcelines_error_issue553(
+ self, pytester: Pytester, monkeypatch
+ ) -> None:
monkeypatch.setattr("inspect.getsourcelines", None)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
def raise_error(obj):
raise OSError('source code not available')
pass
"""
)
- res = testdir.runpytest(p)
+ res = pytester.runpytest(p)
res.stdout.fnmatch_lines(
["*source code not available*", "E*fixture 'invalid_fixture' not found"]
)
- def test_plugins_given_as_strings(self, tmpdir, monkeypatch, _sys_snapshot):
+ def test_plugins_given_as_strings(
+ self, pytester: Pytester, monkeypatch, _sys_snapshot
+ ) -> None:
"""Test that str values passed to main() as `plugins` arg are
interpreted as module names to be imported and registered (#855)."""
with pytest.raises(ImportError) as excinfo:
- pytest.main([str(tmpdir)], plugins=["invalid.module"])
+ pytest.main([str(pytester.path)], plugins=["invalid.module"])
assert "invalid" in str(excinfo.value)
- p = tmpdir.join("test_test_plugins_given_as_strings.py")
- p.write("def test_foo(): pass")
+ p = pytester.path.joinpath("test_test_plugins_given_as_strings.py")
+ p.write_text("def test_foo(): pass")
mod = types.ModuleType("myplugin")
monkeypatch.setitem(sys.modules, "myplugin", mod)
- assert pytest.main(args=[str(tmpdir)], plugins=["myplugin"]) == 0
+ assert pytest.main(args=[str(pytester.path)], plugins=["myplugin"]) == 0
- def test_parametrized_with_bytes_regex(self, testdir):
- p = testdir.makepyfile(
+ def test_parametrized_with_bytes_regex(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import re
import pytest
pass
"""
)
- res = testdir.runpytest(p)
+ res = pytester.runpytest(p)
res.stdout.fnmatch_lines(["*1 passed*"])
- def test_parametrized_with_null_bytes(self, testdir):
+ def test_parametrized_with_null_bytes(self, pytester: Pytester) -> None:
"""Test parametrization with values that contain null bytes and unicode characters (#2644, #2957)"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""\
import pytest
assert data
"""
)
- res = testdir.runpytest(p)
+ res = pytester.runpytest(p)
res.assert_outcomes(passed=3)
class TestInvocationVariants:
- def test_earlyinit(self, testdir):
- p = testdir.makepyfile(
+ def test_earlyinit(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
assert hasattr(pytest, 'mark')
"""
)
- result = testdir.runpython(p)
+ result = pytester.runpython(p)
assert result.ret == 0
- def test_pydoc(self, testdir):
+ def test_pydoc(self, pytester: Pytester) -> None:
for name in ("py.test", "pytest"):
- result = testdir.runpython_c("import {};help({})".format(name, name))
+ result = pytester.runpython_c(f"import {name};help({name})")
assert result.ret == 0
s = result.stdout.str()
assert "MarkGenerator" in s
- def test_import_star_py_dot_test(self, testdir):
- p = testdir.makepyfile(
+ def test_import_star_py_dot_test(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
from py.test import *
#collect
xfail
"""
)
- result = testdir.runpython(p)
+ result = pytester.runpython(p)
assert result.ret == 0
- def test_import_star_pytest(self, testdir):
- p = testdir.makepyfile(
+ def test_import_star_pytest(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
from pytest import *
#Item
xfail
"""
)
- result = testdir.runpython(p)
+ result = pytester.runpython(p)
assert result.ret == 0
- def test_double_pytestcmdline(self, testdir):
- p = testdir.makepyfile(
+ def test_double_pytestcmdline(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
run="""
import pytest
pytest.main()
pytest.main()
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_hello():
pass
"""
)
- result = testdir.runpython(p)
+ result = pytester.runpython(p)
result.stdout.fnmatch_lines(["*1 passed*", "*1 passed*"])
- def test_python_minus_m_invocation_ok(self, testdir):
- p1 = testdir.makepyfile("def test_hello(): pass")
- res = testdir.run(sys.executable, "-m", "pytest", str(p1))
+ def test_python_minus_m_invocation_ok(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("def test_hello(): pass")
+ res = pytester.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
- def test_python_minus_m_invocation_fail(self, testdir):
- p1 = testdir.makepyfile("def test_fail(): 0/0")
- res = testdir.run(sys.executable, "-m", "pytest", str(p1))
+ def test_python_minus_m_invocation_fail(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("def test_fail(): 0/0")
+ res = pytester.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 1
- def test_python_pytest_package(self, testdir):
- p1 = testdir.makepyfile("def test_pass(): pass")
- res = testdir.run(sys.executable, "-m", "pytest", str(p1))
+ def test_python_pytest_package(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("def test_pass(): pass")
+ res = pytester.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
):
pytest.main("-h") # type: ignore[arg-type]
- def test_invoke_with_path(self, tmpdir: py.path.local, capsys) -> None:
- retcode = pytest.main(tmpdir)
+ def test_invoke_with_path(self, pytester: Pytester, capsys) -> None:
+ retcode = pytest.main([str(pytester.path)])
assert retcode == ExitCode.NO_TESTS_COLLECTED
out, err = capsys.readouterr()
- def test_invoke_plugin_api(self, capsys):
+ def test_invoke_plugin_api(self, capsys) -> None:
class MyPlugin:
def pytest_addoption(self, parser):
parser.addoption("--myopt")
out, err = capsys.readouterr()
assert "--myopt" in out
- def test_pyargs_importerror(self, testdir, monkeypatch):
+ def test_pyargs_importerror(self, pytester: Pytester, monkeypatch) -> None:
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
- path = testdir.mkpydir("tpkg")
- path.join("test_hello.py").write("raise ImportError")
+ path = pytester.mkpydir("tpkg")
+ path.joinpath("test_hello.py").write_text("raise ImportError")
- result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
+ result = pytester.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret != 0
result.stdout.fnmatch_lines(["collected*0*items*/*1*error"])
- def test_pyargs_only_imported_once(self, testdir):
- pkg = testdir.mkpydir("foo")
- pkg.join("test_foo.py").write("print('hello from test_foo')\ndef test(): pass")
- pkg.join("conftest.py").write(
+ def test_pyargs_only_imported_once(self, pytester: Pytester) -> None:
+ pkg = pytester.mkpydir("foo")
+ pkg.joinpath("test_foo.py").write_text(
+ "print('hello from test_foo')\ndef test(): pass"
+ )
+ pkg.joinpath("conftest.py").write_text(
"def pytest_configure(config): print('configuring')"
)
- result = testdir.runpytest("--pyargs", "foo.test_foo", "-s", syspathinsert=True)
+ result = pytester.runpytest(
+ "--pyargs", "foo.test_foo", "-s", syspathinsert=True
+ )
# should only import once
assert result.outlines.count("hello from test_foo") == 1
# should only configure once
assert result.outlines.count("configuring") == 1
- def test_pyargs_filename_looks_like_module(self, testdir):
- testdir.tmpdir.join("conftest.py").ensure()
- testdir.tmpdir.join("t.py").write("def test(): pass")
- result = testdir.runpytest("--pyargs", "t.py")
+ def test_pyargs_filename_looks_like_module(self, pytester: Pytester) -> None:
+ pytester.path.joinpath("conftest.py").touch()
+ pytester.path.joinpath("t.py").write_text("def test(): pass")
+ result = pytester.runpytest("--pyargs", "t.py")
assert result.ret == ExitCode.OK
- def test_cmdline_python_package(self, testdir, monkeypatch):
+ def test_cmdline_python_package(self, pytester: Pytester, monkeypatch) -> None:
import warnings
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
- path = testdir.mkpydir("tpkg")
- path.join("test_hello.py").write("def test_hello(): pass")
- path.join("test_world.py").write("def test_world(): pass")
- result = testdir.runpytest("--pyargs", "tpkg")
+ path = pytester.mkpydir("tpkg")
+ path.joinpath("test_hello.py").write_text("def test_hello(): pass")
+ path.joinpath("test_world.py").write_text("def test_world(): pass")
+ result = pytester.runpytest("--pyargs", "tpkg")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
- result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
+ result = pytester.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
- empty_package = testdir.mkpydir("empty_package")
+ empty_package = pytester.mkpydir("empty_package")
monkeypatch.setenv("PYTHONPATH", str(empty_package), prepend=os.pathsep)
# the path which is not a package raises an ImportWarning on pypy;
# no idea why only pypy, and not normal python, warns about it here
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
- result = testdir.runpytest("--pyargs", ".")
+ result = pytester.runpytest("--pyargs", ".")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
- monkeypatch.setenv("PYTHONPATH", str(testdir), prepend=os.pathsep)
- result = testdir.runpytest("--pyargs", "tpkg.test_missing", syspathinsert=True)
+ monkeypatch.setenv("PYTHONPATH", str(pytester), prepend=os.pathsep)
+ result = pytester.runpytest("--pyargs", "tpkg.test_missing", syspathinsert=True)
assert result.ret != 0
result.stderr.fnmatch_lines(["*not*found*test_missing*"])
- def test_cmdline_python_namespace_package(self, testdir, monkeypatch):
+ def test_cmdline_python_namespace_package(
+ self, pytester: Pytester, monkeypatch
+ ) -> None:
"""Test --pyargs option with namespace packages (#1567).
Ref: https://packaging.python.org/guides/packaging-namespace-packages/
search_path = []
for dirname in "hello", "world":
- d = testdir.mkdir(dirname)
+ d = pytester.mkdir(dirname)
search_path.append(d)
- ns = d.mkdir("ns_pkg")
- ns.join("__init__.py").write(
+ ns = d.joinpath("ns_pkg")
+ ns.mkdir()
+ ns.joinpath("__init__.py").write_text(
"__import__('pkg_resources').declare_namespace(__name__)"
)
- lib = ns.mkdir(dirname)
- lib.ensure("__init__.py")
- lib.join("test_{}.py".format(dirname)).write(
- "def test_{}(): pass\ndef test_other():pass".format(dirname)
+ lib = ns.joinpath(dirname)
+ lib.mkdir()
+ lib.joinpath("__init__.py").touch()
+ lib.joinpath(f"test_{dirname}.py").write_text(
+ f"def test_{dirname}(): pass\ndef test_other():pass"
)
# The structure of the test directory is now:
# mixed module and filenames:
monkeypatch.chdir("world")
- result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world")
+ result = pytester.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
)
# specify tests within a module
- testdir.chdir()
- result = testdir.runpytest(
+ pytester.chdir()
+ result = pytester.runpytest(
"--pyargs", "-v", "ns_pkg.world.test_world::test_other"
)
assert result.ret == 0
["*test_world.py::test_other*PASSED*", "*1 passed*"]
)
- def test_invoke_test_and_doctestmodules(self, testdir):
- p = testdir.makepyfile(
+ def test_invoke_test_and_doctestmodules(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def test():
pass
"""
)
- result = testdir.runpytest(str(p) + "::test", "--doctest-modules")
+ result = pytester.runpytest(str(p) + "::test", "--doctest-modules")
result.stdout.fnmatch_lines(["*1 passed*"])
- def test_cmdline_python_package_symlink(self, testdir, monkeypatch):
+ def test_cmdline_python_package_symlink(
+ self, pytester: Pytester, monkeypatch
+ ) -> None:
"""
--pyargs with a package whose path contains a symlink should still pick up
the conftest.py inside that package (#2985)
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
dirname = "lib"
- d = testdir.mkdir(dirname)
- foo = d.mkdir("foo")
- foo.ensure("__init__.py")
- lib = foo.mkdir("bar")
- lib.ensure("__init__.py")
- lib.join("test_bar.py").write(
+ d = pytester.mkdir(dirname)
+ foo = d.joinpath("foo")
+ foo.mkdir()
+ foo.joinpath("__init__.py").touch()
+ lib = foo.joinpath("bar")
+ lib.mkdir()
+ lib.joinpath("__init__.py").touch()
+ lib.joinpath("test_bar.py").write_text(
"def test_bar(): pass\ndef test_other(a_fixture):pass"
)
- lib.join("conftest.py").write(
+ lib.joinpath("conftest.py").write_text(
"import pytest\n@pytest.fixture\ndef a_fixture():pass"
)
- d_local = testdir.mkdir("symlink_root")
+ d_local = pytester.mkdir("symlink_root")
symlink_location = d_local / "lib"
symlink_or_skip(d, symlink_location, target_is_directory=True)
# module picked up in symlink-ed directory:
# It picks up symlink_root/lib/foo/bar (symlink) via sys.path.
- result = testdir.runpytest("--pyargs", "-v", "foo.bar")
- testdir.chdir()
+ result = pytester.runpytest("--pyargs", "-v", "foo.bar")
+ pytester.chdir()
assert result.ret == 0
result.stdout.fnmatch_lines(
[
]
)
- def test_cmdline_python_package_not_exists(self, testdir):
- result = testdir.runpytest("--pyargs", "tpkgwhatv")
+ def test_cmdline_python_package_not_exists(self, pytester: Pytester) -> None:
+ result = pytester.runpytest("--pyargs", "tpkgwhatv")
assert result.ret
result.stderr.fnmatch_lines(["ERROR*module*or*package*not*found*"])
@pytest.mark.xfail(reason="decide: feature or bug")
- def test_noclass_discovery_if_not_testcase(self, testdir):
- testpath = testdir.makepyfile(
+ def test_noclass_discovery_if_not_testcase(self, pytester: Pytester) -> None:
+ testpath = pytester.makepyfile(
"""
import unittest
class TestHello(object):
attr = 42
"""
)
- reprec = testdir.inline_run(testpath)
+ reprec = pytester.inline_run(testpath)
reprec.assertoutcome(passed=1)
- def test_doctest_id(self, testdir):
- testdir.makefile(
+ def test_doctest_id(self, pytester: Pytester) -> None:
+ pytester.makefile(
".txt",
"""
>>> x=3
"FAILED test_doctest_id.txt::test_doctest_id.txt",
"*= 1 failed in*",
]
- result = testdir.runpytest(testid, "-rf", "--tb=short")
+ result = pytester.runpytest(testid, "-rf", "--tb=short")
result.stdout.fnmatch_lines(expected_lines)
# Ensure that re-running it will still handle it as
# doctest.DocTestFailure, which was not the case before when
# re-importing doctest, but not creating a new RUNNER_CLASS.
- result = testdir.runpytest(testid, "-rf", "--tb=short")
+ result = pytester.runpytest(testid, "-rf", "--tb=short")
result.stdout.fnmatch_lines(expected_lines)
- def test_core_backward_compatibility(self):
+ def test_core_backward_compatibility(self) -> None:
"""Test backward compatibility for get_plugin_manager function. See #787."""
import _pytest.config
is _pytest.config.PytestPluginManager
)
- def test_has_plugin(self, request):
+ def test_has_plugin(self, request) -> None:
"""Test hasplugin function of the plugin manager (#932)."""
assert request.config.pluginmanager.hasplugin("python")
timing.sleep(0.020)
"""
- def test_calls(self, testdir, mock_timing):
- testdir.makepyfile(self.source)
- result = testdir.runpytest_inprocess("--durations=10")
+ def test_calls(self, pytester: Pytester, mock_timing) -> None:
+ pytester.makepyfile(self.source)
+ result = pytester.runpytest_inprocess("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
["(8 durations < 0.005s hidden. Use -vv to show these durations.)"]
)
- def test_calls_show_2(self, testdir, mock_timing):
+ def test_calls_show_2(self, pytester: Pytester, mock_timing) -> None:
- testdir.makepyfile(self.source)
- result = testdir.runpytest_inprocess("--durations=2")
+ pytester.makepyfile(self.source)
+ result = pytester.runpytest_inprocess("--durations=2")
assert result.ret == 0
lines = result.stdout.get_lines_after("*slowest*durations*")
assert "4 passed" in lines[2]
- def test_calls_showall(self, testdir, mock_timing):
- testdir.makepyfile(self.source)
- result = testdir.runpytest_inprocess("--durations=0")
+ def test_calls_showall(self, pytester: Pytester, mock_timing) -> None:
+ pytester.makepyfile(self.source)
+ result = pytester.runpytest_inprocess("--durations=0")
assert result.ret == 0
tested = "3"
if ("test_%s" % x) in line and y in line:
break
else:
- raise AssertionError("not found {} {}".format(x, y))
+ raise AssertionError(f"not found {x} {y}")
- def test_calls_showall_verbose(self, testdir, mock_timing):
- testdir.makepyfile(self.source)
- result = testdir.runpytest_inprocess("--durations=0", "-vv")
+ def test_calls_showall_verbose(self, pytester: Pytester, mock_timing) -> None:
+ pytester.makepyfile(self.source)
+ result = pytester.runpytest_inprocess("--durations=0", "-vv")
assert result.ret == 0
for x in "123":
if ("test_%s" % x) in line and y in line:
break
else:
- raise AssertionError("not found {} {}".format(x, y))
+ raise AssertionError(f"not found {x} {y}")
- def test_with_deselected(self, testdir, mock_timing):
- testdir.makepyfile(self.source)
- result = testdir.runpytest_inprocess("--durations=2", "-k test_3")
+ def test_with_deselected(self, pytester: Pytester, mock_timing) -> None:
+ pytester.makepyfile(self.source)
+ result = pytester.runpytest_inprocess("--durations=2", "-k test_3")
assert result.ret == 0
result.stdout.fnmatch_lines(["*durations*", "*call*test_3*"])
- def test_with_failing_collection(self, testdir, mock_timing):
- testdir.makepyfile(self.source)
- testdir.makepyfile(test_collecterror="""xyz""")
- result = testdir.runpytest_inprocess("--durations=2", "-k test_1")
+ def test_with_failing_collection(self, pytester: Pytester, mock_timing) -> None:
+ pytester.makepyfile(self.source)
+ pytester.makepyfile(test_collecterror="""xyz""")
+ result = pytester.runpytest_inprocess("--durations=2", "-k test_1")
assert result.ret == 2
result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
# output
result.stdout.no_fnmatch_line("*duration*")
- def test_with_not(self, testdir, mock_timing):
- testdir.makepyfile(self.source)
- result = testdir.runpytest_inprocess("-k not 1")
+ def test_with_not(self, pytester: Pytester, mock_timing) -> None:
+ pytester.makepyfile(self.source)
+ result = pytester.runpytest_inprocess("-k not 1")
assert result.ret == 0
timing.sleep(5)
"""
- def test_setup_function(self, testdir, mock_timing):
- testdir.makepyfile(self.source)
- result = testdir.runpytest_inprocess("--durations=10")
+ def test_setup_function(self, pytester: Pytester, mock_timing) -> None:
+ pytester.makepyfile(self.source)
+ result = pytester.runpytest_inprocess("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
)
-def test_zipimport_hook(testdir, tmpdir):
+def test_zipimport_hook(pytester: Pytester) -> None:
"""Test package loader is being used correctly (see #1837)."""
zipapp = pytest.importorskip("zipapp")
- testdir.tmpdir.join("app").ensure(dir=1)
- testdir.makepyfile(
+ pytester.path.joinpath("app").mkdir()
+ pytester.makepyfile(
**{
"app/foo.py": """
import pytest
"""
}
)
- target = tmpdir.join("foo.zip")
- zipapp.create_archive(str(testdir.tmpdir.join("app")), str(target), main="foo:main")
- result = testdir.runpython(target)
+ target = pytester.path.joinpath("foo.zip")
+ zipapp.create_archive(
+ str(pytester.path.joinpath("app")), str(target), main="foo:main"
+ )
+ result = pytester.runpython(target)
assert result.ret == 0
result.stderr.fnmatch_lines(["*not found*foo*"])
result.stdout.no_fnmatch_line("*INTERNALERROR>*")
-def test_import_plugin_unicode_name(testdir):
- testdir.makepyfile(myplugin="")
- testdir.makepyfile("def test(): pass")
- testdir.makeconftest("pytest_plugins = ['myplugin']")
- r = testdir.runpytest()
+def test_import_plugin_unicode_name(pytester: Pytester) -> None:
+ pytester.makepyfile(myplugin="")
+ pytester.makepyfile("def test(): pass")
+ pytester.makeconftest("pytest_plugins = ['myplugin']")
+ r = pytester.runpytest()
assert r.ret == 0
-def test_pytest_plugins_as_module(testdir):
+def test_pytest_plugins_as_module(pytester: Pytester) -> None:
"""Do not raise an error if pytest_plugins attribute is a module (#3899)"""
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"__init__.py": "",
"pytest_plugins.py": "",
"test_foo.py": "def test(): pass",
}
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 passed in *"])
-def test_deferred_hook_checking(testdir):
+def test_deferred_hook_checking(pytester: Pytester) -> None:
"""Check hooks as late as possible (#1821)."""
- testdir.syspathinsert()
- testdir.makepyfile(
+ pytester.syspathinsert()
+ pytester.makepyfile(
**{
"plugin.py": """
class Hooks(object):
""",
}
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 passed *"])
-def test_fixture_values_leak(testdir):
+def test_fixture_values_leak(pytester: Pytester) -> None:
"""Ensure that fixture objects are properly destroyed by the garbage collector at the end of their expected
lifetimes (#2981).
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import attr
import gc
# Running on subprocess does not activate the HookRecorder
# which holds itself a reference to objects in case of the
# pytest_assert_reprcompare hook
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["* 2 passed *"])
-def test_fixture_order_respects_scope(testdir):
+def test_fixture_order_respects_scope(pytester: Pytester) -> None:
"""Ensure that fixtures are created according to scope order (#2405)."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
assert data.get('value')
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 0
-def test_frame_leak_on_failing_test(testdir):
+def test_frame_leak_on_failing_test(pytester: Pytester) -> None:
"""Pytest would leak garbage referencing the frames of tests that failed
that could never be reclaimed (#2798).
are made of traceback objects which cannot be weakly referenced. Those objects at least
can be eventually claimed by the garbage collector.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import gc
import weakref
assert ref() is None
"""
)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 failed, 1 passed in*"])
-def test_fixture_mock_integration(testdir):
+def test_fixture_mock_integration(pytester: Pytester) -> None:
"""Test that decorators applied to fixture are left working (#3774)"""
- p = testdir.copy_example("acceptance/fixture_mock_integration.py")
- result = testdir.runpytest(p)
+ p = pytester.copy_example("acceptance/fixture_mock_integration.py")
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["*1 passed*"])
-def test_usage_error_code(testdir):
- result = testdir.runpytest("-unknown-option-")
+def test_usage_error_code(pytester: Pytester) -> None:
+ result = pytester.runpytest("-unknown-option-")
assert result.ret == ExitCode.USAGE_ERROR
@pytest.mark.filterwarnings("default")
-def test_warn_on_async_function(testdir):
+def test_warn_on_async_function(pytester: Pytester) -> None:
# In the below we .close() the coroutine only to avoid
# "RuntimeWarning: coroutine 'test_2' was never awaited"
# which messes with other tests.
- testdir.makepyfile(
+ pytester.makepyfile(
test_async="""
async def test_1():
pass
return coro
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"test_async.py::test_1",
@pytest.mark.filterwarnings("default")
-@pytest.mark.skipif(
- sys.version_info < (3, 6), reason="async gen syntax available in Python 3.6+"
-)
-def test_warn_on_async_gen_function(testdir):
- testdir.makepyfile(
+def test_warn_on_async_gen_function(pytester: Pytester) -> None:
+ pytester.makepyfile(
test_async="""
async def test_1():
yield
return test_2()
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"test_async.py::test_1",
)
-def test_pdb_can_be_rewritten(testdir):
- testdir.makepyfile(
+def test_pdb_can_be_rewritten(pytester: Pytester) -> None:
+ pytester.makepyfile(
**{
"conftest.py": """
import pytest
)
# Disable debugging plugin itself to avoid:
# > INTERNALERROR> AttributeError: module 'pdb' has no attribute 'set_trace'
- result = testdir.runpytest_subprocess("-p", "no:debugging", "-vv")
+ result = pytester.runpytest_subprocess("-p", "no:debugging", "-vv")
result.stdout.fnmatch_lines(
[
" def check():",
assert result.ret == 1
-def test_tee_stdio_captures_and_live_prints(testdir):
- testpath = testdir.makepyfile(
+def test_tee_stdio_captures_and_live_prints(pytester: Pytester) -> None:
+ testpath = pytester.makepyfile(
"""
import sys
def test_simple():
print ("@this is stderr@", file=sys.stderr)
"""
)
- result = testdir.runpytest_subprocess(
+ result = pytester.runpytest_subprocess(
testpath,
"--capture=tee-sys",
"--junitxml=output.xml",
result.stderr.fnmatch_lines(["*@this is stderr@*"])
# now ensure the output is in the junitxml
- with open(os.path.join(testdir.tmpdir.strpath, "output.xml")) as f:
+ with open(pytester.path.joinpath("output.xml")) as f:
fullXml = f.read()
assert "@this is stdout@\n" in fullXml
assert "@this is stderr@\n" in fullXml
sys.platform == "win32",
reason="Windows raises `OSError: [Errno 22] Invalid argument` instead",
)
-def test_no_brokenpipeerror_message(testdir: Testdir) -> None:
+def test_no_brokenpipeerror_message(pytester: Pytester) -> None:
"""Ensure that the broken pipe error message is supressed.
In some Python versions, it reaches sys.unraisablehook, in others
a BrokenPipeError exception is propagated, but either way it prints
to stderr on shutdown, so checking nothing is printed is enough.
"""
- popen = testdir.popen((*testdir._getpytestargs(), "--help"))
+ popen = pytester.popen((*pytester._getpytestargs(), "--help"))
popen.stdout.close()
ret = popen.wait()
assert popen.stderr.read() == b""
assert ret == 1
+
+ # Cleanup.
+ popen.stderr.close()
assert code.fullsource is None
-def test_code_with_class() -> None:
+def test_code_from_function_with_class() -> None:
class A:
pass
- pytest.raises(TypeError, Code, A)
+ with pytest.raises(TypeError):
+ Code.from_function(A)
def x() -> None:
def test_code_fullsource() -> None:
- code = Code(x)
+ code = Code.from_function(x)
full = code.fullsource
assert "test_code_fullsource()" in str(full)
def test_code_source() -> None:
- code = Code(x)
+ code = Code.from_function(x)
src = code.source()
expected = """def x() -> None:
raise NotImplementedError()"""
def test_code_from_func() -> None:
- co = Code(test_frame_getsourcelineno_myself)
+ co = Code.from_function(test_frame_getsourcelineno_myself)
assert co.firstlineno
assert co.path
def f1(x):
raise NotImplementedError()
- c1 = Code(f1)
+ c1 = Code.from_function(f1)
assert c1.getargs(var=True) == ("x",)
def f2(x, *y):
raise NotImplementedError()
- c2 = Code(f2)
+ c2 = Code.from_function(f2)
assert c2.getargs(var=True) == ("x", "y")
def f3(x, **z):
raise NotImplementedError()
- c3 = Code(f3)
+ c3 = Code.from_function(f3)
assert c3.getargs(var=True) == ("x", "z")
def f4(x, *y, **z):
raise NotImplementedError()
- c4 = Code(f4)
+ c4 = Code.from_function(f4)
assert c4.getargs(var=True) == ("x", "y", "z")
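These assertions track the constructor change: the old Code(func) call becomes the named constructor Code.from_function(func). A minimal sketch relying only on behavior shown in this diff:

from _pytest._code import Code

def greet(name: str) -> str:
    return f"hello {name}"

code = Code.from_function(greet)   # wraps greet.__code__
assert code.name == "greet"
assert code.getargs() == ("name",)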
+import importlib
import io
import operator
import os
from typing import Any
from typing import Dict
from typing import Tuple
+from typing import TYPE_CHECKING
from typing import Union
import py
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import FormattedExcinfo
from _pytest._io import TerminalWriter
-from _pytest.compat import TYPE_CHECKING
from _pytest.pytester import LineMatcher
-try:
- import importlib
-except ImportError:
- invalidate_import_caches = None
-else:
- invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
-
if TYPE_CHECKING:
from _pytest._code.code import _TracebackStyle
]
def test_traceback_cut(self):
- co = _pytest._code.Code(f)
+ co = _pytest._code.Code.from_function(f)
path, firstlineno = co.path, co.firstlineno
traceback = self.excinfo.traceback
newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
excinfo = pytest.raises(ValueError, h)
traceback = excinfo.traceback
ntraceback = traceback.filter()
- print("old: {!r}".format(traceback))
- print("new: {!r}".format(ntraceback))
+ print(f"old: {traceback!r}")
+ print(f"new: {ntraceback!r}")
if matching:
assert len(ntraceback) == len(traceback) - 2
decorator = pytest.importorskip("decorator").decorator
def log(f, *k, **kw):
- print("{} {}".format(k, kw))
+ print(f"{k} {kw}")
f(*k, **kw)
log = decorator(log)
excinfo = pytest.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
- co = _pytest._code.Code(h)
+ co = _pytest._code.Code.from_function(h)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 1
assert entry.frame.code.name == "h"
excinfo = pytest.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
- co = _pytest._code.Code(g)
+ co = _pytest._code.Code.from_function(g)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 2
assert entry.frame.code.name == "g"
assert result.ret != 0
exc_msg = "Regex pattern '[[]123[]]+' does not match 'division by zero'."
- result.stdout.fnmatch_lines(["E * AssertionError: {}".format(exc_msg)])
+ result.stdout.fnmatch_lines([f"E * AssertionError: {exc_msg}"])
result.stdout.no_fnmatch_line("*__tracebackhide__ = True*")
result = testdir.runpytest("--fulltrace")
assert result.ret != 0
result.stdout.fnmatch_lines(
- ["*__tracebackhide__ = True*", "E * AssertionError: {}".format(exc_msg)]
+ ["*__tracebackhide__ = True*", f"E * AssertionError: {exc_msg}"]
)
modpath = tmpdir.join("mod.py")
tmpdir.ensure("__init__.py")
modpath.write(source)
- if invalidate_import_caches is not None:
- invalidate_import_caches()
+ importlib.invalidate_caches()
return modpath.pyimport()
return importasmod
from _pytest._code.code import Code
monkeypatch.setattr(Code, "path", "bogus")
- excinfo.traceback[0].frame.code.path = "bogus" # type: ignore[misc]
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
)
excinfo = pytest.raises(ValueError, mod.entry)
- styles = ("long", "short") # type: Tuple[_TracebackStyle, ...]
+ styles: Tuple[_TracebackStyle, ...] = ("long", "short")
for style in styles:
p = FormattedExcinfo(style=style)
reprtb = p.repr_traceback(excinfo)
"def entry():",
"> f(0)",
"",
- "{}:5: ".format(mod.__file__),
+ f"{mod.__file__}:5: ",
"_ _ *",
"",
" def f(x):",
"> raise ValueError(x)",
"E ValueError: 0",
"",
- "{}:3: ValueError".format(mod.__file__),
+ f"{mod.__file__}:3: ValueError",
]
)
assert raised == 3
)
excinfo = pytest.raises(ValueError, mod.entry)
- styles = ("short", "long", "no") # type: Tuple[_TracebackStyle, ...]
+ styles: Tuple[_TracebackStyle, ...] = ("short", "long", "no")
for style in styles:
for showlocals in (True, False):
repr = excinfo.getrepr(style=style, showlocals=showlocals)
@pytest.mark.parametrize("encoding", [None, "utf8", "utf16"])
def test_repr_traceback_with_unicode(style, encoding):
if encoding is None:
- msg = "☹" # type: Union[str, bytes]
+ msg: Union[str, bytes] = "☹"
else:
msg = "☹".encode(encoding)
try:
import py.path
-import _pytest._code
import pytest
+from _pytest._code import Code
+from _pytest._code import Frame
from _pytest._code import getfslineno
from _pytest._code import Source
def test_source_from_function() -> None:
- source = _pytest._code.Source(test_source_str_function)
+ source = Source(test_source_str_function)
assert str(source).startswith("def test_source_str_function() -> None:")
def test_method(self):
pass
- source = _pytest._code.Source(TestClass().test_method)
+ source = Source(TestClass().test_method)
assert source.lines == ["def test_method(self):", " pass"]
def test_source_from_lines() -> None:
lines = ["a \n", "b\n", "c"]
- source = _pytest._code.Source(lines)
+ source = Source(lines)
assert source.lines == ["a ", "b", "c"]
def f():
raise NotImplementedError()
- source = _pytest._code.Source(f)
+ source = Source(f)
assert str(source).startswith("def f():")
class A:
def __init__(self, *args) -> None:
frame = sys._getframe(1)
- self.source = _pytest._code.Frame(frame).statement
+ self.source = Frame(frame).statement
x = A("x", "y")
def g():
pass # pragma: no cover
- f_source = _pytest._code.Source(f)
- g_source = _pytest._code.Source(g)
+ f_source = Source(f)
+ g_source = Source(g)
assert str(f_source).strip() == "def f():\n raise NotImplementedError()"
assert str(g_source).strip() == "def g():\n pass # pragma: no cover"
pass
"""
'''
- assert str(_pytest._code.Source(f)) == expected.rstrip()
+ assert str(Source(f)) == expected.rstrip()
def test_deindent() -> None:
def test_source_of_class_at_eof_without_newline(tmpdir, _sys_snapshot) -> None:
# this test fails because the implicit inspect.getsource(A) below
# does not return the "x = 1" last line.
- source = _pytest._code.Source(
+ source = Source(
"""
- class A(object):
+ class A:
def method(self):
x = 1
"""
)
path = tmpdir.join("a.py")
path.write(source)
- s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A)
+ s2 = Source(tmpdir.join("a.py").pyimport().A)
assert str(source).strip() == str(s2).strip()
assert src is not None
assert "if 1:" in str(src)
- d = {} # type: Dict[str, Any]
+ d: Dict[str, Any] = {}
eval(co, d)
src, lineno = findsource(d["x"])
assert src is not None
B.__name__ = B.__qualname__ = "B2"
assert getfslineno(B)[1] == -1
- co = compile("...", "", "eval")
- assert co.co_filename == ""
-
- if hasattr(sys, "pypy_version_info"):
- assert getfslineno(co) == ("", -1)
- else:
- assert getfslineno(co) == ("", 0)
-
def test_code_of_object_instance_with_call() -> None:
class A:
pass
- pytest.raises(TypeError, lambda: _pytest._code.Source(A()))
+ pytest.raises(TypeError, lambda: Source(A()))
class WithCall:
def __call__(self) -> None:
pass
- code = _pytest._code.Code(WithCall())
+ code = Code.from_function(WithCall())
assert "pass" in str(code.source())
class Hello:
def __call__(self) -> None:
pass
- pytest.raises(TypeError, lambda: _pytest._code.Code(Hello))
+ pytest.raises(TypeError, lambda: Code.from_function(Hello))
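Bare `_pytest._code.Code(...)` calls become `Code.from_function(...)`, which accepts a plain function or a callable instance (it looks through `__call__`) but rejects a class object, as the two `raises` checks above exercise. A hedged usage sketch:

from _pytest._code import Code

def sample() -> None:
    pass

code = Code.from_function(sample)
assert code.name == "sample"  # wraps sample.__code__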
def getstatement(lineno: int, source) -> Source:
from _pytest._code.source import getstatementrange_ast
- src = _pytest._code.Source(source)
+ src = Source(source)
ast, start, end = getstatementrange_ast(lineno, src)
return src[start:end]
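A usage sketch for the helper above, assuming the 0-based line numbering that `getstatementrange_ast` uses: the returned slice spans the whole statement containing the requested line.

stmt = getstatement(0, "x = 1\ny = 2")
assert str(stmt) == "x = 1"

# Any line of a multi-line statement yields the full statement.
stmt = getstatement(1, "x = (1 +\n     2)")
assert str(stmt) == "x = (1 +\n     2)"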
class A:
def __init__(self, *args):
frame = sys._getframe(1)
- self.source = _pytest._code.Frame(frame).statement
+ self.source = Frame(frame).statement
# fmt: off
x = A('x',
from typing import List
import pytest
-from _pytest.pytester import RunResult
-from _pytest.pytester import Testdir
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.pytester import Pytester
if sys.gettrace():
# (https://github.com/pytest-dev/pytest/issues/5070)
neutral_items.append(item)
else:
- if "testdir" in fixtures:
+ if "pytester" in fixtures:
co_names = item.function.__code__.co_names
if spawn_names.intersection(co_names):
item.add_marker(pytest.mark.uses_pexpect)
@pytest.fixture
-def dummy_yaml_custom_test(testdir):
+def dummy_yaml_custom_test(pytester: Pytester):
"""Writes a conftest file that collects and executes a dummy yaml test.
Taken from the docs, but stripped down to the bare minimum, useful for
tests which need custom items collected.
"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
pass
"""
)
- testdir.makefile(".yaml", test1="")
+ pytester.makefile(".yaml", test1="")
@pytest.fixture
-def testdir(testdir: Testdir) -> Testdir:
- testdir.monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
- return testdir
+def pytester(pytester: Pytester, monkeypatch: MonkeyPatch) -> Pytester:
+ monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
+ return pytester
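Overriding `pytester` in a conftest this way keeps every child pytest process hermetic: with `PYTEST_DISABLE_PLUGIN_AUTOLOAD` set, setuptools entry-point plugins are not auto-loaded, so subprocess runs see only core pytest plus whatever is enabled explicitly. A sketch of the effect (assuming a suite that enables the pytester plugin):

pytest_plugins = ["pytester"]

def test_hermetic_subprocess(pytester):
    pytester.makepyfile("def test_ok(): assert True")
    # The child inherits PYTEST_DISABLE_PLUGIN_AUTOLOAD from the
    # environment, so no installed third-party plugin can interfere.
    result = pytester.runpytest_subprocess()
    result.assert_outcomes(passed=1)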
@pytest.fixture(scope="session")
"""Replace color names for use with LineMatcher.re_match_lines"""
return [line.format(**cls.RE_COLORS) for line in lines]
- @classmethod
- def requires_ordered_markup(cls, result: RunResult):
- """Should be called if a test expects markup to appear in the output
- in the order they were passed, for example:
-
- tw.write(line, bold=True, red=True)
-
- In Python 3.5 there's no guarantee that the generated markup will appear
- in the order called, so we do some limited color testing and skip the rest of
- the test.
- """
- if sys.version_info < (3, 6):
- # terminal writer.write accepts keyword arguments, so
- # py36+ is required so the markup appears in the expected order
- output = result.stdout.str()
- assert "test session starts" in output
- assert "\x1b[1m" in output
- pytest.skip(
- "doing limited testing because lacking ordered markup on py35"
- )
-
return ColorMapping
@pytest.fixture
-def mock_timing(monkeypatch):
+def mock_timing(monkeypatch: MonkeyPatch):
"""Mocks _pytest.timing with a known object that can be used to control timing in tests
deterministically.
import pytest
from _pytest import deprecated
+from _pytest.pytester import Pytester
from _pytest.pytester import Testdir
session.gethookproxy(testdir.tmpdir)
session.isinitpath(testdir.tmpdir)
assert len(rec) == 0
+
+
+def test_strict_option_is_deprecated(pytester: Pytester) -> None:
+ """--strict is a deprecated alias to --strict-markers (#7530)."""
+ pytester.makepyfile(
+ """
+ import pytest
+
+ @pytest.mark.unknown
+ def test_foo(): pass
+ """
+ )
+ result = pytester.runpytest("--strict")
+ result.stdout.fnmatch_lines(
+ [
+ "'unknown' not found in `markers` configuration option",
+ "*PytestDeprecationWarning: The --strict option is deprecated, use --strict-markers instead.",
+ ]
+ )
+
+
+def test_yield_fixture_is_deprecated() -> None:
+ with pytest.warns(DeprecationWarning, match=r"yield_fixture is deprecated"):
+
+ @pytest.yield_fixture
+ def fix():
+ assert False
+
+
+def test_private_is_deprecated() -> None:
+ class PrivateInit:
+ def __init__(self, foo: int, *, _ispytest: bool = False) -> None:
+ deprecated.check_ispytest(_ispytest)
+
+ with pytest.warns(
+ pytest.PytestDeprecationWarning, match="private pytest class or function"
+ ):
+ PrivateInit(10)
+
+ # Doesn't warn.
+ PrivateInit(10, _ispytest=True)
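The `_ispytest=True` flag used above (and threaded through the `FixtureRequest(...)` call sites later in this diff) guards constructors of private classes. A minimal sketch of the guard, assuming it simply warns when the flag is absent (the real helper lives in `_pytest.deprecated`):

import warnings

import pytest

def check_ispytest(ispytest: bool) -> None:
    # Internal callers pass _ispytest=True; anyone else gets a deprecation
    # warning whose stacklevel points at their own call site.
    if not ispytest:
        warnings.warn(
            pytest.PytestDeprecationWarning(
                "A private pytest class or function was used."
            ),
            stacklevel=3,
        )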
@pytest.fixture(scope="session")
def checked_order():
- order = [] # type: List[Tuple[str, str, str]]
+ order: List[Tuple[str, str, str]] = []
yield order
pprint.pprint(order)
from unittest import IsolatedAsyncioTestCase # type: ignore
-teardowns = [] # type: List[None]
+teardowns: List[None] = []
class AsyncArguments(IsolatedAsyncioTestCase):
import asynctest
-teardowns = [] # type: List[None]
+teardowns: List[None] = []
class Test(asynctest.TestCase):
logfmt = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
- record = logging.LogRecord(
+ record: Any = logging.LogRecord(
name="dummy",
level=logging.INFO,
pathname="dummypath",
msg="Test Message line1\nline2",
args=(),
exc_info=None,
- ) # type: Any
+ )
# this is called by logging.Formatter.format
record.message = record.getMessage()
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
- "{}::test_log_1 ".format(filename),
+ f"{filename}::test_log_1 ",
"*WARNING*log message from test_log_1*",
"PASSED *50%*",
- "{}::test_log_2 ".format(filename),
+ f"{filename}::test_log_2 ",
"*WARNING*log message from test_log_2*",
"PASSED *100%*",
"=* 2 passed in *=",
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
- "{}::test_log_1 ".format(filename),
+ f"{filename}::test_log_1 ",
"*-- live log start --*",
"*WARNING* >>>>> START >>>>>*",
"*-- live log setup --*",
"*WARNING*log message from teardown of test_log_1*",
"*-- live log finish --*",
"*WARNING* <<<<< END <<<<<<<*",
- "{}::test_log_2 ".format(filename),
+ f"{filename}::test_log_2 ",
"*-- live log start --*",
"*WARNING* >>>>> START >>>>>*",
"*-- live log setup --*",
result.stdout.fnmatch_lines(
[
"*WARNING*Unknown Section*",
- "{}::test_log_1 ".format(filename),
+ f"{filename}::test_log_1 ",
"*WARNING* >>>>> START >>>>>*",
"*-- live log setup --*",
"*WARNING*log message from setup of test_log_1*",
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
- "{}::test_log_1 ".format(filename),
+ f"{filename}::test_log_1 ",
"*-- live log start --*",
"*WARNING* >>>>> START >>>>>*",
"*-- live log setup --*",
log_file = testdir.tmpdir.join("pytest.log").strpath
result = testdir.runpytest(
- "-s", "--log-file={}".format(log_file), "--log-file-level=WARNING"
+ "-s", f"--log-file={log_file}", "--log-file-level=WARNING"
)
# fnmatch_lines does an assertion internally
log_file = testdir.tmpdir.join("pytest.log").strpath
- result = testdir.runpytest(
- "-s", "--log-file={}".format(log_file), "--log-file-level=INFO"
- )
+ result = testdir.runpytest("-s", f"--log-file={log_file}", "--log-file-level=INFO")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["test_log_file_cli_level.py PASSED"])
[
"*collected 1 item*",
"*<Module test_collection_collect_only_live_logging.py>*",
- "*no tests ran*",
+ "*1 test collected*",
]
)
elif verbose == "-q":
expected_lines.extend(
[
"*test_collection_collect_only_live_logging.py::test_simple*",
- "no tests ran in [0-9].[0-9][0-9]s",
+ "1 test collected in [0-9].[0-9][0-9]s",
]
)
elif verbose == "-qq":
--- /dev/null
+anyio[curio,trio]==2.0.2
+django==3.1.4
+pytest-asyncio==0.14.0
+pytest-bdd==4.0.1
+pytest-cov==2.10.1
+pytest-django==4.1.0
+pytest-flakes==4.0.3
+pytest-html==3.1.0
+pytest-mock==3.3.1
+pytest-rerunfailures==9.1.1
+pytest-sugar==0.9.4
+pytest-trio==0.7.0
+pytest-twisted==1.13.2
+twisted==20.3.0
+pytest-xvfb==2.0.0
import operator
+import sys
from decimal import Decimal
from fractions import Fraction
from operator import eq
from typing import Optional
import pytest
+from _pytest.pytester import Pytester
from pytest import approx
inf, nan = float("inf"), float("nan")
assert (1, 2) != approx((1,))
assert (1, 2) != approx((1, 2, 3))
+ def test_tuple_vs_other(self):
+ assert 1 != approx((1,))
+
def test_dict(self):
actual = {"a": 1 + 1e-7, "b": 2 + 1e-8}
# Dictionaries became ordered in python3.6, so switch up the order here
assert {"a": 1, "b": 2} != approx({"a": 1, "c": 2})
assert {"a": 1, "b": 2} != approx({"a": 1, "b": 2, "c": 3})
+ def test_dict_nonnumeric(self):
+ assert {"a": 1.0, "b": None} == pytest.approx({"a": 1.0, "b": None})
+ assert {"a": 1.0, "b": 1} != pytest.approx({"a": 1.0, "b": None})
+
+ def test_dict_vs_other(self):
+ assert 1 != approx({"a": 0})
+
def test_numpy_array(self):
np = pytest.importorskip("numpy")
)
mocked_doctest_runner.run(test)
- def test_unicode_plus_minus(self, testdir):
+ def test_unicode_plus_minus(self, pytester: Pytester) -> None:
"""
Comparing approx instances inside lists should not produce an error in the detailed diff.
Integration test for issue #2111.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
def test_foo():
"""
)
expected = "4.0e-06"
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
- ["*At index 0 diff: 3 != 4 ± {}".format(expected), "=* 1 failed in *="]
+ [f"*At index 0 diff: 3 != 4 ± {expected}", "=* 1 failed in *="]
)
+ @pytest.mark.parametrize(
+ "x, name",
+ [
+ pytest.param([[1]], "data structures", id="nested-list"),
+ pytest.param({"key": {"key": 1}}, "dictionaries", id="nested-dict"),
+ ],
+ )
+ def test_expected_value_type_error(self, x, name):
+ with pytest.raises(
+ TypeError, match=fr"pytest.approx\(\) does not support nested {name}:",
+ ):
+ approx(x)
+
@pytest.mark.parametrize(
"x",
[
pytest.param(None),
pytest.param("string"),
pytest.param(["string"], id="nested-str"),
- pytest.param([[1]], id="nested-list"),
pytest.param({"key": "string"}, id="dict-with-string"),
- pytest.param({"key": {"key": 1}}, id="nested-dict"),
],
)
- def test_expected_value_type_error(self, x):
- with pytest.raises(TypeError):
- approx(x)
+ def test_nonnumeric_okay_if_equal(self, x):
+ assert x == approx(x)
+
+ def test_nonnumeric_false_if_unequal(self):
+ """For nonnumeric types, x != pytest.approx(y) reduces to x != y"""
+ assert "ab" != approx("abc")
+ assert ["ab"] != approx(["abc"])
+ # in particular, both of these should return False
+ assert {"a": 1.0} != approx({"a": None})
+ assert {"a": None} != approx({"a": 1.0})
+
+ assert 1.0 != approx(None)
+ assert None != approx(1.0) # noqa: E711
+
+ assert 1.0 != approx([None])
+ assert None != approx([1.0]) # noqa: E711
+
+ @pytest.mark.skipif(sys.version_info < (3, 7), reason="requires ordered dicts")
+ def test_nonnumeric_dict_repr(self):
+ """Dicts with non-numerics and infinites have no tolerances"""
+ x1 = {"foo": 1.0000005, "bar": None, "foobar": inf}
+ assert (
+ repr(approx(x1))
+ == "approx({'foo': 1.0000005 ± 1.0e-06, 'bar': None, 'foobar': inf})"
+ )
+
+ def test_nonnumeric_list_repr(self):
+ """Lists with non-numerics and infinites have no tolerances"""
+ x1 = [1.0000005, None, inf]
+ assert repr(approx(x1)) == "approx([1.0000005 ± 1.0e-06, None, inf])"
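Condensing the behaviour the new approx tests pin down: non-numeric values fall back to plain equality rather than raising, while nested containers are still rejected up front. A sketch:

import pytest
from pytest import approx

# Numeric entries carry tolerances; non-numeric entries compare exactly.
assert {"a": 1.0 + 1e-7, "b": None} == approx({"a": 1.0, "b": None})
assert {"a": 1.0, "b": "x"} != approx({"a": 1.0, "b": "y"})

# Nested data structures still raise TypeError when handed to approx().
with pytest.raises(TypeError):
    approx({"key": {"key": 1}})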
@pytest.mark.parametrize(
"op",
from _pytest._code import filter_traceback
try:
- ns = {} # type: Dict[str, Any]
+ ns: Dict[str, Any] = {}
exec("def foo(): raise ValueError", ns)
ns["foo"]()
except ValueError:
import sys
import textwrap
+from pathlib import Path
import pytest
from _pytest import fixtures
from _pytest.compat import getfuncargnames
from _pytest.config import ExitCode
from _pytest.fixtures import FixtureRequest
-from _pytest.pathlib import Path
from _pytest.pytester import get_public_names
+from _pytest.pytester import Testdir
def test_getfuncargnames_functions():
def test_func(something): pass
"""
)
- req = fixtures.FixtureRequest(item)
+ req = fixtures.FixtureRequest(item, _ispytest=True)
assert req.function == item.obj
assert req.keywords == item.keywords
assert hasattr(req.module, "test_func")
)
(item1,) = testdir.genitems([modcol])
assert item1.name == "test_method"
- arg2fixturedefs = fixtures.FixtureRequest(item1)._arg2fixturedefs
+ arg2fixturedefs = fixtures.FixtureRequest(
+ item1, _ispytest=True
+ )._arg2fixturedefs
assert len(arg2fixturedefs) == 1
assert arg2fixturedefs["something"][0].argname == "something"
def test_request_getmodulepath(self, testdir):
modcol = testdir.getmodulecol("def test_somefunc(): pass")
(item,) = testdir.genitems([modcol])
- req = fixtures.FixtureRequest(item)
+ req = fixtures.FixtureRequest(item, _ispytest=True)
assert req.fspath == modcol.fspath
def test_request_fixturenames(self, testdir):
pass
"""
)
- req1 = fixtures.FixtureRequest(item1)
+ req1 = fixtures.FixtureRequest(item1, _ispytest=True)
assert "xfail" not in item1.keywords
req1.applymarker(pytest.mark.xfail)
assert "xfail" in item1.keywords
req1.applymarker(pytest.mark.skipif)
assert "skipif" in item1.keywords
with pytest.raises(ValueError):
- req1.applymarker(42)
+ req1.applymarker(42) # type: ignore[arg-type]
def test_accesskeywords(self, testdir):
testdir.makepyfile(
"""
from _pytest.pytester import get_public_names
def test_check_setup(item, fm):
- autousenames = fm._getautousenames(item.nodeid)
+ autousenames = list(fm._getautousenames(item.nodeid))
assert len(get_public_names(autousenames)) == 2
assert "perfunction2" in autousenames
assert "perfunction" in autousenames
pass
"""
)
- confcut = "--confcutdir={}".format(testdir.tmpdir)
+ confcut = f"--confcutdir={testdir.tmpdir}"
reprec = testdir.inline_run("-v", "-s", confcut)
reprec.assertoutcome(passed=8)
config = reprec.getcalls("pytest_unconfigure")[0].config
class TestContextManagerFixtureFuncs:
- @pytest.fixture(params=["fixture", "yield_fixture"])
- def flavor(self, request, testdir, monkeypatch):
- monkeypatch.setenv("PYTEST_FIXTURE_FLAVOR", request.param)
- testdir.makepyfile(
- test_context="""
- import os
- import pytest
- import warnings
- VAR = "PYTEST_FIXTURE_FLAVOR"
- if VAR not in os.environ:
- warnings.warn("PYTEST_FIXTURE_FLAVOR was not set, assuming fixture")
- fixture = pytest.fixture
- else:
- fixture = getattr(pytest, os.environ[VAR])
- """
- )
-
- def test_simple(self, testdir, flavor):
+ def test_simple(self, testdir: Testdir) -> None:
testdir.makepyfile(
"""
- from test_context import fixture
- @fixture
+ import pytest
+ @pytest.fixture
def arg1():
print("setup")
yield 1
"""
)
- def test_scoped(self, testdir, flavor):
+ def test_scoped(self, testdir: Testdir) -> None:
testdir.makepyfile(
"""
- from test_context import fixture
- @fixture(scope="module")
+ import pytest
+ @pytest.fixture(scope="module")
def arg1():
print("setup")
yield 1
"""
)
- def test_setup_exception(self, testdir, flavor):
+ def test_setup_exception(self, testdir: Testdir) -> None:
testdir.makepyfile(
"""
- from test_context import fixture
- @fixture(scope="module")
+ import pytest
+ @pytest.fixture(scope="module")
def arg1():
pytest.fail("setup")
yield 1
"""
)
- def test_teardown_exception(self, testdir, flavor):
+ def test_teardown_exception(self, testdir: Testdir) -> None:
testdir.makepyfile(
"""
- from test_context import fixture
- @fixture(scope="module")
+ import pytest
+ @pytest.fixture(scope="module")
def arg1():
yield 1
pytest.fail("teardown")
"""
)
- def test_yields_more_than_one(self, testdir, flavor):
+ def test_yields_more_than_one(self, testdir: Testdir) -> None:
testdir.makepyfile(
"""
- from test_context import fixture
- @fixture(scope="module")
+ import pytest
+ @pytest.fixture(scope="module")
def arg1():
yield 1
yield 2
"""
)
- def test_custom_name(self, testdir, flavor):
+ def test_custom_name(self, testdir: Testdir) -> None:
testdir.makepyfile(
"""
- from test_context import fixture
- @fixture(name='meow')
+ import pytest
+ @pytest.fixture(name='meow')
def arg1():
return 'mew'
def test_1(meow):
" test_foos.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
- "{}:4".format(fixfile),
+ f"{fixfile}:4",
"Requested here:",
"test_foos.py:4",
"*1 failed*",
" test_foos.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
- "{}:4".format(fixfile),
+ f"{fixfile}:4",
"Requested here:",
- "{}:4".format(testfile),
+ f"{testfile}:4",
"*1 failed*",
]
)
"""
)
items, _ = testdir.inline_genitems()
- request = FixtureRequest(items[0])
+ request = FixtureRequest(items[0], _ispytest=True)
assert request.fixturenames == "m1 f1".split()
def test_func_closure_with_native_fixtures(self, testdir, monkeypatch) -> None:
"""
)
items, _ = testdir.inline_genitems()
- request = FixtureRequest(items[0])
+ request = FixtureRequest(items[0], _ispytest=True)
# order of fixtures based on their scope and position in the parameter list
assert (
request.fixturenames == "s1 my_tmpdir_factory p1 m1 f1 f2 my_tmpdir".split()
"""
)
items, _ = testdir.inline_genitems()
- request = FixtureRequest(items[0])
+ request = FixtureRequest(items[0], _ispytest=True)
assert request.fixturenames == "m1 f1".split()
def test_func_closure_scopes_reordered(self, testdir):
"""
)
items, _ = testdir.inline_genitems()
- request = FixtureRequest(items[0])
+ request = FixtureRequest(items[0], _ispytest=True)
assert request.fixturenames == "s1 m1 c1 f2 f1".split()
def test_func_closure_same_scope_closer_root_first(self, testdir):
}
)
items, _ = testdir.inline_genitems()
- request = FixtureRequest(items[0])
+ request = FixtureRequest(items[0], _ispytest=True)
assert request.fixturenames == "p_sub m_conf m_sub m_test f1".split()
def test_func_closure_all_scopes_complex(self, testdir):
"""
)
items, _ = testdir.inline_genitems()
- request = FixtureRequest(items[0])
+ request = FixtureRequest(items[0], _ispytest=True)
assert request.fixturenames == "s1 p1 m1 m2 c1 f2 f1".split()
def test_multiple_packages(self, testdir):
# this hook finds funcarg factories
rep = runner.collect_one_node(collector=modcol)
# TODO: Don't treat as Any.
- clscol = rep.result[0] # type: Any
+ clscol: Any = rep.result[0]
clscol.obj = lambda arg1: None
clscol.funcargs = {}
pytest._fillfuncargs(clscol)
# this hook finds funcarg factories
rep = runner.collect_one_node(modcol)
# TODO: Don't treat as Any.
- clscol = rep.result[0] # type: Any
+ clscol: Any = rep.result[0]
clscol.obj = lambda: None
clscol.funcargs = {}
pytest._fillfuncargs(clscol)
_nodeid = attr.ib()
names = getfuncargnames(func)
- fixtureinfo = FuncFixtureInfoMock(names) # type: Any
- definition = DefinitionMock._create(func, "mock::nodeid") # type: Any
+ fixtureinfo: Any = FuncFixtureInfoMock(names)
+ definition: Any = DefinitionMock._create(func, "mock::nodeid")
return python.Metafunc(definition, fixtureinfo, config)
def test_no_funcargs(self) -> None:
option = "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
- values = [
+ values: List[Tuple[str, Any, str]] = [
("ação", MockConfig({option: True}), "ação"),
("ação", MockConfig({option: False}), "a\\xe7\\xe3o"),
- ] # type: List[Tuple[str, Any, str]]
+ ]
for val, config, expected in values:
actual = _idval(val, "a", 6, None, nodeid=None, config=config)
assert actual == expected
option = "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
- values = [
+ values: List[Tuple[Any, str]] = [
(MockConfig({option: True}), "ação"),
(MockConfig({option: False}), "a\\xe7\\xe3o"),
- ] # type: List[Tuple[Any, str]]
+ ]
for config, expected in values:
result = idmaker(
("a",), [pytest.param("string")], idfn=lambda _: "ação", config=config,
option = "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
- values = [
+ values: List[Tuple[Any, str]] = [
(MockConfig({option: True}), "ação"),
(MockConfig({option: False}), "a\\xe7\\xe3o"),
- ] # type: List[Tuple[Any, str]]
+ ]
for config, expected in values:
result = idmaker(
("a",), [pytest.param("string")], ids=["ação"], config=config,
' @pytest.mark.parametrise("x", range(2))',
"E Failed: Unknown 'parametrise' mark, did you mean 'parametrize'?",
"*! Interrupted: 1 error during collection !*",
- "*= 1 error in *",
+ "*= no tests collected, 1 error in *",
]
)
self, testdir: Testdir, monkeypatch
) -> None:
"""Integration test for (#3941)"""
- class_fix_setup = [] # type: List[object]
+ class_fix_setup: List[object] = []
monkeypatch.setattr(sys, "class_fix_setup", class_fix_setup, raising=False)
- func_fix_setup = [] # type: List[object]
+ func_fix_setup: List[object] = []
monkeypatch.setattr(sys, "func_fix_setup", func_fix_setup, raising=False)
testdir.makepyfile(
class T:
def __call__(self):
- # Early versions of Python 3.5 have some bug causing the
- # __call__ frame to still refer to t even after everything
- # is done. This makes the test pass for them.
- if sys.version_info < (3, 5, 2):
- del self
raise ValueError
t = T()
pytest.raises(TypeError, int, match="invalid")
def tfunc(match):
- raise ValueError("match={}".format(match))
+ raise ValueError(f"match={match}")
pytest.raises(ValueError, tfunc, match="asdf").match("match=asdf")
pytest.raises(ValueError, tfunc, match="").match("match=")
res_bash = set(fc(prefix))
retval = set(res) == res_bash
if out:
- out.write("equal_with_bash({}) {} {}\n".format(prefix, retval, res))
+ out.write(f"equal_with_bash({prefix}) {retval} {res}\n")
if not retval:
out.write(" python - bash: %s\n" % (set(res) - res_bash))
out.write(" bash - python: %s\n" % (res_bash - set(res)))
completion = []
if self.allowednames:
if self.directories:
- files = _wrapcall(
- ["bash", "-c", "compgen -A directory -- '{p}'".format(p=prefix)]
- )
+ files = _wrapcall(["bash", "-c", f"compgen -A directory -- '{prefix}'"])
completion += [f + "/" for f in files]
for x in self.allowednames:
completion += _wrapcall(
- [
- "bash",
- "-c",
- "compgen -A file -X '!*.{0}' -- '{p}'".format(x, p=prefix),
- ]
+ ["bash", "-c", f"compgen -A file -X '!*.{x}' -- '{prefix}'"]
)
else:
- completion += _wrapcall(
- ["bash", "-c", "compgen -A file -- '{p}'".format(p=prefix)]
- )
+ completion += _wrapcall(["bash", "-c", f"compgen -A file -- '{prefix}'"])
- anticomp = _wrapcall(
- ["bash", "-c", "compgen -A directory -- '{p}'".format(p=prefix)]
- )
+ anticomp = _wrapcall(["bash", "-c", f"compgen -A directory -- '{prefix}'"])
completion = list(set(completion) - set(anticomp))
-import collections.abc
+import collections
import sys
import textwrap
from typing import Any
from typing import List
+from typing import MutableSequence
from typing import Optional
import attr
from _pytest import outcomes
from _pytest.assertion import truncate
from _pytest.assertion import util
-from _pytest.compat import ATTRS_EQ_FIELD
+from _pytest.pytester import Pytester
def mock_config(verbose=0):
class TestImportHookInstallation:
@pytest.mark.parametrize("initial_conftest", [True, False])
@pytest.mark.parametrize("mode", ["plain", "rewrite"])
- def test_conftest_assertion_rewrite(self, testdir, initial_conftest, mode):
+ def test_conftest_assertion_rewrite(
+ self, pytester: Pytester, initial_conftest, mode
+ ) -> None:
"""Test that conftest files are using assertion rewrite on import (#1619)."""
- testdir.tmpdir.join("foo/tests").ensure(dir=1)
+ pytester.mkdir("foo")
+ pytester.mkdir("foo/tests")
conftest_path = "conftest.py" if initial_conftest else "foo/conftest.py"
contents = {
conftest_path: """
check_first([10, 30], 30)
""",
}
- testdir.makepyfile(**contents)
- result = testdir.runpytest_subprocess("--assert=%s" % mode)
+ pytester.makepyfile(**contents)
+ result = pytester.runpytest_subprocess("--assert=%s" % mode)
if mode == "plain":
expected = "E AssertionError"
elif mode == "rewrite":
assert 0
result.stdout.fnmatch_lines([expected])
- def test_rewrite_assertions_pytester_plugin(self, testdir):
+ def test_rewrite_assertions_pytester_plugin(self, pytester: Pytester) -> None:
"""
Assertions in the pytester plugin must also benefit from assertion
rewriting (#1920).
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
pytest_plugins = ['pytester']
- def test_dummy_failure(testdir): # how meta!
- testdir.makepyfile('def test(): assert 0')
- r = testdir.inline_run()
+ def test_dummy_failure(pytester): # how meta!
+ pytester.makepyfile('def test(): assert 0')
+ r = pytester.inline_run()
r.assertoutcome(passed=1)
"""
)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"> r.assertoutcome(passed=1)",
)
@pytest.mark.parametrize("mode", ["plain", "rewrite"])
- def test_pytest_plugins_rewrite(self, testdir, mode):
+ def test_pytest_plugins_rewrite(self, pytester: Pytester, mode) -> None:
contents = {
"conftest.py": """
pytest_plugins = ['ham']
check_first([10, 30], 30)
""",
}
- testdir.makepyfile(**contents)
- result = testdir.runpytest_subprocess("--assert=%s" % mode)
+ pytester.makepyfile(**contents)
+ result = pytester.runpytest_subprocess("--assert=%s" % mode)
if mode == "plain":
expected = "E AssertionError"
elif mode == "rewrite":
result.stdout.fnmatch_lines([expected])
@pytest.mark.parametrize("mode", ["str", "list"])
- def test_pytest_plugins_rewrite_module_names(self, testdir, mode):
+ def test_pytest_plugins_rewrite_module_names(
+ self, pytester: Pytester, mode
+ ) -> None:
"""Test that pluginmanager correct marks pytest_plugins variables
for assertion rewriting if they are defined as plain strings or
list of strings (#1888).
assert 'ham' in pytestconfig.pluginmanager.rewrite_hook._must_rewrite
""",
}
- testdir.makepyfile(**contents)
- result = testdir.runpytest_subprocess("--assert=rewrite")
+ pytester.makepyfile(**contents)
+ result = pytester.runpytest_subprocess("--assert=rewrite")
assert result.ret == 0
- def test_pytest_plugins_rewrite_module_names_correctly(self, testdir):
+ def test_pytest_plugins_rewrite_module_names_correctly(
+ self, pytester: Pytester
+ ) -> None:
"""Test that we match files correctly when they are marked for rewriting (#2939)."""
contents = {
"conftest.py": """\
assert pytestconfig.pluginmanager.rewrite_hook.find_spec('hamster') is None
""",
}
- testdir.makepyfile(**contents)
- result = testdir.runpytest_subprocess("--assert=rewrite")
+ pytester.makepyfile(**contents)
+ result = pytester.runpytest_subprocess("--assert=rewrite")
assert result.ret == 0
@pytest.mark.parametrize("mode", ["plain", "rewrite"])
- def test_installed_plugin_rewrite(self, testdir, mode, monkeypatch):
+ def test_installed_plugin_rewrite(
+ self, pytester: Pytester, mode, monkeypatch
+ ) -> None:
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
# Make sure the hook is installed early enough so that plugins
# installed via setuptools are rewritten.
- testdir.tmpdir.join("hampkg").ensure(dir=1)
+ pytester.mkdir("hampkg")
contents = {
"hampkg/__init__.py": """\
import pytest
check_first([10, 30], 30)
""",
}
- testdir.makepyfile(**contents)
- result = testdir.run(
+ pytester.makepyfile(**contents)
+ result = pytester.run(
sys.executable, "mainwrapper.py", "-s", "--assert=%s" % mode
)
if mode == "plain":
assert 0
result.stdout.fnmatch_lines([expected])
- def test_rewrite_ast(self, testdir):
- testdir.tmpdir.join("pkg").ensure(dir=1)
+ def test_rewrite_ast(self, pytester: Pytester) -> None:
+ pytester.mkdir("pkg")
contents = {
"pkg/__init__.py": """
import pytest
pkg.other.tool()
""",
}
- testdir.makepyfile(**contents)
- result = testdir.runpytest_subprocess("--assert=rewrite")
+ pytester.makepyfile(**contents)
+ result = pytester.runpytest_subprocess("--assert=rewrite")
result.stdout.fnmatch_lines(
[
">*assert a == b*",
class TestBinReprIntegration:
- def test_pytest_assertrepr_compare_called(self, testdir):
- testdir.makeconftest(
+ def test_pytest_assertrepr_compare_called(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
values = []
return values
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_hello():
assert 0 == 1
assert list == [("==", 0, 1)]
"""
)
- result = testdir.runpytest("-v")
+ result = pytester.runpytest("-v")
result.stdout.fnmatch_lines(["*test_hello*FAIL*", "*test_check*PASS*"])
class TestAssert_reprcompare:
- def test_different_types(self):
+ def test_different_types(self) -> None:
assert callequal([0, 1], "foo") is None
def test_summary(self) -> None:
summary = lines[0]
assert len(summary) < 65
- def test_text_diff(self):
+ def test_text_diff(self) -> None:
assert callequal("spam", "eggs") == [
"'spam' == 'eggs'",
"- eggs",
assert "- eggs" in diff
assert "+ spam" in diff
- def test_bytes_diff_normal(self):
+ def test_bytes_diff_normal(self) -> None:
"""Check special handling for bytes diff (#5260)"""
diff = callequal(b"spam", b"eggs")
"Use -v to get the full diff",
]
- def test_bytes_diff_verbose(self):
+ def test_bytes_diff_verbose(self) -> None:
"""Check special handling for bytes diff (#5260)"""
diff = callequal(b"spam", b"eggs", verbose=1)
assert diff == [
assert expl is not None
assert len(expl) > 1
- def test_list_wrap_for_multiple_lines(self):
+ def test_list_wrap_for_multiple_lines(self) -> None:
long_d = "d" * 80
l1 = ["a", "b", "c"]
l2 = ["a", "b", "c", long_d]
" ]",
]
- def test_list_wrap_for_width_rewrap_same_length(self):
+ def test_list_wrap_for_width_rewrap_same_length(self) -> None:
long_a = "a" * 30
long_b = "b" * 30
long_c = "c" * 30
" ]",
]
- def test_list_dont_wrap_strings(self):
+ def test_list_dont_wrap_strings(self) -> None:
long_a = "a" * 10
l1 = ["a"] + [long_a for _ in range(0, 7)]
l2 = ["should not get wrapped"]
" ]",
]
- def test_dict_wrap(self):
+ def test_dict_wrap(self) -> None:
d1 = {"common": 1, "env": {"env1": 1, "env2": 2}}
d2 = {"common": 1, "env": {"env1": 1}}
assert "Omitting" not in lines[1]
assert lines[2] == "{'b': 1}"
- def test_dict_different_items(self):
+ def test_dict_different_items(self) -> None:
lines = callequal({"a": 0}, {"b": 1, "c": 2}, verbose=2)
assert lines == [
"{'a': 0} == {'b': 1, 'c': 2}",
"+ {'b': 1, 'c': 2}",
]
- def test_sequence_different_items(self):
+ def test_sequence_different_items(self) -> None:
lines = callequal((1, 2), (3, 4, 5), verbose=2)
assert lines == [
"(1, 2) == (3, 4, 5)",
def test_Sequence(self) -> None:
# Test comparing with a Sequence subclass.
- # TODO(py36): Inherit from typing.MutableSequence[int].
- class TestSequence(collections.abc.MutableSequence): # type: ignore[type-arg]
+ class TestSequence(MutableSequence[int]):
def __init__(self, iterable):
self.elements = list(iterable)
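The hunk elides the rest of `TestSequence`; to instantiate `MutableSequence[int]` the class must supply the five abstract methods. An illustrative completion delegating to the wrapped list (not quoted from the source):

class TestSequence(MutableSequence[int]):
    def __init__(self, iterable):
        self.elements = list(iterable)

    def __getitem__(self, item):
        return self.elements[item]

    def __setitem__(self, item, value):
        self.elements[item] = value

    def __delitem__(self, item):
        del self.elements[item]

    def __len__(self):
        return len(self.elements)

    def insert(self, index, value):
        self.elements.insert(index, value)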
" Probably an object has a faulty __repr__.)",
]
- def test_one_repr_empty(self):
+ def test_one_repr_empty(self) -> None:
"""The faulty empty string repr did trigger an unbound local error in _diff_text."""
class A(str):
assert expl is not None
assert "raised in repr()" not in " ".join(expl)
- def test_unicode(self):
+ def test_unicode(self) -> None:
assert callequal("£€", "£") == [
"'£€' == '£'",
"- £",
"+ £€",
]
- def test_nonascii_text(self):
+ def test_nonascii_text(self) -> None:
"""
:issue: 877
non-ASCII Python 2 str caused a UnicodeDecodeError
expl = callequal(A(), "1")
assert expl == ["ÿ == '1'", "- 1"]
- def test_format_nonascii_explanation(self):
+ def test_format_nonascii_explanation(self) -> None:
assert util.format_explanation("λ")
def test_mojibake(self) -> None:
class TestAssert_reprcompare_dataclass:
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+")
- def test_dataclasses(self, testdir):
- p = testdir.copy_example("dataclasses/test_compare_dataclasses.py")
- result = testdir.runpytest(p)
+ def test_dataclasses(self, pytester: Pytester) -> None:
+ p = pytester.copy_example("dataclasses/test_compare_dataclasses.py")
+ result = pytester.runpytest(p)
result.assert_outcomes(failed=1, passed=0)
result.stdout.fnmatch_lines(
[
)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+")
- def test_recursive_dataclasses(self, testdir):
- p = testdir.copy_example("dataclasses/test_compare_recursive_dataclasses.py")
- result = testdir.runpytest(p)
+ def test_recursive_dataclasses(self, pytester: Pytester) -> None:
+ p = pytester.copy_example("dataclasses/test_compare_recursive_dataclasses.py")
+ result = pytester.runpytest(p)
result.assert_outcomes(failed=1, passed=0)
result.stdout.fnmatch_lines(
[
)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+")
- def test_recursive_dataclasses_verbose(self, testdir):
- p = testdir.copy_example("dataclasses/test_compare_recursive_dataclasses.py")
- result = testdir.runpytest(p, "-vv")
+ def test_recursive_dataclasses_verbose(self, pytester: Pytester) -> None:
+ p = pytester.copy_example("dataclasses/test_compare_recursive_dataclasses.py")
+ result = pytester.runpytest(p, "-vv")
result.assert_outcomes(failed=1, passed=0)
result.stdout.fnmatch_lines(
[
)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+")
- def test_dataclasses_verbose(self, testdir):
- p = testdir.copy_example("dataclasses/test_compare_dataclasses_verbose.py")
- result = testdir.runpytest(p, "-vv")
+ def test_dataclasses_verbose(self, pytester: Pytester) -> None:
+ p = pytester.copy_example("dataclasses/test_compare_dataclasses_verbose.py")
+ result = pytester.runpytest(p, "-vv")
result.assert_outcomes(failed=1, passed=0)
result.stdout.fnmatch_lines(
[
)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+")
- def test_dataclasses_with_attribute_comparison_off(self, testdir):
- p = testdir.copy_example(
+ def test_dataclasses_with_attribute_comparison_off(
+ self, pytester: Pytester
+ ) -> None:
+ p = pytester.copy_example(
"dataclasses/test_compare_dataclasses_field_comparison_off.py"
)
- result = testdir.runpytest(p, "-vv")
+ result = pytester.runpytest(p, "-vv")
result.assert_outcomes(failed=0, passed=1)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+")
- def test_comparing_two_different_data_classes(self, testdir):
- p = testdir.copy_example(
+ def test_comparing_two_different_data_classes(self, pytester: Pytester) -> None:
+ p = pytester.copy_example(
"dataclasses/test_compare_two_different_dataclasses.py"
)
- result = testdir.runpytest(p, "-vv")
+ result = pytester.runpytest(p, "-vv")
result.assert_outcomes(failed=0, passed=1)
assert "Omitting" not in lines[2]
assert lines[3] == "['field_a']"
- def test_attrs_with_attribute_comparison_off(self):
+ def test_attrs_with_attribute_comparison_off(self) -> None:
@attr.s
class SimpleDataObject:
field_a = attr.ib()
- field_b = attr.ib(**{ATTRS_EQ_FIELD: False}) # type: ignore
+ field_b = attr.ib(eq=False)
left = SimpleDataObject(1, "b")
right = SimpleDataObject(1, "b")
for line in lines[3:]:
assert "field_b" not in line
- def test_comparing_two_different_attrs_classes(self):
+ def test_comparing_two_different_attrs_classes(self) -> None:
@attr.s
class SimpleDataObjectOne:
field_a = attr.ib()
assert lines is None
+class TestAssert_reprcompare_namedtuple:
+ def test_namedtuple(self) -> None:
+ NT = collections.namedtuple("NT", ["a", "b"])
+
+ left = NT(1, "b")
+ right = NT(1, "c")
+
+ lines = callequal(left, right)
+ assert lines == [
+ "NT(a=1, b='b') == NT(a=1, b='c')",
+ "",
+ "Omitting 1 identical items, use -vv to show",
+ "Differing attributes:",
+ "['b']",
+ "",
+ "Drill down into differing attribute b:",
+ " b: 'b' != 'c'",
+ " - c",
+ " + b",
+ "Use -v to get the full diff",
+ ]
+
+ def test_comparing_two_different_namedtuple(self) -> None:
+ NT1 = collections.namedtuple("NT1", ["a", "b"])
+ NT2 = collections.namedtuple("NT2", ["a", "b"])
+
+ left = NT1(1, "b")
+ right = NT2(2, "b")
+
+ lines = callequal(left, right)
+ # Because the types differ, the comparison uses the generic sequence matcher.
+ assert lines == [
+ "NT1(a=1, b='b') == NT2(a=2, b='b')",
+ "At index 0 diff: 1 != 2",
+ "Use -v to get the full diff",
+ ]
+
+
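As a self-contained failing-test sketch, the report asserted above comes from an assertion like this: same-type namedtuples get the attrs/dataclass-style field-by-field drill-down, while differing types fall back to plain sequence comparison.

import collections

NT = collections.namedtuple("NT", ["a", "b"])

def test_namedtuple_diff():
    # Fails; the report drills into the single differing field 'b'.
    assert NT(1, "b") == NT(1, "c")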
class TestFormatExplanation:
- def test_special_chars_full(self, testdir):
+ def test_special_chars_full(self, pytester: Pytester) -> None:
# Issue 453: before the fix, this would raise an IndexError.
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_foo():
assert '\\n}' == ''
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(["*AssertionError*"])
- def test_fmt_simple(self):
+ def test_fmt_simple(self) -> None:
expl = "assert foo"
assert util.format_explanation(expl) == "assert foo"
- def test_fmt_where(self):
+ def test_fmt_where(self) -> None:
expl = "\n".join(["assert 1", "{1 = foo", "} == 2"])
res = "\n".join(["assert 1 == 2", " + where 1 = foo"])
assert util.format_explanation(expl) == res
- def test_fmt_and(self):
+ def test_fmt_and(self) -> None:
expl = "\n".join(["assert 1", "{1 = foo", "} == 2", "{2 = bar", "}"])
res = "\n".join(["assert 1 == 2", " + where 1 = foo", " + and 2 = bar"])
assert util.format_explanation(expl) == res
- def test_fmt_where_nested(self):
+ def test_fmt_where_nested(self) -> None:
expl = "\n".join(["assert 1", "{1 = foo", "{foo = bar", "}", "} == 2"])
res = "\n".join(["assert 1 == 2", " + where 1 = foo", " + where foo = bar"])
assert util.format_explanation(expl) == res
- def test_fmt_newline(self):
+ def test_fmt_newline(self) -> None:
expl = "\n".join(['assert "foo" == "bar"', "~- foo", "~+ bar"])
res = "\n".join(['assert "foo" == "bar"', " - foo", " + bar"])
assert util.format_explanation(expl) == res
- def test_fmt_newline_escaped(self):
+ def test_fmt_newline_escaped(self) -> None:
expl = "\n".join(["assert foo == bar", "baz"])
res = "assert foo == bar\\nbaz"
assert util.format_explanation(expl) == res
- def test_fmt_newline_before_where(self):
+ def test_fmt_newline_before_where(self) -> None:
expl = "\n".join(
[
"the assertion message here",
)
assert util.format_explanation(expl) == res
- def test_fmt_multi_newline_before_where(self):
+ def test_fmt_multi_newline_before_where(self) -> None:
expl = "\n".join(
[
"the assertion",
LINES_IN_TRUNCATION_MSG = 2
def test_doesnt_truncate_when_input_is_empty_list(self) -> None:
- expl = [] # type: List[str]
+ expl: List[str] = []
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
assert result == expl
- def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self):
+ def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self) -> None:
expl = ["a" * 100 for x in range(5)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)
assert result == expl
- def test_truncates_at_8_lines_when_given_list_of_empty_strings(self):
+ def test_truncates_at_8_lines_when_given_list_of_empty_strings(self) -> None:
expl = ["" for x in range(50)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
assert result != expl
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
- def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self):
+ def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self) -> None:
expl = ["a" for x in range(100)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)
assert result != expl
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
- def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self):
+ def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self) -> None:
expl = ["a" * 80 for x in range(16)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)
assert result != expl
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
- def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self):
+ def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self) -> None:
expl = ["a" * 250 for x in range(10)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=999)
assert result != expl
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
- def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self):
+ def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self) -> None:
expl = ["a" * 250 for x in range(1000)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
assert result != expl
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
- def test_full_output_truncated(self, monkeypatch, testdir):
+ def test_full_output_truncated(self, monkeypatch, pytester: Pytester) -> None:
"""Test against full runpytest() output."""
line_count = 7
line_len = 100
expected_truncated_lines = 2
- testdir.makepyfile(
+ pytester.makepyfile(
r"""
def test_many_lines():
a = list([str(i)[0] * %d for i in range(%d)])
)
monkeypatch.delenv("CI", raising=False)
- result = testdir.runpytest()
+ result = pytester.runpytest()
# without -vv, truncate the message showing a few diff lines only
result.stdout.fnmatch_lines(
[
]
)
- result = testdir.runpytest("-vv")
+ result = pytester.runpytest("-vv")
result.stdout.fnmatch_lines(["* 6*"])
monkeypatch.setenv("CI", "1")
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 6*"])
-def test_python25_compile_issue257(testdir):
- testdir.makepyfile(
+def test_python25_compile_issue257(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_rewritten():
assert 1 == 2
# some comment
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
"""
)
-def test_rewritten(testdir):
- testdir.makepyfile(
+def test_rewritten(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_rewritten():
assert "@py_builtins" in globals()
"""
)
- assert testdir.runpytest().ret == 0
+ assert pytester.runpytest().ret == 0
def test_reprcompare_notin() -> None:
]
-def test_reprcompare_whitespaces():
+def test_reprcompare_whitespaces() -> None:
assert callequal("\r\n", "\n") == [
r"'\r\n' == '\n'",
r"Strings contain only whitespace, escaping them using repr()",
]
-def test_pytest_assertrepr_compare_integration(testdir):
- testdir.makepyfile(
+def test_pytest_assertrepr_compare_integration(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_hello():
x = set(range(100))
assert x == y
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*def test_hello():*",
)
-def test_sequence_comparison_uses_repr(testdir):
- testdir.makepyfile(
+def test_sequence_comparison_uses_repr(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_hello():
x = set("hello x")
assert x == y
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*def test_hello():*",
)
-def test_assertrepr_loaded_per_dir(testdir):
- testdir.makepyfile(test_base=["def test_base(): assert 1 == 2"])
- a = testdir.mkdir("a")
- a_test = a.join("test_a.py")
- a_test.write("def test_a(): assert 1 == 2")
- a_conftest = a.join("conftest.py")
- a_conftest.write('def pytest_assertrepr_compare(): return ["summary a"]')
- b = testdir.mkdir("b")
- b_test = b.join("test_b.py")
- b_test.write("def test_b(): assert 1 == 2")
- b_conftest = b.join("conftest.py")
- b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]')
- result = testdir.runpytest()
+def test_assertrepr_loaded_per_dir(pytester: Pytester) -> None:
+ pytester.makepyfile(test_base=["def test_base(): assert 1 == 2"])
+ a = pytester.mkdir("a")
+ a.joinpath("test_a.py").write_text("def test_a(): assert 1 == 2")
+ a.joinpath("conftest.py").write_text(
+ 'def pytest_assertrepr_compare(): return ["summary a"]'
+ )
+ b = pytester.mkdir("b")
+ b.joinpath("test_b.py").write_text("def test_b(): assert 1 == 2")
+ b.joinpath("conftest.py").write_text(
+ 'def pytest_assertrepr_compare(): return ["summary b"]'
+ )
+
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*def test_base():*",
)
-def test_assertion_options(testdir):
- testdir.makepyfile(
+def test_assertion_options(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_hello():
x = 3
assert x == 4
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert "3 == 4" in result.stdout.str()
- result = testdir.runpytest_subprocess("--assert=plain")
+ result = pytester.runpytest_subprocess("--assert=plain")
result.stdout.no_fnmatch_line("*3 == 4*")
-def test_triple_quoted_string_issue113(testdir):
- testdir.makepyfile(
+def test_triple_quoted_string_issue113(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_hello():
assert "" == '''
'''"""
)
- result = testdir.runpytest("--fulltrace")
+ result = pytester.runpytest("--fulltrace")
result.stdout.fnmatch_lines(["*1 failed*"])
result.stdout.no_fnmatch_line("*SyntaxError*")
-def test_traceback_failure(testdir):
- p1 = testdir.makepyfile(
+def test_traceback_failure(pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def g():
return 2
f(3)
"""
)
- result = testdir.runpytest(p1, "--tb=long")
+ result = pytester.runpytest(p1, "--tb=long")
result.stdout.fnmatch_lines(
[
"*test_traceback_failure.py F*",
]
)
- result = testdir.runpytest(p1) # "auto"
+ result = pytester.runpytest(p1) # "auto"
result.stdout.fnmatch_lines(
[
"*test_traceback_failure.py F*",
)
-def test_exception_handling_no_traceback(testdir):
+def test_exception_handling_no_traceback(pytester: Pytester) -> None:
"""Handle chain exceptions in tasks submitted by the multiprocess module (#1984)."""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
from multiprocessing import Pool
multitask_job()
"""
)
- testdir.syspathinsert()
- result = testdir.runpytest(p1, "--tb=long")
+ pytester.syspathinsert()
+ result = pytester.runpytest(p1, "--tb=long")
result.stdout.fnmatch_lines(
[
"====* FAILURES *====",
),
],
)
-def test_warn_missing(testdir, cmdline_args, warning_output):
- testdir.makepyfile("")
+def test_warn_missing(pytester: Pytester, cmdline_args, warning_output) -> None:
+ pytester.makepyfile("")
- result = testdir.run(sys.executable, *cmdline_args)
+ result = pytester.run(sys.executable, *cmdline_args)
result.stdout.fnmatch_lines(warning_output)
-def test_recursion_source_decode(testdir):
- testdir.makepyfile(
+def test_recursion_source_decode(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_something():
pass
"""
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
python_files = *.py
"""
)
- result = testdir.runpytest("--collect-only")
+ result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(
"""
<Module*>
)
-def test_AssertionError_message(testdir):
- testdir.makepyfile(
+def test_AssertionError_message(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_hello():
x,y = 1,2
assert 0, (x,y)
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
"""
*def test_hello*
)
-def test_diff_newline_at_end(testdir):
- testdir.makepyfile(
+def test_diff_newline_at_end(pytester: Pytester) -> None:
+ pytester.makepyfile(
r"""
def test_diff():
assert 'asdf' == 'asdf\n'
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
r"""
*assert 'asdf' == 'asdf\n'
@pytest.mark.filterwarnings("default")
-def test_assert_tuple_warning(testdir):
+def test_assert_tuple_warning(pytester: Pytester) -> None:
msg = "assertion is always true"
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_tuple():
assert(False, 'you shall not pass')
"""
)
- result = testdir.runpytest()
- result.stdout.fnmatch_lines(["*test_assert_tuple_warning.py:2:*{}*".format(msg)])
+ result = pytester.runpytest()
+ result.stdout.fnmatch_lines([f"*test_assert_tuple_warning.py:2:*{msg}*"])
# tuples with size != 2 should not trigger the warning
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_tuple():
assert ()
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert msg not in result.stdout.str()
-def test_assert_indirect_tuple_no_warning(testdir):
- testdir.makepyfile(
+def test_assert_indirect_tuple_no_warning(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_tuple():
tpl = ('foo', 'bar')
assert tpl
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
output = "\n".join(result.stdout.lines)
assert "WR1" not in output
-def test_assert_with_unicode(testdir):
- testdir.makepyfile(
+def test_assert_with_unicode(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""\
def test_unicode():
assert 'ìœ ë‹ˆì½”ë“œ' == 'Unicode'
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*AssertionError*"])
-def test_raise_unprintable_assertion_error(testdir):
- testdir.makepyfile(
+def test_raise_unprintable_assertion_error(pytester: Pytester) -> None:
+ pytester.makepyfile(
r"""
def test_raise_assertion_error():
raise AssertionError('\xff')
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[r"> raise AssertionError('\xff')", "E AssertionError: *"]
)
-def test_raise_assertion_error_raisin_repr(testdir):
- testdir.makepyfile(
+def test_raise_assertion_error_raisin_repr(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
class RaisingRepr(object):
def __repr__(self):
raise AssertionError(RaisingRepr())
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
["E AssertionError: <unprintable AssertionError object>"]
)
-def test_issue_1944(testdir):
- testdir.makepyfile(
+def test_issue_1944(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def f():
return
assert f() == 10
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 error*"])
assert (
"AttributeError: 'Module' object has no attribute '_obj'"
)
-def test_exit_from_assertrepr_compare(monkeypatch):
+def test_exit_from_assertrepr_compare(monkeypatch) -> None:
def raise_exit(obj):
outcomes.exit("Quitting debugger")
callequal(1, 1)
-def test_assertion_location_with_coverage(testdir):
+def test_assertion_location_with_coverage(pytester: Pytester) -> None:
"""This used to report the wrong location when run with coverage (#5754)."""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
def test():
assert False, 1
assert False, 2
"""
)
- result = testdir.runpytest(str(p))
+ result = pytester.runpytest(str(p))
result.stdout.fnmatch_lines(
[
"> assert False, 1",
import errno
import glob
import importlib
+import marshal
import os
import py_compile
import stat
import textwrap
import zipfile
from functools import partial
+from pathlib import Path
from typing import Dict
from typing import List
from typing import Mapping
from _pytest.assertion.rewrite import rewrite_asserts
from _pytest.config import ExitCode
from _pytest.pathlib import make_numbered_dir
-from _pytest.pathlib import Path
-from _pytest.pytester import Testdir
+from _pytest.pytester import Pytester
def rewrite(src: str) -> ast.Module:
f, extra_ns: Optional[Mapping[str, object]] = None, *, must_pass: bool = False
) -> Optional[str]:
"""Rewrite the assertions in f, run it, and get the failure message."""
- src = "\n".join(_pytest._code.Code(f).source().lines)
+ src = "\n".join(_pytest._code.Code.from_function(f).source().lines)
mod = rewrite(src)
code = compile(mod, "<test>", "exec")
- ns = {} # type: Dict[str, object]
+ ns: Dict[str, object] = {}
if extra_ns is not None:
ns.update(extra_ns)
exec(code, ns)
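A sketch of how `getmsg` is driven by the tests that follow: rewrite a small function, run it, and inspect the enriched assertion message (output shown under the default, hook-free reprs):

def f() -> None:
    x = 1
    assert x == 2

msg = getmsg(f)
assert msg == "assert 1 == 2"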
class TestAssertionRewrite:
- def test_place_initial_imports(self):
+ def test_place_initial_imports(self) -> None:
s = """'Doc string'\nother = stuff"""
m = rewrite(s)
assert isinstance(m.body[0], ast.Expr)
assert isinstance(m.body[1], ast.Assert)
assert m.body[1].msg is None
- def test_dont_rewrite_plugin(self, testdir):
+ def test_dont_rewrite_plugin(self, pytester: Pytester) -> None:
contents = {
"conftest.py": "pytest_plugins = 'plugin'; import plugin",
"plugin.py": "'PYTEST_DONT_REWRITE'",
"test_foo.py": "def test_foo(): pass",
}
- testdir.makepyfile(**contents)
- result = testdir.runpytest_subprocess()
+ pytester.makepyfile(**contents)
+ result = pytester.runpytest_subprocess()
assert "warning" not in "".join(result.outlines)
- def test_rewrites_plugin_as_a_package(self, testdir):
- pkgdir = testdir.mkpydir("plugin")
- pkgdir.join("__init__.py").write(
+ def test_rewrites_plugin_as_a_package(self, pytester: Pytester) -> None:
+ pkgdir = pytester.mkpydir("plugin")
+ pkgdir.joinpath("__init__.py").write_text(
"import pytest\n"
"@pytest.fixture\n"
"def special_asserter():\n"
" assert x == y\n"
" return special_assert\n"
)
- testdir.makeconftest('pytest_plugins = ["plugin"]')
- testdir.makepyfile("def test(special_asserter): special_asserter(1, 2)\n")
- result = testdir.runpytest()
+ pytester.makeconftest('pytest_plugins = ["plugin"]')
+ pytester.makepyfile("def test(special_asserter): special_asserter(1, 2)\n")
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*assert 1 == 2*"])
- def test_honors_pep_235(self, testdir, monkeypatch):
+ def test_honors_pep_235(self, pytester: Pytester, monkeypatch) -> None:
# note: couldn't make it fail on macos with a single `sys.path` entry
# note: these modules are named `test_*` to trigger rewriting
- testdir.tmpdir.join("test_y.py").write("x = 1")
- xdir = testdir.tmpdir.join("x").ensure_dir()
- xdir.join("test_Y").ensure_dir().join("__init__.py").write("x = 2")
- testdir.makepyfile(
+ pytester.makepyfile(test_y="x = 1")
+ xdir = pytester.mkdir("x")
+ pytester.mkpydir(str(xdir.joinpath("test_Y")))
+ xdir.joinpath("test_Y").joinpath("__init__.py").write_text("x = 2")
+ pytester.makepyfile(
"import test_y\n"
"import test_Y\n"
"def test():\n"
" assert test_y.x == 1\n"
" assert test_Y.x == 2\n"
)
- monkeypatch.syspath_prepend(xdir)
- testdir.runpytest().assert_outcomes(passed=1)
+ monkeypatch.syspath_prepend(str(xdir))
+ pytester.runpytest().assert_outcomes(passed=1)
def test_name(self, request) -> None:
def f1() -> None:
lines = msg.splitlines()
if verbose > 1:
assert lines == [
- "assert {!r} == 42".format(X),
- " +{!r}".format(X),
+ f"assert {X!r} == 42",
+ f" +{X!r}",
" -42",
]
elif verbose > 0:
assert lines == [
"assert <class 'test_...e.<locals>.X'> == 42",
- " +{!r}".format(X),
+ f" +{X!r}",
" -42",
]
else:
" + where Y = cls()",
]
- def test_assert_already_has_message(self):
+ def test_assert_already_has_message(self) -> None:
def f():
assert False, "something bad!"
assert getmsg(f) == "AssertionError: something bad!\nassert False"
- def test_assertion_message(self, testdir):
- testdir.makepyfile(
+ def test_assertion_message(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_foo():
assert 1 == 2, "The failure message"
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
["*AssertionError*The failure message*", "*assert 1 == 2*"]
)
- def test_assertion_message_multiline(self, testdir):
- testdir.makepyfile(
+ def test_assertion_message_multiline(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_foo():
assert 1 == 2, "A multiline\\nfailure message"
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
["*AssertionError*A multiline*", "*failure message*", "*assert 1 == 2*"]
)
- def test_assertion_message_tuple(self, testdir):
- testdir.makepyfile(
+ def test_assertion_message_tuple(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_foo():
assert 1 == 2, (1, 2)
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
["*AssertionError*%s*" % repr((1, 2)), "*assert 1 == 2*"]
)
- def test_assertion_message_expr(self, testdir):
- testdir.makepyfile(
+ def test_assertion_message_expr(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_foo():
assert 1 == 2, 1 + 2
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(["*AssertionError*3*", "*assert 1 == 2*"])
- def test_assertion_message_escape(self, testdir):
- testdir.makepyfile(
+ def test_assertion_message_escape(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_foo():
assert 1 == 2, 'To be escaped: %'
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
["*AssertionError: To be escaped: %", "*assert 1 == 2"]
)
- def test_assertion_messages_bytes(self, testdir):
- testdir.makepyfile("def test_bytes_assertion():\n assert False, b'ohai!'\n")
- result = testdir.runpytest()
+ def test_assertion_messages_bytes(self, pytester: Pytester) -> None:
+ pytester.makepyfile("def test_bytes_assertion():\n assert False, b'ohai!'\n")
+ result = pytester.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(["*AssertionError: b'ohai!'", "*assert False"])
assert getmsg(f2) == "assert (False or (4 % 2))"
- def test_at_operator_issue1290(self, testdir):
- testdir.makepyfile(
+ def test_at_operator_issue1290(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
class Matrix(object):
def __init__(self, num):
def test_multmat_operator():
assert Matrix(2) @ Matrix(3) == 6"""
)
- testdir.runpytest().assert_outcomes(passed=1)
+ pytester.runpytest().assert_outcomes(passed=1)
- def test_starred_with_side_effect(self, testdir):
+ def test_starred_with_side_effect(self, pytester: Pytester) -> None:
"""See #4412"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
def test():
f = lambda x: x
assert 2 * next(x) == f(*[next(x)])
"""
)
- testdir.runpytest().assert_outcomes(passed=1)
+ pytester.runpytest().assert_outcomes(passed=1)
def test_call(self) -> None:
def g(a=42, *args, **kwargs) -> bool:
getmsg(f5, must_pass=True)
- def test_len(self, request):
+ def test_len(self, request) -> None:
def f():
values = list(range(10))
assert len(values) == 11
assert getmsg(f1) == "assert 42"
def my_reprcompare2(op, left, right) -> str:
- return "{} {} {}".format(left, op, right)
+ return f"{left} {op} {right}"
monkeypatch.setattr(util, "_reprcompare", my_reprcompare2)
class TestRewriteOnImport:
- def test_pycache_is_a_file(self, testdir):
- testdir.tmpdir.join("__pycache__").write("Hello")
- testdir.makepyfile(
+ def test_pycache_is_a_file(self, pytester: Pytester) -> None:
+ pytester.path.joinpath("__pycache__").write_text("Hello")
+ pytester.makepyfile(
"""
def test_rewritten():
assert "@py_builtins" in globals()"""
)
- assert testdir.runpytest().ret == 0
+ assert pytester.runpytest().ret == 0
- def test_pycache_is_readonly(self, testdir):
- cache = testdir.tmpdir.mkdir("__pycache__")
- old_mode = cache.stat().mode
+ def test_pycache_is_readonly(self, pytester: Pytester) -> None:
+ cache = pytester.mkdir("__pycache__")
+ old_mode = cache.stat().st_mode
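+ # The fresh cache dir is writable, so XOR-ing S_IWRITE clears the owner write bit.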
cache.chmod(old_mode ^ stat.S_IWRITE)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_rewritten():
assert "@py_builtins" in globals()"""
)
try:
- assert testdir.runpytest().ret == 0
+ assert pytester.runpytest().ret == 0
finally:
cache.chmod(old_mode)
- def test_zipfile(self, testdir):
- z = testdir.tmpdir.join("myzip.zip")
+ def test_zipfile(self, pytester: Pytester) -> None:
+ z = pytester.path.joinpath("myzip.zip")
z_fn = str(z)
f = zipfile.ZipFile(z_fn, "w")
try:
finally:
f.close()
z.chmod(256)
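+ # 256 == 0o400: the zip becomes owner read-only.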
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import sys
sys.path.append(%r)
import test_gum.test_lizard"""
% (z_fn,)
)
- assert testdir.runpytest().ret == ExitCode.NO_TESTS_COLLECTED
+ assert pytester.runpytest().ret == ExitCode.NO_TESTS_COLLECTED
- def test_readonly(self, testdir):
- sub = testdir.mkdir("testing")
- sub.join("test_readonly.py").write(
+ def test_readonly(self, pytester: Pytester) -> None:
+ sub = pytester.mkdir("testing")
+ sub.joinpath("test_readonly.py").write_bytes(
b"""
def test_rewritten():
assert "@py_builtins" in globals()
""",
- "wb",
)
- old_mode = sub.stat().mode
+ old_mode = sub.stat().st_mode
sub.chmod(320)
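+ # 320 == 0o500: read+execute only, so no pyc can be written inside the directory.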
try:
- assert testdir.runpytest().ret == 0
+ assert pytester.runpytest().ret == 0
finally:
sub.chmod(old_mode)
- def test_dont_write_bytecode(self, testdir, monkeypatch):
- testdir.makepyfile(
+ def test_dont_write_bytecode(self, pytester: Pytester, monkeypatch) -> None:
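+ # An ambient PYTHONPYCACHEPREFIX would relocate pyc files and confuse the check below.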
+ monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False)
+
+ pytester.makepyfile(
"""
import os
def test_no_bytecode():
assert not os.path.exists(os.path.dirname(__cached__))"""
)
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
- assert testdir.runpytest_subprocess().ret == 0
+ assert pytester.runpytest_subprocess().ret == 0
+
+ def test_orphaned_pyc_file(self, pytester: Pytester, monkeypatch) -> None:
+ monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False)
+ monkeypatch.setattr(sys, "pycache_prefix", None, raising=False)
- def test_orphaned_pyc_file(self, testdir):
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import orphan
def test_it():
assert orphan.value == 17
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
orphan="""
value = 17
"""
assert len(pycs) == 1
os.rename(pycs[0], "orphan.pyc")
- assert testdir.runpytest().ret == 0
+ assert pytester.runpytest().ret == 0
- def test_cached_pyc_includes_pytest_version(self, testdir, monkeypatch):
+ def test_cached_pyc_includes_pytest_version(
+ self, pytester: Pytester, monkeypatch
+ ) -> None:
"""Avoid stale caches (#1671)"""
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
- testdir.makepyfile(
+ monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False)
+ pytester.makepyfile(
test_foo="""
def test_foo():
assert True
"""
)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
assert result.ret == 0
- found_names = glob.glob(
- "__pycache__/*-pytest-{}.pyc".format(pytest.__version__)
- )
+ found_names = glob.glob(f"__pycache__/*-pytest-{pytest.__version__}.pyc")
assert found_names, "pyc with expected tag not found in names: {}".format(
glob.glob("__pycache__/*.pyc")
)
@pytest.mark.skipif('"__pypy__" in sys.modules')
- def test_pyc_vs_pyo(self, testdir, monkeypatch):
- testdir.makepyfile(
+ def test_pyc_vs_pyo(self, pytester: Pytester, monkeypatch) -> None:
+ pytester.makepyfile(
"""
import pytest
def test_optimized():
"hello"
assert test_optimized.__doc__ is None"""
)
- p = make_numbered_dir(root=Path(testdir.tmpdir), prefix="runpytest-")
+ p = make_numbered_dir(root=pytester.path, prefix="runpytest-")
tmp = "--basetemp=%s" % p
monkeypatch.setenv("PYTHONOPTIMIZE", "2")
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
- assert testdir.runpytest_subprocess(tmp).ret == 0
+ monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False)
+ assert pytester.runpytest_subprocess(tmp).ret == 0
tagged = "test_pyc_vs_pyo." + PYTEST_TAG
assert tagged + ".pyo" in os.listdir("__pycache__")
monkeypatch.undo()
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
- assert testdir.runpytest_subprocess(tmp).ret == 1
+ monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False)
+ assert pytester.runpytest_subprocess(tmp).ret == 1
assert tagged + ".pyc" in os.listdir("__pycache__")
- def test_package(self, testdir):
- pkg = testdir.tmpdir.join("pkg")
+ def test_package(self, pytester: Pytester) -> None:
+ pkg = pytester.path.joinpath("pkg")
pkg.mkdir()
- pkg.join("__init__.py").ensure()
- pkg.join("test_blah.py").write(
+ pkg.joinpath("__init__.py")
+ pkg.joinpath("test_blah.py").write_text(
"""
def test_rewritten():
assert "@py_builtins" in globals()"""
)
- assert testdir.runpytest().ret == 0
+ assert pytester.runpytest().ret == 0
- def test_translate_newlines(self, testdir):
+ def test_translate_newlines(self, pytester: Pytester) -> None:
content = "def test_rewritten():\r\n assert '@py_builtins' in globals()"
b = content.encode("utf-8")
- testdir.tmpdir.join("test_newlines.py").write(b, "wb")
- assert testdir.runpytest().ret == 0
+ pytester.path.joinpath("test_newlines.py").write_bytes(b)
+ assert pytester.runpytest().ret == 0
- def test_package_without__init__py(self, testdir):
- pkg = testdir.mkdir("a_package_without_init_py")
- pkg.join("module.py").ensure()
- testdir.makepyfile("import a_package_without_init_py.module")
- assert testdir.runpytest().ret == ExitCode.NO_TESTS_COLLECTED
+ def test_package_without__init__py(self, pytester: Pytester) -> None:
+ pkg = pytester.mkdir("a_package_without_init_py")
+ pkg.joinpath("module.py").touch()
+ pytester.makepyfile("import a_package_without_init_py.module")
+ assert pytester.runpytest().ret == ExitCode.NO_TESTS_COLLECTED
- def test_rewrite_warning(self, testdir):
- testdir.makeconftest(
+ def test_rewrite_warning(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
pytest.register_assert_rewrite("_pytest")
"""
)
# needs to be a subprocess because pytester explicitly disables this warning
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["*Module already imported*: _pytest"])
- def test_rewrite_module_imported_from_conftest(self, testdir):
- testdir.makeconftest(
+ def test_rewrite_module_imported_from_conftest(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import test_rewrite_module_imported
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
test_rewrite_module_imported="""
def test_rewritten():
assert "@py_builtins" in globals()
"""
)
- assert testdir.runpytest_subprocess().ret == 0
+ assert pytester.runpytest_subprocess().ret == 0
- def test_remember_rewritten_modules(self, pytestconfig, testdir, monkeypatch):
+ def test_remember_rewritten_modules(
+ self, pytestconfig, pytester: Pytester, monkeypatch
+ ) -> None:
"""`AssertionRewriteHook` should remember rewritten modules so it
doesn't give false positives (#2005)."""
- monkeypatch.syspath_prepend(testdir.tmpdir)
- testdir.makepyfile(test_remember_rewritten_modules="")
+ monkeypatch.syspath_prepend(pytester.path)
+ pytester.makepyfile(test_remember_rewritten_modules="")
warnings = []
hook = AssertionRewritingHook(pytestconfig)
monkeypatch.setattr(
hook.mark_rewrite("test_remember_rewritten_modules")
assert warnings == []
- def test_rewrite_warning_using_pytest_plugins(self, testdir):
- testdir.makepyfile(
+ def test_rewrite_warning_using_pytest_plugins(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
**{
"conftest.py": "pytest_plugins = ['core', 'gui', 'sci']",
"core.py": "",
"test_rewrite_warning_pytest_plugins.py": "def test(): pass",
}
)
- testdir.chdir()
- result = testdir.runpytest_subprocess()
+ pytester.chdir()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["*= 1 passed in *=*"])
result.stdout.no_fnmatch_line("*pytest-warning summary*")
- def test_rewrite_warning_using_pytest_plugins_env_var(self, testdir, monkeypatch):
+ def test_rewrite_warning_using_pytest_plugins_env_var(
+ self, pytester: Pytester, monkeypatch
+ ) -> None:
monkeypatch.setenv("PYTEST_PLUGINS", "plugin")
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"plugin.py": "",
"test_rewrite_warning_using_pytest_plugins_env_var.py": """
""",
}
)
- testdir.chdir()
- result = testdir.runpytest_subprocess()
+ pytester.chdir()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["*= 1 passed in *=*"])
result.stdout.no_fnmatch_line("*pytest-warning summary*")
class TestAssertionRewriteHookDetails:
- def test_sys_meta_path_munged(self, testdir):
- testdir.makepyfile(
+ def test_sys_meta_path_munged(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_meta_path():
import sys; sys.meta_path = []"""
)
- assert testdir.runpytest().ret == 0
+ assert pytester.runpytest().ret == 0
- def test_write_pyc(self, testdir: Testdir, tmpdir, monkeypatch) -> None:
+ def test_write_pyc(self, pytester: Pytester, tmp_path: Path, monkeypatch) -> None:
from _pytest.assertion.rewrite import _write_pyc
from _pytest.assertion import AssertionState
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
state = AssertionState(config, "rewrite")
- source_path = str(tmpdir.ensure("source.py"))
- pycpath = tmpdir.join("pyc").strpath
+ tmp_path.joinpath("source.py").touch()
+ source_path = str(tmp_path.joinpath("source.py"))
+ pycpath = tmp_path.joinpath("pyc")
co = compile("1", "f.py", "single")
assert _write_pyc(state, co, os.stat(source_path), pycpath)
assert not _write_pyc(state, co, os.stat(source_path), pycpath)
- def test_resources_provider_for_loader(self, testdir):
+ def test_resources_provider_for_loader(self, pytester: Pytester) -> None:
"""
Attempts to load resources from a package should succeed normally,
even when the AssertionRewriteHook is used to load the modules.
"""
pytest.importorskip("pkg_resources")
- testdir.mkpydir("testpkg")
+ pytester.mkpydir("testpkg")
contents = {
"testpkg/test_pkg": """
import pkg_resources
assert res == 'Load me please.'
"""
}
- testdir.makepyfile(**contents)
- testdir.maketxtfile(**{"testpkg/resource": "Load me please."})
+ pytester.makepyfile(**contents)
+ pytester.maketxtfile(**{"testpkg/resource": "Load me please."})
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.assert_outcomes(passed=1)
def test_read_pyc(self, tmp_path: Path) -> None:
py_compile.compile(str(source), str(pyc))
contents = pyc.read_bytes()
- strip_bytes = 20 # header is around 8 bytes, strip a little more
+ strip_bytes = 20 # header is around 16 bytes, strip a little more
assert len(contents) > strip_bytes
pyc.write_bytes(contents[:strip_bytes])
assert _read_pyc(source, pyc) is None # no error
- def test_reload_is_same_and_reloads(self, testdir: Testdir) -> None:
+ @pytest.mark.skipif(
+ sys.version_info < (3, 7), reason="Only the Python 3.7 format for simplicity"
+ )
+ def test_read_pyc_more_invalid(self, tmp_path: Path) -> None:
+ from _pytest.assertion.rewrite import _read_pyc
+
+ source = tmp_path / "source.py"
+ pyc = tmp_path / "source.pyc"
+
+ source_bytes = b"def test(): pass\n"
+ source.write_bytes(source_bytes)
+
+ magic = importlib.util.MAGIC_NUMBER
+
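+ # Python 3.7+ pyc header: 4-byte magic, 4-byte flags, 4-byte mtime, 4-byte source size
+ # (PEP 552); all-zero flags select timestamp-based invalidation.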
+ flags = b"\x00\x00\x00\x00"
+
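+ # Stamp the source file with the exact mtime encoded in the header's mtime field.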
+ mtime = b"\x58\x3c\xb0\x5f"
+ mtime_int = int.from_bytes(mtime, "little")
+ os.utime(source, (mtime_int, mtime_int))
+
+ size = len(source_bytes).to_bytes(4, "little")
+
+ code = marshal.dumps(compile(source_bytes, str(source), "exec"))
+
+ # Good header.
+ pyc.write_bytes(magic + flags + mtime + size + code)
+ assert _read_pyc(source, pyc, print) is not None
+
+ # Too short.
+ pyc.write_bytes(magic + flags + mtime)
+ assert _read_pyc(source, pyc, print) is None
+
+ # Bad magic.
+ pyc.write_bytes(b"\x12\x34\x56\x78" + flags + mtime + size + code)
+ assert _read_pyc(source, pyc, print) is None
+
+ # Unsupported flags.
+ pyc.write_bytes(magic + b"\x00\xff\x00\x00" + mtime + size + code)
+ assert _read_pyc(source, pyc, print) is None
+
+ # Bad mtime.
+ pyc.write_bytes(magic + flags + b"\x58\x3d\xb0\x5f" + size + code)
+ assert _read_pyc(source, pyc, print) is None
+
+ # Bad size.
+ pyc.write_bytes(magic + flags + mtime + b"\x99\x00\x00\x00" + code)
+ assert _read_pyc(source, pyc, print) is None
+
+ def test_reload_is_same_and_reloads(self, pytester: Pytester) -> None:
"""Reloading a (collected) module after change picks up the change."""
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
python_files = *.py
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
file="""
def reloaded():
return False
assert file.reloaded()
""",
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 passed*"])
- def test_get_data_support(self, testdir):
+ def test_get_data_support(self, pytester: Pytester) -> None:
"""Implement optional PEP302 api (#808)."""
- path = testdir.mkpydir("foo")
- path.join("test_foo.py").write(
+ path = pytester.mkpydir("foo")
+ path.joinpath("test_foo.py").write_text(
textwrap.dedent(
"""\
class Test(object):
"""
)
)
- path.join("data.txt").write("Hey")
- result = testdir.runpytest()
+ path.joinpath("data.txt").write_text("Hey")
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
-def test_issue731(testdir):
- testdir.makepyfile(
+def test_issue731(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
class LongReprWithBraces(object):
def __repr__(self):
assert obj.some_method()
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.no_fnmatch_line("*unbalanced braces*")
class TestIssue925:
- def test_simple_case(self, testdir):
- testdir.makepyfile(
+ def test_simple_case(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_ternary_display():
assert (False == False) == False
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*E*assert (False == False) == False"])
- def test_long_case(self, testdir):
- testdir.makepyfile(
+ def test_long_case(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_ternary_display():
assert False == (False == True) == True
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*E*assert (False == True) == True"])
- def test_many_brackets(self, testdir):
- testdir.makepyfile(
+ def test_many_brackets(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_ternary_display():
assert True == ((False == True) == True)
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*E*assert True == ((False == True) == True)"])
class TestIssue2121:
- def test_rewrite_python_files_contain_subdirs(self, testdir):
- testdir.makepyfile(
+ def test_rewrite_python_files_contain_subdirs(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
**{
"tests/file.py": """
def test_simple_failure():
"""
}
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
python_files = tests/**.py
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*E*assert (1 + 1) == 3"])
sys.maxsize <= (2 ** 31 - 1), reason="Causes OverflowError on 32bit systems"
)
@pytest.mark.parametrize("offset", [-1, +1])
-def test_source_mtime_long_long(testdir, offset):
+def test_source_mtime_long_long(pytester: Pytester, offset) -> None:
"""Support modification dates after 2038 in rewritten files (#4903).
pytest would crash with:
fp.write(struct.pack("<ll", mtime, size))
E struct.error: argument out of range
"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
def test(): pass
"""
# +1 offset also tests masking of 0xFFFFFFFF
timestamp = 2 ** 32 + offset
os.utime(str(p), (timestamp, timestamp))
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 0
-def test_rewrite_infinite_recursion(testdir, pytestconfig, monkeypatch) -> None:
+def test_rewrite_infinite_recursion(
+ pytester: Pytester, pytestconfig, monkeypatch
+) -> None:
"""Fix infinite recursion when writing pyc files: if an import happens to be triggered when writing the pyc
file, this would cause another call to the hook, which would trigger another pyc writing, which could
trigger another import, and so on. (#3506)"""
from _pytest.assertion import rewrite as rewritemod
- testdir.syspathinsert()
- testdir.makepyfile(test_foo="def test_foo(): pass")
- testdir.makepyfile(test_bar="def test_bar(): pass")
+ pytester.syspathinsert()
+ pytester.makepyfile(test_foo="def test_foo(): pass")
+ pytester.makepyfile(test_bar="def test_bar(): pass")
original_write_pyc = rewritemod._write_pyc
class TestEarlyRewriteBailout:
@pytest.fixture
- def hook(self, pytestconfig, monkeypatch, testdir) -> AssertionRewritingHook:
+ def hook(
+ self, pytestconfig, monkeypatch, pytester: Pytester
+ ) -> AssertionRewritingHook:
"""Returns a patched AssertionRewritingHook instance so we can configure its initial paths and track
if PathFinder.find_spec has been called.
"""
import importlib.machinery
- self.find_spec_calls = [] # type: List[str]
- self.initial_paths = set() # type: Set[py.path.local]
+ self.find_spec_calls: List[str] = []
+ self.initial_paths: Set[py.path.local] = set()
class StubSession:
_initialpaths = self.initial_paths
hook.fnpats[:] = ["test_*.py", "*_test.py"]
monkeypatch.setattr(hook, "_find_spec", spy_find_spec)
hook.set_session(StubSession()) # type: ignore[arg-type]
- testdir.syspathinsert()
+ pytester.syspathinsert()
return hook
- def test_basic(self, testdir, hook: AssertionRewritingHook) -> None:
+ def test_basic(self, pytester: Pytester, hook: AssertionRewritingHook) -> None:
"""
Ensure we avoid calling PathFinder.find_spec when we know for sure a certain
module will not be rewritten to optimize assertion rewriting (#3918).
"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
@pytest.fixture
def fix(): return 1
"""
)
- testdir.makepyfile(test_foo="def test_foo(): pass")
- testdir.makepyfile(bar="def bar(): pass")
- foobar_path = testdir.makepyfile(foobar="def foobar(): pass")
- self.initial_paths.add(foobar_path)
+ pytester.makepyfile(test_foo="def test_foo(): pass")
+ pytester.makepyfile(bar="def bar(): pass")
+ foobar_path = pytester.makepyfile(foobar="def foobar(): pass")
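+ # makepyfile returns a pathlib.Path; the stub session still stores py.path.local entries.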
+ self.initial_paths.add(py.path.local(foobar_path))
# conftest files should always be rewritten
assert hook.find_spec("conftest") is not None
assert self.find_spec_calls == ["conftest", "test_foo", "foobar"]
def test_pattern_contains_subdirectories(
- self, testdir, hook: AssertionRewritingHook
+ self, pytester: Pytester, hook: AssertionRewritingHook
) -> None:
"""If one of the python_files patterns contain subdirectories ("tests/**.py") we can't bailout early
because we need to match with the full path, which can only be found by calling PathFinder.find_spec
"""
- p = testdir.makepyfile(
+ pytester.makepyfile(
**{
"tests/file.py": """\
def test_simple_failure():
"""
}
)
- testdir.syspathinsert(p.dirpath())
+ pytester.syspathinsert("tests")
hook.fnpats[:] = ["tests/**.py"]
assert hook.find_spec("file") is not None
assert self.find_spec_calls == ["file"]
@pytest.mark.skipif(
sys.platform.startswith("win32"), reason="cannot remove cwd on Windows"
)
- def test_cwd_changed(self, testdir, monkeypatch):
+ def test_cwd_changed(self, pytester: Pytester, monkeypatch) -> None:
- # Setup conditions for py's fspath trying to import pathlib on py34
+ # Set up conditions for py's fspath trying to import pathlib on py34
# always (previously triggered via xdist only).
# Ref: https://github.com/pytest-dev/py/pull/207
monkeypatch.syspath_prepend("")
monkeypatch.delitem(sys.modules, "pathlib", raising=False)
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"test_setup_nonexisting_cwd.py": """\
import os
""",
}
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 passed in *"])
class TestAssertionPass:
- def test_option_default(self, testdir):
- config = testdir.parseconfig()
+ def test_option_default(self, pytester: Pytester) -> None:
+ config = pytester.parseconfig()
assert config.getini("enable_assertion_pass_hook") is False
@pytest.fixture
- def flag_on(self, testdir):
- testdir.makeini("[pytest]\nenable_assertion_pass_hook = True\n")
+ def flag_on(self, pytester: Pytester) -> None:
+ pytester.makeini("[pytest]\nenable_assertion_pass_hook = True\n")
@pytest.fixture
- def hook_on(self, testdir):
- testdir.makeconftest(
+ def hook_on(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""\
def pytest_assertion_pass(item, lineno, orig, expl):
raise Exception("Assertion Passed: {} {} at line {}".format(orig, expl, lineno))
"""
)
- def test_hook_call(self, testdir, flag_on, hook_on):
- testdir.makepyfile(
+ def test_hook_call(self, pytester: Pytester, flag_on, hook_on) -> None:
+ pytester.makepyfile(
"""\
def test_simple():
a=1
assert False, "assert with message"
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
"*Assertion Passed: a+b == c+d (1 + 2) == (3 + 0) at line 7*"
)
- def test_hook_call_with_parens(self, testdir, flag_on, hook_on):
- testdir.makepyfile(
+ def test_hook_call_with_parens(self, pytester: Pytester, flag_on, hook_on) -> None:
+ pytester.makepyfile(
"""\
def f(): return 1
def test():
assert f()
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines("*Assertion Passed: f() 1")
- def test_hook_not_called_without_hookimpl(self, testdir, monkeypatch, flag_on):
+ def test_hook_not_called_without_hookimpl(
+ self, pytester: Pytester, monkeypatch, flag_on
+ ) -> None:
"""Assertion pass should not be called (and hence formatting should
not occur) if there is no hook declared for pytest_assertion_pass"""
_pytest.assertion.rewrite, "_call_assertion_pass", raise_on_assertionpass
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
def test_simple():
a=1
assert a+b == c+d
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.assert_outcomes(passed=1)
- def test_hook_not_called_without_cmd_option(self, testdir, monkeypatch):
+ def test_hook_not_called_without_cmd_option(
+ self, pytester: Pytester, monkeypatch
+ ) -> None:
"""Assertion pass should not be called (and hence formatting should
- not occur) if there is no hook declared for pytest_assertion_pass"""
+ not occur) if the `enable_assertion_pass_hook` ini option is not set."""
_pytest.assertion.rewrite, "_call_assertion_pass", raise_on_assertionpass
)
- testdir.makeconftest(
+ pytester.makeconftest(
"""\
def pytest_assertion_pass(item, lineno, orig, expl):
raise Exception("Assertion Passed: {} {} at line {}".format(orig, expl, lineno))
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
def test_simple():
a=1
assert a+b == c+d
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.assert_outcomes(passed=1)
# fmt: on
),
)
-def test_get_assertion_exprs(src, expected):
+def test_get_assertion_exprs(src, expected) -> None:
assert _get_assertion_exprs(src) == expected
(None, "/home/projects/src/foo.py", "/home/projects/src/__pycache__"),
],
)
- def test_get_cache_dir(self, monkeypatch, prefix, source, expected):
- if prefix:
- if sys.version_info < (3, 8):
- pytest.skip("pycache_prefix not available in py<38")
- monkeypatch.setattr(sys, "pycache_prefix", prefix) # type:ignore
+ def test_get_cache_dir(self, monkeypatch, prefix, source, expected) -> None:
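+ # Clear any user-level pycache prefix so the expected locations below hold.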
+ monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False)
+
+ if prefix is not None and sys.version_info < (3, 8):
+ pytest.skip("pycache_prefix not available in py<38")
+ monkeypatch.setattr(sys, "pycache_prefix", prefix, raising=False)
assert get_cache_dir(Path(source)) == Path(expected)
@pytest.mark.skipif(
sys.version_info < (3, 8), reason="pycache_prefix not available in py<38"
)
- def test_sys_pycache_prefix_integration(self, tmp_path, monkeypatch, testdir):
+ def test_sys_pycache_prefix_integration(
+ self, tmp_path, monkeypatch, pytester: Pytester
+ ) -> None:
"""Integration test for sys.pycache_prefix (#4730)."""
pycache_prefix = tmp_path / "my/pycs"
monkeypatch.setattr(sys, "pycache_prefix", str(pycache_prefix))
monkeypatch.setattr(sys, "dont_write_bytecode", False)
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"src/test_foo.py": """
import bar
"src/bar/__init__.py": "",
}
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 0
- test_foo = Path(testdir.tmpdir) / "src/test_foo.py"
- bar_init = Path(testdir.tmpdir) / "src/bar/__init__.py"
+ test_foo = pytester.path.joinpath("src/test_foo.py")
+ bar_init = pytester.path.joinpath("src/bar/__init__.py")
assert test_foo.is_file()
assert bar_init.is_file()
import pytest
from _pytest.config import ExitCode
+from _pytest.pytester import Pytester
from _pytest.pytester import Testdir
pytest_plugins = ("pytester",)
)
)
result = testdir.runpytest("-v")
- result.stdout.fnmatch_lines(
- ["cachedir: {abscache}".format(abscache=external_cache)]
- )
+ result.stdout.fnmatch_lines([f"cachedir: {external_cache}"])
def test_cache_show(testdir):
"",
"<Module pkg1/test_1.py>",
" <Function test_fail>",
- "*= 1 deselected in *",
+ "*= 1/2 tests collected (1 deselected) in *",
],
)
" <Function test_fail>",
" <Function test_other>",
"",
- "*= 1 deselected in *",
+ "*= 2/3 tests collected (1 deselected) in *",
],
consecutive=True,
)
"<Module pkg1/test_1.py>",
" <Function test_pass>",
"",
- "*= no tests ran in*",
+ "*= 1 test collected in*",
],
consecutive=True,
)
assert result.ret == 0
- def test_packages(self, testdir: Testdir) -> None:
+ def test_packages(self, pytester: Pytester) -> None:
"""Regression test for #7758.
The particular issue here was that Package nodes were included in the
The tests includes a test in an __init__.py file just to make sure the
fix doesn't somehow regress that, it is not critical for the issue.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"__init__.py": "",
"a/__init__.py": "def test_a_init(): assert False",
"b/test_two.py": "def test_2(): assert False",
},
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
python_files = *.py
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.assert_outcomes(failed=3)
- result = testdir.runpytest("--lf")
+ result = pytester.runpytest("--lf")
result.assert_outcomes(failed=3)
from _pytest.cacheprovider import Cache
config = testdir.parseconfig()
- cache = Cache.for_config(config)
+ cache = Cache.for_config(config, _ispytest=True)
cache.set("foo", "bar")
msg = "# Created by pytest automatically.\n*\n"
gitignore_path = cache._cachedir.joinpath(".gitignore")
"""
)
config = testdir.parseconfig()
- cache = Cache.for_config(config)
+ cache = Cache.for_config(config, _ispytest=True)
cache.set("foo", "bar")
assert os.path.isdir("v") # cache contents
from _pytest.cacheprovider import CACHEDIR_TAG_CONTENT
config = testdir.parseconfig()
- cache = Cache.for_config(config)
+ cache = Cache.for_config(config, _ispytest=True)
cache.set("foo", "bar")
cachedir_tag_path = cache._cachedir.joinpath("CACHEDIR.TAG")
assert cachedir_tag_path.read_bytes() == CACHEDIR_TAG_CONTENT
import pytest
from _pytest import capture
from _pytest.capture import _get_multicapture
+from _pytest.capture import CaptureFixture
from _pytest.capture import CaptureManager
from _pytest.capture import CaptureResult
from _pytest.capture import MultiCapture
from _pytest.config import ExitCode
-from _pytest.pytester import Testdir
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.pytester import Pytester
-# note: py.io capture tests where copied from
+# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
class TestCaptureManager:
@pytest.mark.parametrize("method", ["no", "sys", "fd"])
- def test_capturing_basic_api(self, method):
+ def test_capturing_basic_api(self, method) -> None:
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
@pytest.mark.parametrize("method", ["fd", "sys"])
-def test_capturing_unicode(testdir, method):
+def test_capturing_unicode(pytester: Pytester, method: str) -> None:
obj = "'b\u00f6y'"
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
# taken from issue 227 from nosetests
def test_unicode():
"""
% obj
)
- result = testdir.runpytest("--capture=%s" % method)
+ result = pytester.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
-def test_capturing_bytes_in_utf8_encoding(testdir, method):
- testdir.makepyfile(
+def test_capturing_bytes_in_utf8_encoding(pytester: Pytester, method: str) -> None:
+ pytester.makepyfile(
"""\
def test_unicode():
print('b\\u00f6y')
"""
)
- result = testdir.runpytest("--capture=%s" % method)
+ result = pytester.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
-def test_collect_capturing(testdir):
- p = testdir.makepyfile(
+def test_collect_capturing(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import sys
import xyz42123
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
"*Captured stdout*",
class TestPerTestCapturing:
- def test_capture_and_fixtures(self, testdir):
- p = testdir.makepyfile(
+ def test_capture_and_fixtures(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def setup_module(mod):
print("setup module")
assert 0
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
"setup module*",
)
@pytest.mark.xfail(reason="unimplemented feature")
- def test_capture_scope_cache(self, testdir):
- p = testdir.makepyfile(
+ def test_capture_scope_cache(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import sys
def setup_module(func):
print("in teardown")
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
"*test_func():*",
]
)
- def test_no_carry_over(self, testdir):
- p = testdir.makepyfile(
+ def test_no_carry_over(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def test_func1():
print("in func1")
assert 0
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
- def test_teardown_capturing(self, testdir):
- p = testdir.makepyfile(
+ def test_teardown_capturing(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def setup_function(function):
print("setup func1")
pass
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
"*teardown_function*",
]
)
- def test_teardown_capturing_final(self, testdir):
- p = testdir.makepyfile(
+ def test_teardown_capturing_final(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def teardown_module(mod):
print("teardown module")
pass
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
"*def teardown_module(mod):*",
]
)
- def test_capturing_outerr(self, testdir):
- p1 = testdir.makepyfile(
+ def test_capturing_outerr(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""\
import sys
def test_capturing():
raise ValueError
"""
)
- result = testdir.runpytest(p1)
+ result = pytester.runpytest(p1)
result.stdout.fnmatch_lines(
[
"*test_capturing_outerr.py .F*",
class TestLoggingInteraction:
- def test_logging_stream_ownership(self, testdir):
- p = testdir.makepyfile(
+ def test_logging_stream_ownership(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""\
def test_logging():
import logging
stream.close() # to free memory/release resources
"""
)
- result = testdir.runpytest_subprocess(p)
+ result = pytester.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
- def test_logging_and_immediate_setupteardown(self, testdir):
- p = testdir.makepyfile(
+ def test_logging_and_immediate_setupteardown(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""\
import logging
def setup_function(function):
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
- result = testdir.runpytest_subprocess(p, *optargs)
+ result = pytester.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first!
# verify proper termination
assert "closed" not in s
- def test_logging_and_crossscope_fixtures(self, testdir):
- p = testdir.makepyfile(
+ def test_logging_and_crossscope_fixtures(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""\
import logging
def setup_module(function):
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
- result = testdir.runpytest_subprocess(p, *optargs)
+ result = pytester.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first
# verify proper termination
assert "closed" not in s
- def test_conftestlogging_is_shown(self, testdir):
- testdir.makeconftest(
+ def test_conftestlogging_is_shown(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
- result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
+ result = pytester.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stderr.fnmatch_lines(["WARNING*hello435*"])
assert "operation on closed file" not in result.stderr.str()
- def test_conftestlogging_and_test_logging(self, testdir):
- testdir.makeconftest(
+ def test_conftestlogging_and_test_logging(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""\
def test_hello():
import logging
assert 0
"""
)
- result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
+ result = pytester.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines(["WARNING*hello433*"])
assert "something" not in result.stderr.str()
assert "operation on closed file" not in result.stderr.str()
- def test_logging_after_cap_stopped(self, testdir):
- testdir.makeconftest(
+ def test_logging_after_cap_stopped(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""\
import pytest
import logging
"""
)
# make sure that logging is still captured in tests
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""\
def test_hello(log_on_teardown):
import logging
raise KeyboardInterrupt()
"""
)
- result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
+ result = pytester.runpytest_subprocess(p, "--log-cli-level", "info")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
- def test_std_functional(self, testdir, opt):
- reprec = testdir.inline_runsource(
+ def test_std_functional(self, pytester: Pytester, opt) -> None:
+ reprec = pytester.inline_runsource(
"""\
def test_hello(capsys):
print(42)
)
reprec.assertoutcome(passed=1)
- def test_capsyscapfd(self, testdir):
- p = testdir.makepyfile(
+ def test_capsyscapfd(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""\
def test_one(capsys, capfd):
pass
pass
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
"*ERROR*setup*test_one*",
]
)
- def test_capturing_getfixturevalue(self, testdir):
+ def test_capturing_getfixturevalue(self, pytester: Pytester) -> None:
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
def test_one(capsys, request):
request.getfixturevalue("capfd")
request.getfixturevalue("capsys")
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*test_one*",
]
)
- def test_capsyscapfdbinary(self, testdir):
- p = testdir.makepyfile(
+ def test_capsyscapfdbinary(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""\
def test_one(capsys, capfdbinary):
pass
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
)
@pytest.mark.parametrize("method", ["sys", "fd"])
- def test_capture_is_represented_on_failure_issue128(self, testdir, method):
- p = testdir.makepyfile(
+ def test_capture_is_represented_on_failure_issue128(
+ self, pytester: Pytester, method: str
+ ) -> None:
+ p = pytester.makepyfile(
"""\
def test_hello(cap{}):
print("xxx42xxx")
method
)
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["xxx42xxx"])
- def test_stdfd_functional(self, testdir):
- reprec = testdir.inline_runsource(
+ def test_stdfd_functional(self, pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""\
def test_hello(capfd):
import os
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("nl", ("\n", "\r\n", "\r"))
- def test_cafd_preserves_newlines(self, capfd, nl):
+ def test_cafd_preserves_newlines(self, capfd, nl) -> None:
print("test", end=nl)
out, err = capfd.readouterr()
assert out.endswith(nl)
- def test_capfdbinary(self, testdir):
- reprec = testdir.inline_runsource(
+ def test_capfdbinary(self, pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""\
def test_hello(capfdbinary):
import os
)
reprec.assertoutcome(passed=1)
- def test_capsysbinary(self, testdir):
- p1 = testdir.makepyfile(
+ def test_capsysbinary(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
r"""
def test_hello(capsysbinary):
import sys
print("stderr after", file=sys.stderr)
"""
)
- result = testdir.runpytest(str(p1), "-rA")
+ result = pytester.runpytest(str(p1), "-rA")
result.stdout.fnmatch_lines(
[
"*- Captured stdout call -*",
]
)
- def test_partial_setup_failure(self, testdir):
- p = testdir.makepyfile(
+ def test_partial_setup_failure(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""\
def test_hello(capsys, missingarg):
pass
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])
- def test_keyboardinterrupt_disables_capturing(self, testdir):
- p = testdir.makepyfile(
+ def test_keyboardinterrupt_disables_capturing(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""\
def test_hello(capfd):
import os
raise KeyboardInterrupt()
"""
)
- result = testdir.runpytest_subprocess(p)
+ result = pytester.runpytest_subprocess(p)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
assert result.ret == 2
- def test_capture_and_logging(self, testdir):
+ def test_capture_and_logging(self, pytester: Pytester) -> None:
"""#14"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""\
import logging
def test_log(capsys):
logging.error('x')
"""
)
- result = testdir.runpytest_subprocess(p)
+ result = pytester.runpytest_subprocess(p)
assert "closed" not in result.stderr.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
@pytest.mark.parametrize("no_capture", [True, False])
- def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
- testdir.makepyfile(
+ def test_disabled_capture_fixture(
+ self, pytester: Pytester, fixture: str, no_capture: bool
+ ) -> None:
+ pytester.makepyfile(
"""\
def test_disabled({fixture}):
print('captured before')
)
)
args = ("-s",) if no_capture else ()
- result = testdir.runpytest_subprocess(*args)
+ result = pytester.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
result.stdout.no_fnmatch_line("*captured before*")
result.stdout.no_fnmatch_line("*captured after*")
else:
result.stdout.no_fnmatch_line("*test_normal executed*")
- def test_disabled_capture_fixture_twice(self, testdir: Testdir) -> None:
+ def test_disabled_capture_fixture_twice(self, pytester: Pytester) -> None:
"""Test that an inner disabled() exit doesn't undo an outer disabled().
Issue #7148.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_disabled(capfd):
print('captured before')
assert capfd.readouterr() == ('captured before\\ncaptured after\\n', '')
"""
)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"*while capture is disabled 1",
)
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
- def test_fixture_use_by_other_fixtures(self, testdir, fixture):
+ def test_fixture_use_by_other_fixtures(self, pytester: Pytester, fixture: str) -> None:
"""Ensure that capsys and capfd can be used by other fixtures during
setup and teardown."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
import sys
import pytest
fixture=fixture
)
)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
result.stdout.no_fnmatch_line("*stdout contents begin*")
result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
- def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
+ def test_fixture_use_by_other_fixtures_teardown(
+ self, pytester: Pytester, cap: str
+ ) -> None:
"""Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
import sys
import pytest
cap=cap
)
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
-def test_setup_failure_does_not_kill_capturing(testdir):
- sub1 = testdir.mkpydir("sub1")
- sub1.join("conftest.py").write(
+def test_setup_failure_does_not_kill_capturing(pytester: Pytester) -> None:
+ sub1 = pytester.mkpydir("sub1")
+ sub1.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
def pytest_runtest_setup(item):
"""
)
)
- sub1.join("test_mod.py").write("def test_func1(): pass")
- result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
+ sub1.joinpath("test_mod.py").write_text("def test_func1(): pass")
+ result = pytester.runpytest(pytester.path, "--traceconfig")
result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
-def test_capture_conftest_runtest_setup(testdir):
- testdir.makeconftest(
+def test_capture_conftest_runtest_setup(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
- testdir.makepyfile("def test_func(): pass")
- result = testdir.runpytest()
+ pytester.makepyfile("def test_func(): pass")
+ result = pytester.runpytest()
assert result.ret == 0
result.stdout.no_fnmatch_line("*hello19*")
-def test_capture_badoutput_issue412(testdir):
- testdir.makepyfile(
+def test_capture_badoutput_issue412(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import os
assert 0
"""
)
- result = testdir.runpytest("--capture=fd")
+ result = pytester.runpytest("--capture=fd")
result.stdout.fnmatch_lines(
"""
*def test_func*
)
-def test_capture_early_option_parsing(testdir):
- testdir.makeconftest(
+def test_capture_early_option_parsing(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
- testdir.makepyfile("def test_func(): pass")
- result = testdir.runpytest("-vs")
+ pytester.makepyfile("def test_func(): pass")
+ result = pytester.runpytest("-vs")
assert result.ret == 0
assert "hello19" in result.stdout.str()
-def test_capture_binary_output(testdir):
- testdir.makepyfile(
+def test_capture_binary_output(pytester: Pytester) -> None:
+ pytester.makepyfile(
r"""
import pytest
test_foo()
"""
)
- result = testdir.runpytest("--assert=plain")
+ result = pytester.runpytest("--assert=plain")
result.assert_outcomes(passed=2)
-def test_error_during_readouterr(testdir):
+def test_error_during_readouterr(pytester: Pytester) -> None:
"""Make sure we suspend capturing if errors occur during readouterr"""
- testdir.makepyfile(
+ pytester.makepyfile(
pytest_xyz="""
from _pytest.capture import FDCapture
FDCapture.snap = bad_snap
"""
)
- result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version")
+ result = pytester.runpytest_subprocess("-p", "pytest_xyz", "--version")
result.stderr.fnmatch_lines(
["*in bad_snap", " raise Exception('boom')", "Exception: boom"]
)
class TestCaptureIO:
- def test_text(self):
+ def test_text(self) -> None:
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
- def test_unicode_and_str_mixture(self):
+ def test_unicode_and_str_mixture(self) -> None:
f = capture.CaptureIO()
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
- def test_write_bytes_to_buffer(self):
+ def test_write_bytes_to_buffer(self) -> None:
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
class TestTeeCaptureIO(TestCaptureIO):
- def test_text(self):
+ def test_text(self) -> None:
sio = io.StringIO()
f = capture.TeeCaptureIO(sio)
f.write("hello")
f.close()
sio.close()
- def test_unicode_and_str_mixture(self):
+ def test_unicode_and_str_mixture(self) -> None:
sio = io.StringIO()
f = capture.TeeCaptureIO(sio)
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
-def test_dontreadfrominput():
+def test_dontreadfrominput() -> None:
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
@pytest.fixture
-def tmpfile(testdir) -> Generator[BinaryIO, None, None]:
- f = testdir.makepyfile("").open("wb+")
+def tmpfile(pytester: Pytester) -> Generator[BinaryIO, None, None]:
+ f = pytester.makepyfile("").open("wb+")
yield f
if not f.closed:
f.close()
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
- pytest.skip("could not run 'lsof' ({!r})".format(exc))
+ pytest.skip(f"could not run 'lsof' ({exc!r})")
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
class TestFDCapture:
- def test_simple(self, tmpfile):
+ def test_simple(self, tmpfile: BinaryIO) -> None:
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = b"hello"
cap.done()
assert s == "hello"
- def test_simple_many(self, tmpfile):
+ def test_simple_many(self, tmpfile: BinaryIO) -> None:
for i in range(10):
self.test_simple(tmpfile)
- def test_simple_many_check_open_files(self, testdir):
+ def test_simple_many_check_open_files(self, pytester: Pytester) -> None:
with lsof_check():
- with testdir.makepyfile("").open("wb+") as tmpfile:
+ with pytester.makepyfile("").open("wb+") as tmpfile:
self.test_simple_many(tmpfile)
- def test_simple_fail_second_start(self, tmpfile):
+ def test_simple_fail_second_start(self, tmpfile: BinaryIO) -> None:
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(AssertionError, cap.start)
- def test_stderr(self):
+ def test_stderr(self) -> None:
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
cap.done()
assert s == "hello\n"
- def test_stdin(self):
+ def test_stdin(self) -> None:
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == b""
- def test_writeorg(self, tmpfile):
+ def test_writeorg(self, tmpfile: BinaryIO) -> None:
data1, data2 = b"foo", b"bar"
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
stmp = stmp_file.read()
assert stmp == data2
- def test_simple_resume_suspend(self):
+ def test_simple_resume_suspend(self) -> None:
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
)
)
- def test_capfd_sys_stdout_mode(self, capfd):
+ def test_capfd_sys_stdout_mode(self, capfd) -> None:
assert "b" not in sys.stdout.mode
finally:
cap.stop_capturing()
- def test_capturing_done_simple(self):
+ def test_capturing_done_simple(self) -> None:
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
assert out == "hello"
assert err == "world"
- def test_capturing_reset_simple(self):
+ def test_capturing_reset_simple(self) -> None:
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
assert out == "hello world\n"
assert err == "hello error\n"
- def test_capturing_readouterr(self):
+ def test_capturing_readouterr(self) -> None:
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert err == "error2"
- def test_capture_results_accessible_by_attribute(self):
+ def test_capture_results_accessible_by_attribute(self) -> None:
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
assert capture_result.out == "hello"
assert capture_result.err == "world"
- def test_capturing_readouterr_unicode(self):
+ def test_capturing_readouterr_unicode(self) -> None:
with self.getcapture() as cap:
print("hxąć")
out, err = cap.readouterr()
assert out == "hxąć\n"
- def test_reset_twice_error(self):
+ def test_reset_twice_error(self) -> None:
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
assert out == "hello\n"
assert not err
- def test_capturing_modify_sysouterr_in_between(self):
+ def test_capturing_modify_sysouterr_in_between(self) -> None:
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
assert sys.stdout == oldout
assert sys.stderr == olderr
- def test_capturing_error_recursive(self):
+ def test_capturing_error_recursive(self) -> None:
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
assert out1 == "cap1\n"
assert out2 == "cap2\n"
- def test_just_out_capture(self):
+ def test_just_out_capture(self) -> None:
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
assert out == "hello"
assert not err
- def test_just_err_capture(self):
+ def test_just_err_capture(self) -> None:
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
assert err == "world"
assert not out
- def test_stdin_restored(self):
+ def test_stdin_restored(self) -> None:
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
- def test_stdin_nulled_by_default(self):
+ def test_stdin_nulled_by_default(self) -> None:
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
class TestTeeStdCapture(TestStdCapture):
captureclass = staticmethod(TeeStdCapture)
- def test_capturing_error_recursive(self):
+ def test_capturing_error_recursive(self) -> None:
r"""For TeeStdCapture since we passthrough stderr/stdout, cap1
should get all output, while cap2 should only get "cap2\n"."""
class TestStdCaptureFD(TestStdCapture):
captureclass = staticmethod(StdCaptureFD)
- def test_simple_only_fd(self, testdir):
- testdir.makepyfile(
+ def test_simple_only_fd(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""\
import os
def test_x():
assert 0
"""
)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_x*
class TestStdCaptureFDinvalidFD:
- def test_stdcapture_fd_invalid_fd(self, testdir):
- testdir.makepyfile(
+ def test_stdcapture_fd_invalid_fd(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import os
from fnmatch import fnmatch
cap.stop_capturing()
"""
)
- result = testdir.runpytest_subprocess("--capture=fd")
+ result = pytester.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()["passed"] == 3
- def test_fdcapture_invalid_fd_with_fd_reuse(self, testdir):
+ def test_fdcapture_invalid_fd_with_fd_reuse(self, pytester: Pytester) -> None:
with saved_fd(1):
os.close(1)
cap = capture.FDCaptureBinary(1)
with pytest.raises(OSError):
os.write(1, b"done")
- def test_fdcapture_invalid_fd_without_fd_reuse(self, testdir):
+ def test_fdcapture_invalid_fd_without_fd_reuse(self, pytester: Pytester) -> None:
with saved_fd(1), saved_fd(2):
os.close(1)
os.close(2)
os.write(2, b"done")
-def test_capture_not_started_but_reset():
+def test_capture_not_started_but_reset() -> None:
capsys = StdCapture()
capsys.stop_capturing()
-def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
+def test_using_capsys_fixture_works_with_sys_stdout_encoding(
+ capsys: CaptureFixture[str],
+) -> None:
test_text = "test text"
print(test_text.encode(sys.stdout.encoding, "replace"))
assert err == ""
-def test_capsys_results_accessible_by_attribute(capsys):
+def test_capsys_results_accessible_by_attribute(capsys: CaptureFixture[str]) -> None:
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capfile2 == capfile
-def test_close_and_capture_again(testdir):
- testdir.makepyfile(
+def test_close_and_capture_again(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import os
def test_close():
assert 0
"""
)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_capture_again*
@pytest.mark.parametrize(
"method", ["SysCapture(2)", "SysCapture(2, tee=True)", "FDCapture(2)"]
)
-def test_capturing_and_logging_fundamentals(testdir, method: str) -> None:
+def test_capturing_and_logging_fundamentals(pytester: Pytester, method: str) -> None:
# here we check a fundamental feature
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import sys, os
import py, logging
"""
% (method,)
)
- result = testdir.runpython(p)
+ result = pytester.runpython(p)
result.stdout.fnmatch_lines(
"""
suspend, captured*hello1*
assert "atexit" not in result.stderr.str()
-def test_error_attribute_issue555(testdir):
- testdir.makepyfile(
+def test_error_attribute_issue555(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import sys
def test_capattr():
assert sys.stderr.errors == "replace"
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
- not sys.platform.startswith("win") and sys.version_info[:2] >= (3, 6),
- reason="only py3.6+ on windows",
+ not sys.platform.startswith("win"), reason="only on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams() -> None:
"""
_py36_windowsconsoleio_workaround(stream)
-def test_dontreadfrominput_has_encoding(testdir):
- testdir.makepyfile(
+def test_dontreadfrominput_has_encoding(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import sys
def test_capattr():
assert sys.stderr.encoding
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
-def test_crash_on_closing_tmpfile_py27(testdir):
- p = testdir.makepyfile(
+def test_crash_on_closing_tmpfile_py27(
+ pytester: Pytester, monkeypatch: MonkeyPatch
+) -> None:
+ p = pytester.makepyfile(
"""
import threading
import sys
"""
)
# Do not consider plugins like hypothesis, which might output to stderr.
- testdir.monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
- result = testdir.runpytest_subprocess(str(p))
+ monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
+ result = pytester.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
result.stdout.no_fnmatch_line("*OSError*")
-def test_global_capture_with_live_logging(testdir):
+def test_global_capture_with_live_logging(pytester: Pytester) -> None:
# Issue 3819
# capture should work with live cli logging
# The teardown report seems to have the capture for the whole process (setup, call, teardown).
- testdir.makeconftest(
+ pytester.makeconftest(
"""
def pytest_runtest_logreport(report):
if "test_global" in report.nodeid:
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import logging
import sys
print("end test")
"""
)
- result = testdir.runpytest_subprocess("--log-cli-level=INFO")
+ result = pytester.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
with open("caplog") as f:
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
-def test_capture_with_live_logging(testdir, capture_fixture):
+def test_capture_with_live_logging(
+ pytester: Pytester, capture_fixture: str
+) -> None:
# Issue 3819
# capture should work with live cli logging
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import logging
import sys
)
)
- result = testdir.runpytest_subprocess("--log-cli-level=INFO")
+ result = pytester.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
-def test_typeerror_encodedfile_write(testdir):
+def test_typeerror_encodedfile_write(pytester: Pytester) -> None:
"""It should behave the same with and without output capturing (#4861)."""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
def test_fails():
import sys
sys.stdout.write(b"foo")
"""
)
- result_without_capture = testdir.runpytest("-s", str(p))
- result_with_capture = testdir.runpytest(str(p))
+ result_without_capture = pytester.runpytest("-s", str(p))
+ result_with_capture = pytester.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
out = result_with_capture.stdout.str()
)
-def test_stderr_write_returns_len(capsys):
+def test_stderr_write_returns_len(capsys: CaptureFixture[str]) -> None:
"""Write on Encoded files, namely captured stderr, should return number of characters written."""
assert sys.stderr.write("Foo") == 3
def test_encodedfile_writelines(tmpfile: BinaryIO) -> None:
ef = capture.EncodedFile(tmpfile, encoding="utf-8")
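+ # EncodedFile is a text-mode wrapper: writing bytes must raise TypeError,
+ # while writing str succeeds.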
with pytest.raises(TypeError):
- ef.writelines([b"line1", b"line2"])
+ ef.writelines([b"line1", b"line2"]) # type: ignore[list-item]
assert ef.writelines(["line3", "line4"]) is None # type: ignore[func-returns-value]
ef.flush()
tmpfile.seek(0)
)
-def test_logging_while_collecting(testdir):
+def test_logging_while_collecting(pytester: Pytester) -> None:
"""Issue #6240: Calls to logging.xxx() during collection causes all logging calls to be duplicated to stderr"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""\
import logging
assert False
"""
)
- result = testdir.runpytest_subprocess(p)
+ result = pytester.runpytest_subprocess(p)
assert result.ret == ExitCode.TESTS_FAILED
result.stdout.fnmatch_lines(
[
import os
import pprint
+import shutil
import sys
import textwrap
+from pathlib import Path
+from typing import List
import pytest
from _pytest.config import ExitCode
+from _pytest.fixtures import FixtureRequest
from _pytest.main import _in_venv
from _pytest.main import Session
-from _pytest.pathlib import Path
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.nodes import Item
from _pytest.pathlib import symlink_or_skip
+from _pytest.pytester import HookRecorder
+from _pytest.pytester import Pytester
from _pytest.pytester import Testdir
+def ensure_file(file_path: Path) -> Path:
+ """Ensure that file exists"""
+ file_path.parent.mkdir(parents=True, exist_ok=True)
+ file_path.touch(exist_ok=True)
+ return file_path
+
+
class TestCollector:
- def test_collect_versus_item(self):
- from pytest import Collector, Item
+ def test_collect_versus_item(self) -> None:
+ from pytest import Collector
+ from pytest import Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
- def test_check_equality(self, testdir: Testdir) -> None:
- modcol = testdir.getmodulecol(
+ def test_check_equality(self, pytester: Pytester) -> None:
+ modcol = pytester.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
- fn1 = testdir.collect_by_name(modcol, "test_pass")
+ fn1 = pytester.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
- fn2 = testdir.collect_by_name(modcol, "test_pass")
+ fn2 = pytester.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
assert hash(fn1) == hash(fn2)
- fn3 = testdir.collect_by_name(modcol, "test_fail")
+ fn3 = pytester.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
assert [1, 2, 3] != fn # type: ignore[comparison-overlap]
assert modcol != fn
- assert testdir.collect_by_name(modcol, "doesnotexist") is None
+ assert pytester.collect_by_name(modcol, "doesnotexist") is None
- def test_getparent(self, testdir):
- modcol = testdir.getmodulecol(
+ def test_getparent(self, pytester: Pytester) -> None:
+ modcol = pytester.getmodulecol(
"""
class TestClass:
def test_foo(self):
pass
"""
)
- cls = testdir.collect_by_name(modcol, "TestClass")
- fn = testdir.collect_by_name(testdir.collect_by_name(cls, "()"), "test_foo")
+ cls = pytester.collect_by_name(modcol, "TestClass")
+ assert isinstance(cls, pytest.Class)
+ instance = pytester.collect_by_name(cls, "()")
+ assert isinstance(instance, pytest.Instance)
+ fn = pytester.collect_by_name(instance, "test_foo")
+ assert isinstance(fn, pytest.Function)
- parent = fn.getparent(pytest.Module)
- assert parent is modcol
+ module_parent = fn.getparent(pytest.Module)
+ assert module_parent is modcol
- parent = fn.getparent(pytest.Function)
- assert parent is fn
+ function_parent = fn.getparent(pytest.Function)
+ assert function_parent is fn
- parent = fn.getparent(pytest.Class)
- assert parent is cls
+ class_parent = fn.getparent(pytest.Class)
+ assert class_parent is cls
- def test_getcustomfile_roundtrip(self, testdir):
- hello = testdir.makefile(".xxx", hello="world")
- testdir.makepyfile(
+ def test_getcustomfile_roundtrip(self, pytester: Pytester) -> None:
+ hello = pytester.makefile(".xxx", hello="world")
+ pytester.makepyfile(
conftest="""
import pytest
class CustomFile(pytest.File):
return CustomFile.from_parent(fspath=path, parent=parent)
"""
)
- node = testdir.getpathnode(hello)
+ node = pytester.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
- def test_can_skip_class_with_test_attr(self, testdir):
+ def test_can_skip_class_with_test_attr(self, pytester: Pytester) -> None:
"""Assure test class is skipped when using `__test__=False` (See #2007)."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
class TestFoo(object):
__test__ = False
assert True
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 0 items", "*no tests ran in*"])
class TestCollectFS:
- def test_ignored_certain_directories(self, testdir):
- tmpdir = testdir.tmpdir
- tmpdir.ensure("build", "test_notfound.py")
- tmpdir.ensure("dist", "test_notfound.py")
- tmpdir.ensure("_darcs", "test_notfound.py")
- tmpdir.ensure("CVS", "test_notfound.py")
- tmpdir.ensure("{arch}", "test_notfound.py")
- tmpdir.ensure(".whatever", "test_notfound.py")
- tmpdir.ensure(".bzr", "test_notfound.py")
- tmpdir.ensure("normal", "test_found.py")
+ def test_ignored_certain_directories(self, pytester: Pytester) -> None:
+ tmpdir = pytester.path
+ ensure_file(tmpdir / "build" / "test_notfound.py")
+ ensure_file(tmpdir / "dist" / "test_notfound.py")
+ ensure_file(tmpdir / "_darcs" / "test_notfound.py")
+ ensure_file(tmpdir / "CVS" / "test_notfound.py")
+ ensure_file(tmpdir / "{arch}" / "test_notfound.py")
+ ensure_file(tmpdir / ".whatever" / "test_notfound.py")
+ ensure_file(tmpdir / ".bzr" / "test_notfound.py")
+ ensure_file(tmpdir / "normal" / "test_found.py")
- for x in Path(str(tmpdir)).rglob("test_*.py"):
+ for x in tmpdir.rglob("test_*.py"):
x.write_text("def test_hello(): pass", "utf-8")
- result = testdir.runpytest("--collect-only")
+ result = pytester.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
"Activate.ps1",
),
)
- def test_ignored_virtualenvs(self, testdir, fname):
+ def test_ignored_virtualenvs(self, pytester: Pytester, fname: str) -> None:
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
- testdir.tmpdir.ensure("virtual", bindir, fname)
- testfile = testdir.tmpdir.ensure("virtual", "test_invenv.py")
- testfile.write("def test_hello(): pass")
+ ensure_file(pytester.path / "virtual" / bindir / fname)
+ testfile = ensure_file(pytester.path / "virtual" / "test_invenv.py")
+ testfile.write_text("def test_hello(): pass")
# by default, ignore tests inside a virtualenv
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.no_fnmatch_line("*test_invenv*")
# allow test collection if user insists
- result = testdir.runpytest("--collect-in-virtualenv")
+ result = pytester.runpytest("--collect-in-virtualenv")
assert "test_invenv" in result.stdout.str()
# allow test collection if user directly passes in the directory
- result = testdir.runpytest("virtual")
+ result = pytester.runpytest("virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"Activate.ps1",
),
)
- def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname):
+ def test_ignored_virtualenvs_norecursedirs_precedence(
+ self, pytester: Pytester, fname: str
+ ) -> None:
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# norecursedirs takes priority
- testdir.tmpdir.ensure(".virtual", bindir, fname)
- testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py")
- testfile.write("def test_hello(): pass")
- result = testdir.runpytest("--collect-in-virtualenv")
+ ensure_file(pytester.path / ".virtual" / bindir / fname)
+ testfile = ensure_file(pytester.path / ".virtual" / "test_invenv.py")
+ testfile.write_text("def test_hello(): pass")
+ result = pytester.runpytest("--collect-in-virtualenv")
result.stdout.no_fnmatch_line("*test_invenv*")
# ...unless the virtualenv is explicitly given on the CLI
- result = testdir.runpytest("--collect-in-virtualenv", ".virtual")
+ result = pytester.runpytest("--collect-in-virtualenv", ".virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"Activate.ps1",
),
)
- def test__in_venv(self, testdir, fname):
+ def test__in_venv(self, testdir: Testdir, fname: str) -> None:
"""Directly test the virtual env detection function"""
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# no bin/activate, not a virtualenv
base_path.ensure(bindir, fname)
assert _in_venv(base_path) is True
- def test_custom_norecursedirs(self, testdir):
- testdir.makeini(
+ def test_custom_norecursedirs(self, pytester: Pytester) -> None:
+ pytester.makeini(
"""
[pytest]
norecursedirs = mydir xyz*
"""
)
- tmpdir = testdir.tmpdir
- tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
- tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
- tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
- rec = testdir.inline_run()
+ tmpdir = pytester.path
+ ensure_file(tmpdir / "mydir" / "test_hello.py").write_text("def test_1(): pass")
+ ensure_file(tmpdir / "xyz123" / "test_2.py").write_text("def test_2(): 0/0")
+ ensure_file(tmpdir / "xy" / "test_ok.py").write_text("def test_3(): pass")
+ rec = pytester.inline_run()
rec.assertoutcome(passed=1)
- rec = testdir.inline_run("xyz123/test_2.py")
+ rec = pytester.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
- def test_testpaths_ini(self, testdir, monkeypatch):
- testdir.makeini(
+ def test_testpaths_ini(self, pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
+ pytester.makeini(
"""
[pytest]
testpaths = gui uts
"""
)
- tmpdir = testdir.tmpdir
- tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
- tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
- tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
+ tmpdir = pytester.path
+ ensure_file(tmpdir / "env" / "test_1.py").write_text("def test_env(): pass")
+ ensure_file(tmpdir / "gui" / "test_2.py").write_text("def test_gui(): pass")
+ ensure_file(tmpdir / "uts" / "test_3.py").write_text("def test_uts(): pass")
# when executing from the rootdir, only tests from the `testpaths`
# directories are collected
- items, reprec = testdir.inline_genitems("-v")
+ items, reprec = pytester.inline_genitems("-v")
assert [x.name for x in items] == ["test_gui", "test_uts"]
# check that explicitly passing directories in the command-line
# collects the tests
for dirname in ("env", "gui", "uts"):
- items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
+ items, reprec = pytester.inline_genitems(tmpdir.joinpath(dirname))
assert [x.name for x in items] == ["test_%s" % dirname]
# changing cwd to each subdirectory and running pytest without
# arguments collects the tests in that directory normally
for dirname in ("env", "gui", "uts"):
- monkeypatch.chdir(testdir.tmpdir.join(dirname))
- items, reprec = testdir.inline_genitems()
+ monkeypatch.chdir(pytester.path.joinpath(dirname))
+ items, reprec = pytester.inline_genitems()
assert [x.name for x in items] == ["test_%s" % dirname]
class TestCollectPluginHookRelay:
- def test_pytest_collect_file(self, testdir):
+ def test_pytest_collect_file(self, testdir: Testdir) -> None:
wascalled = []
class Plugin:
wascalled.append(path)
testdir.makefile(".abc", "xyz")
- pytest.main([testdir.tmpdir], plugins=[Plugin()])
+ pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == ".abc"
class TestPrunetraceback:
- def test_custom_repr_failure(self, testdir):
- p = testdir.makepyfile(
+ def test_custom_repr_failure(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import not_exists
"""
)
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
def pytest_collect_file(path, parent):
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*hello world*"])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
- def test_collect_report_postprocessing(self, testdir):
- p = testdir.makepyfile(
+ def test_collect_report_postprocessing(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import not_exists
"""
)
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
@pytest.hookimpl(hookwrapper=True)
outcome.force_result(rep)
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*header1*"])
class TestCustomConftests:
- def test_ignore_collect_path(self, testdir):
- testdir.makeconftest(
+ def test_ignore_collect_path(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
"""
)
- sub = testdir.mkdir("xy123")
- sub.ensure("test_hello.py").write("syntax error")
- sub.join("conftest.py").write("syntax error")
- testdir.makepyfile("def test_hello(): pass")
- testdir.makepyfile(test_one="syntax error")
- result = testdir.runpytest("--fulltrace")
+ sub = pytester.mkdir("xy123")
+ ensure_file(sub / "test_hello.py").write_text("syntax error")
+ sub.joinpath("conftest.py").write_text("syntax error")
+ pytester.makepyfile("def test_hello(): pass")
+ pytester.makepyfile(test_one="syntax error")
+ result = pytester.runpytest("--fulltrace")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
- def test_ignore_collect_not_called_on_argument(self, testdir):
- testdir.makeconftest(
+ def test_ignore_collect_not_called_on_argument(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_ignore_collect(path, config):
return True
"""
)
- p = testdir.makepyfile("def test_hello(): pass")
- result = testdir.runpytest(p)
+ p = pytester.makepyfile("def test_hello(): pass")
+ result = pytester.runpytest(p)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*collected 0 items*"])
- def test_collectignore_exclude_on_option(self, testdir):
- testdir.makeconftest(
+ def test_collectignore_exclude_on_option(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
collect_ignore = ['hello', 'test_world.py']
def pytest_addoption(parser):
collect_ignore[:] = []
"""
)
- testdir.mkdir("hello")
- testdir.makepyfile(test_world="def test_hello(): pass")
- result = testdir.runpytest()
+ pytester.mkdir("hello")
+ pytester.makepyfile(test_world="def test_hello(): pass")
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.no_fnmatch_line("*passed*")
- result = testdir.runpytest("--XX")
+ result = pytester.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
- def test_collectignoreglob_exclude_on_option(self, testdir):
- testdir.makeconftest(
+ def test_collectignoreglob_exclude_on_option(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
collect_ignore_glob = ['*w*l[dt]*']
def pytest_addoption(parser):
collect_ignore_glob[:] = []
"""
)
- testdir.makepyfile(test_world="def test_hello(): pass")
- testdir.makepyfile(test_welt="def test_hallo(): pass")
- result = testdir.runpytest()
+ pytester.makepyfile(test_world="def test_hello(): pass")
+ pytester.makepyfile(test_welt="def test_hallo(): pass")
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*collected 0 items*"])
- result = testdir.runpytest("--XX")
+ result = pytester.runpytest("--XX")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
- def test_pytest_fs_collect_hooks_are_seen(self, testdir):
- testdir.makeconftest(
+ def test_pytest_fs_collect_hooks_are_seen(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
return MyModule.from_parent(fspath=path, parent=parent)
"""
)
- testdir.mkdir("sub")
- testdir.makepyfile("def test_x(): pass")
- result = testdir.runpytest("--co")
+ pytester.mkdir("sub")
+ pytester.makepyfile("def test_x(): pass")
+ result = pytester.runpytest("--co")
result.stdout.fnmatch_lines(["*MyModule*", "*test_x*"])
- def test_pytest_collect_file_from_sister_dir(self, testdir):
- sub1 = testdir.mkpydir("sub1")
- sub2 = testdir.mkpydir("sub2")
- conf1 = testdir.makeconftest(
+ def test_pytest_collect_file_from_sister_dir(self, pytester: Pytester) -> None:
+ sub1 = pytester.mkpydir("sub1")
+ sub2 = pytester.mkpydir("sub2")
+ conf1 = pytester.makeconftest(
"""
import pytest
class MyModule1(pytest.Module):
return MyModule1.from_parent(fspath=path, parent=parent)
"""
)
- conf1.move(sub1.join(conf1.basename))
- conf2 = testdir.makeconftest(
+ conf1.replace(sub1.joinpath(conf1.name))
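+ # (Path.replace() moves the file, the pathlib counterpart of py.path's move().)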
+ conf2 = pytester.makeconftest(
"""
import pytest
class MyModule2(pytest.Module):
return MyModule2.from_parent(fspath=path, parent=parent)
"""
)
- conf2.move(sub2.join(conf2.basename))
- p = testdir.makepyfile("def test_x(): pass")
- p.copy(sub1.join(p.basename))
- p.copy(sub2.join(p.basename))
- result = testdir.runpytest("--co")
+ conf2.replace(sub2.joinpath(conf2.name))
+ p = pytester.makepyfile("def test_x(): pass")
+ shutil.copy(p, sub1.joinpath(p.name))
+ shutil.copy(p, sub2.joinpath(p.name))
+ result = pytester.runpytest("--co")
result.stdout.fnmatch_lines(["*MyModule1*", "*MyModule2*", "*test_x*"])
class TestSession:
- def test_collect_topdir(self, testdir):
- p = testdir.makepyfile("def test_func(): pass")
- id = "::".join([p.basename, "test_func"])
+ def test_collect_topdir(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile("def test_func(): pass")
+ id = "::".join([p.name, "test_func"])
# XXX migrate to collectonly? (see below)
- config = testdir.parseconfig(id)
- topdir = testdir.tmpdir
+ config = pytester.parseconfig(id)
+ topdir = pytester.path
rcol = Session.from_config(config)
assert topdir == rcol.fspath
# rootid = rcol.nodeid
assert len(colitems) == 1
assert colitems[0].fspath == p
- def get_reported_items(self, hookrec):
+ def get_reported_items(self, hookrec: HookRecorder) -> List[Item]:
"""Return pytest.Item instances reported by the pytest_collectreport hook"""
calls = hookrec.getcalls("pytest_collectreport")
return [
if isinstance(x, pytest.Item)
]
- def test_collect_protocol_single_function(self, testdir):
- p = testdir.makepyfile("def test_func(): pass")
- id = "::".join([p.basename, "test_func"])
- items, hookrec = testdir.inline_genitems(id)
+ def test_collect_protocol_single_function(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile("def test_func(): pass")
+ id = "::".join([p.name, "test_func"])
+ items, hookrec = pytester.inline_genitems(id)
(item,) = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
pprint.pprint(hookrec.calls)
- topdir = testdir.tmpdir # noqa
+ topdir = pytester.path # noqa
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == topdir"),
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_func"]
- def test_collect_protocol_method(self, testdir):
- p = testdir.makepyfile(
+ def test_collect_protocol_method(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
- normid = p.basename + "::TestClass::test_method"
- for id in [p.basename, p.basename + "::TestClass", normid]:
- items, hookrec = testdir.inline_genitems(id)
+ normid = p.name + "::TestClass::test_method"
+ for id in [p.name, p.name + "::TestClass", normid]:
+ items, hookrec = pytester.inline_genitems(id)
assert len(items) == 1
assert items[0].name == "test_method"
newid = items[0].nodeid
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
- def test_collect_custom_nodes_multi_id(self, testdir):
- p = testdir.makepyfile("def test_func(): pass")
- testdir.makeconftest(
+ def test_collect_custom_nodes_multi_id(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile("def test_func(): pass")
+ pytester.makeconftest(
"""
import pytest
class SpecialItem(pytest.Item):
if path.basename == %r:
return SpecialFile.from_parent(fspath=path, parent=parent)
"""
- % p.basename
+ % p.name
)
- id = p.basename
+ id = p.name
- items, hookrec = testdir.inline_genitems(id)
+ items, hookrec = pytester.inline_genitems(id)
pprint.pprint(hookrec.calls)
assert len(items) == 2
hookrec.assert_contains(
),
("pytest_collectstart", "collector.__class__.__name__ == 'Module'"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
- ("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
+ ("pytest_collectreport", "report.nodeid.startswith(p.name)"),
]
)
assert len(self.get_reported_items(hookrec)) == 2
- def test_collect_subdir_event_ordering(self, testdir):
- p = testdir.makepyfile("def test_func(): pass")
- aaa = testdir.mkpydir("aaa")
- test_aaa = aaa.join("test_aaa.py")
- p.move(test_aaa)
+ def test_collect_subdir_event_ordering(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile("def test_func(): pass")
+ aaa = pytester.mkpydir("aaa")
+ test_aaa = aaa.joinpath("test_aaa.py")
+ p.replace(test_aaa)
- items, hookrec = testdir.inline_genitems()
+ items, hookrec = pytester.inline_genitems()
assert len(items) == 1
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
]
)
- def test_collect_two_commandline_args(self, testdir):
- p = testdir.makepyfile("def test_func(): pass")
- aaa = testdir.mkpydir("aaa")
- bbb = testdir.mkpydir("bbb")
- test_aaa = aaa.join("test_aaa.py")
- p.copy(test_aaa)
- test_bbb = bbb.join("test_bbb.py")
- p.move(test_bbb)
+ def test_collect_two_commandline_args(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile("def test_func(): pass")
+ aaa = pytester.mkpydir("aaa")
+ bbb = pytester.mkpydir("bbb")
+ test_aaa = aaa.joinpath("test_aaa.py")
+ shutil.copy(p, test_aaa)
+ test_bbb = bbb.joinpath("test_bbb.py")
+ p.replace(test_bbb)
id = "."
- items, hookrec = testdir.inline_genitems(id)
+ items, hookrec = pytester.inline_genitems(id)
assert len(items) == 2
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
]
)
- def test_serialization_byid(self, testdir):
- testdir.makepyfile("def test_func(): pass")
- items, hookrec = testdir.inline_genitems()
+ def test_serialization_byid(self, pytester: Pytester) -> None:
+ pytester.makepyfile("def test_func(): pass")
+ items, hookrec = pytester.inline_genitems()
assert len(items) == 1
(item,) = items
- items2, hookrec = testdir.inline_genitems(item.nodeid)
+ items2, hookrec = pytester.inline_genitems(item.nodeid)
(item2,) = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
- def test_find_byid_without_instance_parents(self, testdir):
- p = testdir.makepyfile(
+ def test_find_byid_without_instance_parents(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
- arg = p.basename + "::TestClass::test_method"
- items, hookrec = testdir.inline_genitems(arg)
+ arg = p.name + "::TestClass::test_method"
+ items, hookrec = pytester.inline_genitems(arg)
assert len(items) == 1
(item,) = items
assert item.nodeid.endswith("TestClass::test_method")
class Test_getinitialnodes:
- def test_global_file(self, testdir, tmpdir) -> None:
- x = tmpdir.ensure("x.py")
- with tmpdir.as_cwd():
- config = testdir.parseconfigure(x)
- col = testdir.getnode(config, x)
+ def test_global_file(self, pytester: Pytester) -> None:
+ tmpdir = pytester.path
+ x = ensure_file(tmpdir / "x.py")
+ os.chdir(tmpdir)
+ config = pytester.parseconfigure(x)
+ col = pytester.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == "x.py"
assert col.parent is not None
assert col.parent.parent is None
- for col in col.listchain():
- assert col.config is config
+ for parent in col.listchain():
+ assert parent.config is config
- def test_pkgfile(self, testdir):
+ def test_pkgfile(self, pytester: Pytester) -> None:
"""Verify nesting when a module is within a package.
The parent chain should match: Module<x.py> -> Package<subdir> -> Session.
Session's parent should always be None.
"""
- tmpdir = testdir.tmpdir
- subdir = tmpdir.join("subdir")
- x = subdir.ensure("x.py")
- subdir.ensure("__init__.py")
- with subdir.as_cwd():
- config = testdir.parseconfigure(x)
- col = testdir.getnode(config, x)
+ tmpdir = pytester.path
+ subdir = tmpdir.joinpath("subdir")
+ x = ensure_file(subdir / "x.py")
+ ensure_file(subdir / "__init__.py")
+ os.chdir(subdir)
+ config = pytester.parseconfigure(x)
+ col = pytester.getnode(config, x)
+ assert col is not None
assert col.name == "x.py"
assert isinstance(col, pytest.Module)
assert isinstance(col.parent, pytest.Package)
assert isinstance(col.parent.parent, pytest.Session)
# session is batman (has no parents)
assert col.parent.parent.parent is None
- for col in col.listchain():
- assert col.config is config
+ for parent in col.listchain():
+ assert parent.config is config
class Test_genitems:
- def test_check_collect_hashes(self, testdir):
- p = testdir.makepyfile(
+ def test_check_collect_hashes(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def test_1():
pass
pass
"""
)
- p.copy(p.dirpath(p.purebasename + "2" + ".py"))
- items, reprec = testdir.inline_genitems(p.dirpath())
+ shutil.copy(p, p.parent / (p.stem + "2" + ".py"))
+ items, reprec = pytester.inline_genitems(p.parent)
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
assert hash(i) != hash(j)
assert i != j
- def test_example_items1(self, testdir):
- p = testdir.makepyfile(
+ def test_example_items1(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
pass
"""
)
- items, reprec = testdir.inline_genitems(p)
+ items, reprec = pytester.inline_genitems(p)
assert len(items) == 4
assert items[0].name == "testone"
assert items[1].name == "testmethod_one"
assert items[3].name == "testmethod_two[.[]"
# let's also test getmodpath here
- assert items[0].getmodpath() == "testone"
- assert items[1].getmodpath() == "TestX.testmethod_one"
- assert items[2].getmodpath() == "TestY.testmethod_one"
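+ # The type: ignore[attr-defined] comments below are needed because
+ # inline_genitems() returns plain Items, which do not declare getmodpath().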
+ assert items[0].getmodpath() == "testone" # type: ignore[attr-defined]
+ assert items[1].getmodpath() == "TestX.testmethod_one" # type: ignore[attr-defined]
+ assert items[2].getmodpath() == "TestY.testmethod_one" # type: ignore[attr-defined]
# PR #6202: Fix incorrect result of getmodpath method. (Resolves issue #6189)
- assert items[3].getmodpath() == "TestY.testmethod_two[.[]"
+ assert items[3].getmodpath() == "TestY.testmethod_two[.[]" # type: ignore[attr-defined]
- s = items[0].getmodpath(stopatmodule=False)
+ s = items[0].getmodpath(stopatmodule=False) # type: ignore[attr-defined]
assert s.endswith("test_example_items1.testone")
print(s)
- def test_class_and_functions_discovery_using_glob(self, testdir):
+ def test_class_and_functions_discovery_using_glob(self, pytester: Pytester) -> None:
"""Test that Python_classes and Python_functions config options work
as prefixes and glob-like patterns (#600)."""
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
python_classes = *Suite Test
python_functions = *_test test
"""
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
class MyTestSuite(object):
def x_test(self):
pass
"""
)
- items, reprec = testdir.inline_genitems(p)
- ids = [x.getmodpath() for x in items]
+ items, reprec = pytester.inline_genitems(p)
+ ids = [x.getmodpath() for x in items] # type: ignore[attr-defined]
assert ids == ["MyTestSuite.x_test", "TestCase.test_y"]
-def test_matchnodes_two_collections_same_file(testdir):
- testdir.makeconftest(
+def test_matchnodes_two_collections_same_file(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
def pytest_configure(config):
pass
"""
)
- p = testdir.makefile(".abc", "")
- result = testdir.runpytest()
+ p = pytester.makefile(".abc", "")
+ result = pytester.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
- res = testdir.runpytest("%s::item2" % p.basename)
+ res = pytester.runpytest("%s::item2" % p.name)
res.stdout.fnmatch_lines(["*1 passed*"])
class TestNodekeywords:
- def test_no_under(self, testdir):
- modcol = testdir.getmodulecol(
+ def test_no_under(self, pytester: Pytester) -> None:
+ modcol = pytester.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
assert not x.startswith("_")
assert modcol.name in repr(modcol.keywords)
- def test_issue345(self, testdir):
- testdir.makepyfile(
+ def test_issue345(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_should_not_be_selected():
assert False, 'I should not have been selected to run'
pass
"""
)
- reprec = testdir.inline_run("-k repr")
+ reprec = pytester.inline_run("-k repr")
reprec.assertoutcome(passed=1, failed=0)
- def test_keyword_matching_is_case_insensitive_by_default(self, testdir):
+ def test_keyword_matching_is_case_insensitive_by_default(
+ self, pytester: Pytester
+ ) -> None:
"""Check that selection via -k EXPRESSION is case-insensitive.
Since markers are also added to the node keywords, they too can
be matched without having to think about case sensitivity.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
)
num_matching_tests = 4
for expression in ("specifictopic", "SPECIFICTOPIC", "SpecificTopic"):
- reprec = testdir.inline_run("-k " + expression)
+ reprec = pytester.inline_run("-k " + expression)
reprec.assertoutcome(passed=num_matching_tests, failed=0)
)
-def test_exit_on_collection_error(testdir):
+def test_exit_on_collection_error(pytester: Pytester) -> None:
"""Verify that all collection errors are collected and no tests executed"""
- testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
+ pytester.makepyfile(**COLLECTION_ERROR_PY_FILES)
- res = testdir.runpytest()
+ res = pytester.runpytest()
assert res.ret == 2
res.stdout.fnmatch_lines(
)
-def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):
+def test_exit_on_collection_with_maxfail_smaller_than_n_errors(
+ pytester: Pytester,
+) -> None:
"""
Verify that collection is aborted once maxfail errors are encountered, ignoring
further modules which would cause more collection errors.
"""
- testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
+ pytester.makepyfile(**COLLECTION_ERROR_PY_FILES)
- res = testdir.runpytest("--maxfail=1")
+ res = pytester.runpytest("--maxfail=1")
assert res.ret == 1
res.stdout.fnmatch_lines(
[
res.stdout.no_fnmatch_line("*test_03*")
-def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
+def test_exit_on_collection_with_maxfail_bigger_than_n_errors(
+ pytester: Pytester,
+) -> None:
"""
Verify the test run aborts due to collection errors even if the maxfail count of
errors was not reached.
"""
- testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
+ pytester.makepyfile(**COLLECTION_ERROR_PY_FILES)
- res = testdir.runpytest("--maxfail=4")
+ res = pytester.runpytest("--maxfail=4")
assert res.ret == 2
res.stdout.fnmatch_lines(
[
)
-def test_continue_on_collection_errors(testdir):
+def test_continue_on_collection_errors(pytester: Pytester) -> None:
"""
Verify tests are executed even when collection errors occur when the
--continue-on-collection-errors flag is set
"""
- testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
+ pytester.makepyfile(**COLLECTION_ERROR_PY_FILES)
- res = testdir.runpytest("--continue-on-collection-errors")
+ res = pytester.runpytest("--continue-on-collection-errors")
assert res.ret == 1
res.stdout.fnmatch_lines(
)
-def test_continue_on_collection_errors_maxfail(testdir):
+def test_continue_on_collection_errors_maxfail(pytester: Pytester) -> None:
"""
Verify tests are executed even when collection errors occur and that maxfail
is honoured (including the collection error count).
test_4 is never executed because the run uses --maxfail=3, which means it is
interrupted after the 2 collection errors + 1 failure.
"""
- testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
+ pytester.makepyfile(**COLLECTION_ERROR_PY_FILES)
- res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
+ res = pytester.runpytest("--continue-on-collection-errors", "--maxfail=3")
assert res.ret == 1
res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 errors*"])
-def test_fixture_scope_sibling_conftests(testdir):
+def test_fixture_scope_sibling_conftests(pytester: Pytester) -> None:
"""Regression test case for https://github.com/pytest-dev/pytest/issues/2836"""
- foo_path = testdir.mkdir("foo")
- foo_path.join("conftest.py").write(
+ foo_path = pytester.mkdir("foo")
+ foo_path.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
"""
)
)
- foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1")
+ foo_path.joinpath("test_foo.py").write_text("def test_foo(fix): assert fix == 1")
# Tests in `food/` should not see the conftest fixture from `foo/`
- food_path = testdir.mkpydir("food")
- food_path.join("test_food.py").write("def test_food(fix): assert fix == 1")
+ food_path = pytester.mkpydir("food")
+ food_path.joinpath("test_food.py").write_text("def test_food(fix): assert fix == 1")
- res = testdir.runpytest()
+ res = pytester.runpytest()
assert res.ret == 1
res.stdout.fnmatch_lines(
)
-def test_collect_init_tests(testdir):
+def test_collect_init_tests(pytester: Pytester) -> None:
"""Check that we collect files from __init__.py files when they patch the 'python_files' (#3773)"""
- p = testdir.copy_example("collect/collect_init_tests")
- result = testdir.runpytest(p, "--collect-only")
+ p = pytester.copy_example("collect/collect_init_tests")
+ result = pytester.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
" <Function test_foo>",
]
)
- result = testdir.runpytest("./tests", "--collect-only")
+ result = pytester.runpytest("./tests", "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
]
)
# Ignores duplicates with "." and pkginit (#4310).
- result = testdir.runpytest("./tests", ".", "--collect-only")
+ result = pytester.runpytest("./tests", ".", "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
]
)
# Same as before, but different order.
- result = testdir.runpytest(".", "tests", "--collect-only")
+ result = pytester.runpytest(".", "tests", "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
" <Function test_foo>",
]
)
- result = testdir.runpytest("./tests/test_foo.py", "--collect-only")
+ result = pytester.runpytest("./tests/test_foo.py", "--collect-only")
result.stdout.fnmatch_lines(
["<Package tests>", " <Module test_foo.py>", " <Function test_foo>"]
)
result.stdout.no_fnmatch_line("*test_init*")
- result = testdir.runpytest("./tests/__init__.py", "--collect-only")
+ result = pytester.runpytest("./tests/__init__.py", "--collect-only")
result.stdout.fnmatch_lines(
["<Package tests>", " <Module __init__.py>", " <Function test_init>"]
)
result.stdout.no_fnmatch_line("*test_foo*")
-def test_collect_invalid_signature_message(testdir):
+def test_collect_invalid_signature_message(pytester: Pytester) -> None:
"""Check that we issue a proper message when we can't determine the signature of a test
function (#4026).
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
["Could not determine arguments of *.fix *: invalid method signature"]
)
-def test_collect_handles_raising_on_dunder_class(testdir):
+def test_collect_handles_raising_on_dunder_class(pytester: Pytester) -> None:
"""Handle proxy classes like Django's LazySettings that might raise on
``isinstance`` (#4266).
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
class ImproperlyConfigured(Exception):
pass
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed in*"])
assert result.ret == 0
-def test_collect_with_chdir_during_import(testdir):
- subdir = testdir.tmpdir.mkdir("sub")
- testdir.tmpdir.join("conftest.py").write(
+def test_collect_with_chdir_during_import(pytester: Pytester) -> None:
+ subdir = pytester.mkdir("sub")
+ pytester.path.joinpath("conftest.py").write_text(
textwrap.dedent(
"""
import os
% (str(subdir),)
)
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_1():
import os
"""
% (str(subdir),)
)
- with testdir.tmpdir.as_cwd():
- result = testdir.runpytest()
+ os.chdir(pytester.path)
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed in*"])
assert result.ret == 0
# Handles relative testpaths.
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
testpaths = .
"""
)
- with testdir.tmpdir.as_cwd():
- result = testdir.runpytest("--collect-only")
+ os.chdir(pytester.path)
+ result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(["collected 1 item"])
-def test_collect_pyargs_with_testpaths(testdir, monkeypatch):
- testmod = testdir.mkdir("testmod")
+def test_collect_pyargs_with_testpaths(
+ pytester: Pytester, monkeypatch: MonkeyPatch
+) -> None:
+ testmod = pytester.mkdir("testmod")
# NOTE: __init__.py is not collected since it does not match python_files.
- testmod.ensure("__init__.py").write("def test_func(): pass")
- testmod.ensure("test_file.py").write("def test_func(): pass")
+ testmod.joinpath("__init__.py").write_text("def test_func(): pass")
+ testmod.joinpath("test_file.py").write_text("def test_func(): pass")
- root = testdir.mkdir("root")
- root.ensure("pytest.ini").write(
+ root = pytester.mkdir("root")
+ root.joinpath("pytest.ini").write_text(
textwrap.dedent(
"""
[pytest]
"""
)
)
- monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir), prepend=os.pathsep)
- with root.as_cwd():
- result = testdir.runpytest_subprocess()
+ monkeypatch.setenv("PYTHONPATH", str(pytester.path), prepend=os.pathsep)
+ monkeypatch.chdir(root)
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed in*"])
-def test_collect_symlink_file_arg(testdir):
+def test_collect_symlink_file_arg(pytester: Pytester) -> None:
"""Collect a direct symlink works even if it does not match python_files (#4325)."""
- real = testdir.makepyfile(
+ real = pytester.makepyfile(
real="""
def test_nodeid(request):
assert request.node.nodeid == "symlink.py::test_nodeid"
"""
)
- symlink = testdir.tmpdir.join("symlink.py")
+ symlink = pytester.path.joinpath("symlink.py")
symlink_or_skip(real, symlink)
- result = testdir.runpytest("-v", symlink)
+ result = pytester.runpytest("-v", symlink)
result.stdout.fnmatch_lines(["symlink.py::test_nodeid PASSED*", "*1 passed in*"])
assert result.ret == 0
-def test_collect_symlink_out_of_tree(testdir):
+def test_collect_symlink_out_of_tree(pytester: Pytester) -> None:
"""Test collection of symlink via out-of-tree rootdir."""
- sub = testdir.tmpdir.join("sub")
- real = sub.join("test_real.py")
- real.write(
+ sub = pytester.mkdir("sub")
+ real = sub.joinpath("test_real.py")
+ real.write_text(
textwrap.dedent(
"""
def test_nodeid(request):
assert request.node.nodeid == "test_real.py::test_nodeid"
"""
),
- ensure=True,
)
- out_of_tree = testdir.tmpdir.join("out_of_tree").ensure(dir=True)
- symlink_to_sub = out_of_tree.join("symlink_to_sub")
+ out_of_tree = pytester.mkdir("out_of_tree")
+ symlink_to_sub = out_of_tree.joinpath("symlink_to_sub")
symlink_or_skip(sub, symlink_to_sub)
- sub.chdir()
- result = testdir.runpytest("-vs", "--rootdir=%s" % sub, symlink_to_sub)
+ os.chdir(sub)
+ result = pytester.runpytest("-vs", "--rootdir=%s" % sub, symlink_to_sub)
result.stdout.fnmatch_lines(
[
# Should not contain "sub/"!
assert result.ret == 0
-def test_collectignore_via_conftest(testdir):
+def test_collect_symlink_dir(pytester: Pytester) -> None:
+ """A symlinked directory is collected."""
+ dir = pytester.mkdir("dir")
+ dir.joinpath("test_it.py").write_text("def test_it(): pass", "utf-8")
+ pytester.path.joinpath("symlink_dir").symlink_to(dir)
+ result = pytester.runpytest()
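+ # The single test is collected twice: once via "dir" and once via "symlink_dir".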
+ result.assert_outcomes(passed=2)
+
+
+def test_collectignore_via_conftest(pytester: Pytester) -> None:
"""collect_ignore in parent conftest skips importing child (issue #4592)."""
- tests = testdir.mkpydir("tests")
- tests.ensure("conftest.py").write("collect_ignore = ['ignore_me']")
+ tests = pytester.mkpydir("tests")
+ tests.joinpath("conftest.py").write_text("collect_ignore = ['ignore_me']")
- ignore_me = tests.mkdir("ignore_me")
- ignore_me.ensure("__init__.py")
- ignore_me.ensure("conftest.py").write("assert 0, 'should_not_be_called'")
+ ignore_me = tests.joinpath("ignore_me")
+ ignore_me.mkdir()
+ ignore_me.joinpath("__init__.py").touch()
+ ignore_me.joinpath("conftest.py").write_text("assert 0, 'should_not_be_called'")
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
-def test_collect_pkg_init_and_file_in_args(testdir):
- subdir = testdir.mkdir("sub")
- init = subdir.ensure("__init__.py")
- init.write("def test_init(): pass")
- p = subdir.ensure("test_file.py")
- p.write("def test_file(): pass")
+def test_collect_pkg_init_and_file_in_args(pytester: Pytester) -> None:
+ subdir = pytester.mkdir("sub")
+ init = subdir.joinpath("__init__.py")
+ init.write_text("def test_init(): pass")
+ p = subdir.joinpath("test_file.py")
+ p.write_text("def test_file(): pass")
# NOTE: without "-o python_files=*.py" this collects test_file.py twice.
# This changed/broke with "Add package scoped fixtures #2283" (2b1410895)
# initially (causing a RecursionError).
- result = testdir.runpytest("-v", str(init), str(p))
+ result = pytester.runpytest("-v", str(init), str(p))
result.stdout.fnmatch_lines(
[
"sub/test_file.py::test_file PASSED*",
]
)
- result = testdir.runpytest("-v", "-o", "python_files=*.py", str(init), str(p))
+ result = pytester.runpytest("-v", "-o", "python_files=*.py", str(init), str(p))
result.stdout.fnmatch_lines(
[
"sub/__init__.py::test_init PASSED*",
)
-def test_collect_pkg_init_only(testdir):
- subdir = testdir.mkdir("sub")
- init = subdir.ensure("__init__.py")
- init.write("def test_init(): pass")
+def test_collect_pkg_init_only(pytester: Pytester) -> None:
+ subdir = pytester.mkdir("sub")
+ init = subdir.joinpath("__init__.py")
+ init.write_text("def test_init(): pass")
- result = testdir.runpytest(str(init))
+ result = pytester.runpytest(str(init))
result.stdout.fnmatch_lines(["*no tests ran in*"])
- result = testdir.runpytest("-v", "-o", "python_files=*.py", str(init))
+ result = pytester.runpytest("-v", "-o", "python_files=*.py", str(init))
result.stdout.fnmatch_lines(["sub/__init__.py::test_init PASSED*", "*1 passed in*"])
@pytest.mark.parametrize("use_pkg", (True, False))
-def test_collect_sub_with_symlinks(use_pkg, testdir):
+def test_collect_sub_with_symlinks(use_pkg: bool, pytester: Pytester) -> None:
"""Collection works with symlinked files and broken symlinks"""
- sub = testdir.mkdir("sub")
+ sub = pytester.mkdir("sub")
if use_pkg:
- sub.ensure("__init__.py")
- sub.join("test_file.py").write("def test_file(): pass")
+ sub.joinpath("__init__.py").touch()
+ sub.joinpath("test_file.py").write_text("def test_file(): pass")
# Create a broken symlink.
- symlink_or_skip("test_doesnotexist.py", sub.join("test_broken.py"))
+ symlink_or_skip("test_doesnotexist.py", sub.joinpath("test_broken.py"))
# Symlink that gets collected.
- symlink_or_skip("test_file.py", sub.join("test_symlink.py"))
+ symlink_or_skip("test_file.py", sub.joinpath("test_symlink.py"))
- result = testdir.runpytest("-v", str(sub))
+ result = pytester.runpytest("-v", str(sub))
result.stdout.fnmatch_lines(
[
"sub/test_file.py::test_file PASSED*",
)
-def test_collector_respects_tbstyle(testdir):
- p1 = testdir.makepyfile("assert 0")
- result = testdir.runpytest(p1, "--tb=native")
+def test_collector_respects_tbstyle(pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("assert 0")
+ result = pytester.runpytest(p1, "--tb=native")
assert result.ret == ExitCode.INTERRUPTED
result.stdout.fnmatch_lines(
[
)
-def test_does_not_eagerly_collect_packages(testdir):
- testdir.makepyfile("def test(): pass")
- pydir = testdir.mkpydir("foopkg")
- pydir.join("__init__.py").write("assert False")
- result = testdir.runpytest()
+def test_does_not_eagerly_collect_packages(pytester: Pytester) -> None:
+ pytester.makepyfile("def test(): pass")
+ pydir = pytester.mkpydir("foopkg")
+ pydir.joinpath("__init__.py").write_text("assert False")
+ result = pytester.runpytest()
assert result.ret == ExitCode.OK
-def test_does_not_put_src_on_path(testdir):
+def test_does_not_put_src_on_path(pytester: Pytester) -> None:
# `src` is not on sys.path so it should not be importable
- testdir.tmpdir.join("src/nope/__init__.py").ensure()
- testdir.makepyfile(
+ ensure_file(pytester.path / "src/nope/__init__.py")
+ pytester.makepyfile(
"import pytest\n"
"def test():\n"
" with pytest.raises(ImportError):\n"
" import nope\n"
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == ExitCode.OK
-def test_fscollector_from_parent(tmpdir, request):
+def test_fscollector_from_parent(testdir: Testdir, request: FixtureRequest) -> None:
"""Ensure File.from_parent can forward custom arguments to the constructor.
Context: https://github.com/pytest-dev/pytest-cpp/pull/47
return super().from_parent(parent=parent, fspath=fspath, x=x)
collector = MyCollector.from_parent(
- parent=request.session, fspath=tmpdir / "foo", x=10
+ parent=request.session, fspath=testdir.tmpdir / "foo", x=10
)
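+ # Testdir (rather than Pytester) is kept here because from_parent's
+ # fspath parameter expects a py.path.local.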
assert collector.x == 10
class TestImportModeImportlib:
- def test_collect_duplicate_names(self, testdir):
+ def test_collect_duplicate_names(self, pytester: Pytester) -> None:
"""--import-mode=importlib can import modules with same names that are not in packages."""
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"tests_a/test_foo.py": "def test_foo1(): pass",
"tests_b/test_foo.py": "def test_foo2(): pass",
}
)
- result = testdir.runpytest("-v", "--import-mode=importlib")
+ result = pytester.runpytest("-v", "--import-mode=importlib")
result.stdout.fnmatch_lines(
[
"tests_a/test_foo.py::test_foo1 *",
]
)
- def test_conftest(self, testdir):
+ def test_conftest(self, pytester: Pytester) -> None:
"""Directory containing conftest modules are not put in sys.path as a side-effect of
importing them."""
- tests_dir = testdir.tmpdir.join("tests")
- testdir.makepyfile(
+ tests_dir = pytester.path.joinpath("tests")
+ pytester.makepyfile(
**{
"tests/conftest.py": "",
"tests/test_foo.py": """
),
}
)
- result = testdir.runpytest("-v", "--import-mode=importlib")
+ result = pytester.runpytest("-v", "--import-mode=importlib")
result.stdout.fnmatch_lines(["* 1 passed in *"])
- def setup_conftest_and_foo(self, testdir):
+ def setup_conftest_and_foo(self, pytester: Pytester) -> None:
"""Setup a tests folder to be used to test if modules in that folder can be imported
due to side-effects of --import-mode or not."""
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"tests/conftest.py": "",
"tests/foo.py": """
}
)
- def test_modules_importable_as_side_effect(self, testdir):
+ def test_modules_importable_as_side_effect(self, pytester: Pytester) -> None:
"""In import-modes `prepend` and `append`, we are able to import modules from folders
containing conftest.py files due to the side effect of changing sys.path."""
- self.setup_conftest_and_foo(testdir)
- result = testdir.runpytest("-v", "--import-mode=prepend")
+ self.setup_conftest_and_foo(pytester)
+ result = pytester.runpytest("-v", "--import-mode=prepend")
result.stdout.fnmatch_lines(["* 1 passed in *"])
- def test_modules_not_importable_as_side_effect(self, testdir):
+ def test_modules_not_importable_as_side_effect(self, pytester: Pytester) -> None:
"""In import-mode `importlib`, modules in folders containing conftest.py are not
importable, as we don't change sys.path or sys.modules as a side effect of importing
the conftest.py file.
"""
- self.setup_conftest_and_foo(testdir)
- result = testdir.runpytest("-v", "--import-mode=importlib")
- exc_name = (
- "ModuleNotFoundError" if sys.version_info[:2] > (3, 5) else "ImportError"
- )
+ self.setup_conftest_and_foo(pytester)
+ result = pytester.runpytest("-v", "--import-mode=importlib")
result.stdout.fnmatch_lines(
[
- "*{}: No module named 'foo'".format(exc_name),
- "tests?test_foo.py:2: {}".format(exc_name),
+ "*ModuleNotFoundError: No module named 'foo'",
+ "tests?test_foo.py:2: ModuleNotFoundError",
"* 1 failed in *",
]
)
-def test_does_not_crash_on_error_from_decorated_function(testdir: Testdir) -> None:
+def test_does_not_crash_on_error_from_decorated_function(pytester: Pytester) -> None:
"""Regression test for an issue around bad exception formatting due to
assertion rewriting mangling lineno's (#4984)."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
@pytest.fixture
def a(): return 4
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
# Not INTERNAL_ERROR
assert result.ret == ExitCode.INTERRUPTED
+
+
+def test_does_not_crash_on_recursive_symlink(pytester: Pytester) -> None:
+ """Regression test for an issue around recursive symlinks (#7951)."""
+ symlink_or_skip("recursive", pytester.path.joinpath("recursive"))
+ pytester.makepyfile(
+ """
+ def test_foo(): assert True
+ """
+ )
+ result = pytester.runpytest()
+
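+ # The self-referential symlink must be skipped rather than crash
+ # collection, leaving only the one real test.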
+ assert result.ret == ExitCode.OK
+ assert result.parseoutcomes() == {"passed": 1}
import enum
-import sys
from functools import partial
from functools import wraps
+from typing import TYPE_CHECKING
from typing import Union
import pytest
from _pytest.compat import is_generator
from _pytest.compat import safe_getattr
from _pytest.compat import safe_isclass
-from _pytest.compat import TYPE_CHECKING
from _pytest.outcomes import OutcomeException
+from _pytest.pytester import Pytester
if TYPE_CHECKING:
from typing_extensions import Literal
-def test_is_generator():
+def test_is_generator() -> None:
def zap():
yield # pragma: no cover
assert not is_generator(foo)
-def test_real_func_loop_limit():
+def test_real_func_loop_limit() -> None:
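+ # Evil masquerades as an endless chain of wrappers via __getattr__;
+ # get_real_func must give up after a bounded number of unwraps.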
class Evil:
def __init__(self):
self.left = 1000
def __repr__(self):
- return "<Evil left={left}>".format(left=self.left)
+ return f"<Evil left={self.left}>"
def __getattr__(self, attr):
if not self.left:
get_real_func(evil)
-def test_get_real_func():
+def test_get_real_func() -> None:
"""Check that get_real_func correctly unwraps decorators until reaching the real function"""
def decorator(f):
assert get_real_func(wrapped_func2) is wrapped_func
-def test_get_real_func_partial():
+def test_get_real_func_partial() -> None:
"""Test get_real_func handles partial instances correctly"""
def foo(x):
assert get_real_func(partial(foo)) is foo
-def test_is_generator_asyncio(testdir):
- testdir.makepyfile(
+def test_is_generator_asyncio(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
from _pytest.compat import is_generator
import asyncio
)
# avoid importing asyncio into pytest's own process,
# which in turn imports logging (#8)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
-def test_is_generator_async_syntax(testdir):
- testdir.makepyfile(
+def test_is_generator_async_syntax(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
from _pytest.compat import is_generator
def test_is_generator_py35():
assert not is_generator(bar)
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
-@pytest.mark.skipif(
- sys.version_info < (3, 6), reason="async gen syntax available in Python 3.6+"
-)
-def test_is_generator_async_gen_syntax(testdir):
- testdir.makepyfile(
+def test_is_generator_async_gen_syntax(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
from _pytest.compat import is_generator
def test_is_generator_py36():
assert not is_generator(bar)
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
pytest.fail("fail should be catched")
-def test_helper_failures():
+def test_helper_failures() -> None:
helper = ErrorsHelper()
with pytest.raises(Exception):
helper.raise_exception
helper.raise_fail_outcome
-def test_safe_getattr():
+def test_safe_getattr() -> None:
helper = ErrorsHelper()
assert safe_getattr(helper, "raise_exception", "default") == "default"
assert safe_getattr(helper, "raise_fail_outcome", "default") == "default"
assert safe_getattr(helper, "raise_baseexception", "default")
-def test_safe_isclass():
+def test_safe_isclass() -> None:
assert safe_isclass(type) is True
class CrappyClass(Exception):
def test_assert_never_union() -> None:
- x = 10 # type: Union[int, str]
+ x: Union[int, str] = 10
if isinstance(x, int):
pass
def test_assert_never_enum() -> None:
E = enum.Enum("E", "a b")
- x = E.a # type: E
+ x: E = E.a
if x is E.a:
pass
def test_assert_never_literal() -> None:
- x = "a" # type: Literal["a", "b"]
+ x: Literal["a", "b"] = "a"
if x == "a":
pass
import re
import sys
import textwrap
+from pathlib import Path
from typing import Dict
from typing import List
from typing import Sequence
from typing import Tuple
+from typing import Type
+from typing import Union
import attr
import py.path
import _pytest._code
import pytest
from _pytest.compat import importlib_metadata
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import _get_plugin_specs_as_list
from _pytest.config import _iter_rewritable_modules
from _pytest.config import _strtobool
from _pytest.config.findpaths import get_common_ancestor
from _pytest.config.findpaths import locate_config
from _pytest.monkeypatch import MonkeyPatch
-from _pytest.pathlib import Path
-from _pytest.pytester import Testdir
-
-if TYPE_CHECKING:
- from typing import Type
+from _pytest.pytester import Pytester
class TestParseIni:
)
def test_getcfg_and_config(
self,
- testdir: Testdir,
+ pytester: Pytester,
tmp_path: Path,
section: str,
filename: str,
)
_, _, cfg = locate_config([sub])
assert cfg["name"] == "value"
- config = testdir.parseconfigure(str(sub))
+ config = pytester.parseconfigure(str(sub))
assert config.inicfg["name"] == "value"
- def test_setupcfg_uses_toolpytest_with_pytest(self, testdir):
- p1 = testdir.makepyfile("def test(): pass")
- testdir.makefile(
+ def test_setupcfg_uses_toolpytest_with_pytest(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("def test(): pass")
+ pytester.makefile(
".cfg",
setup="""
[tool:pytest]
[pytest]
testpaths=ignored
"""
- % p1.basename,
+ % p1.name,
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*, configfile: setup.cfg, *", "* 1 passed in *"])
assert result.ret == 0
- def test_append_parse_args(self, testdir, tmpdir, monkeypatch):
+ def test_append_parse_args(
+ self, pytester: Pytester, tmp_path: Path, monkeypatch: MonkeyPatch
+ ) -> None:
monkeypatch.setenv("PYTEST_ADDOPTS", '--color no -rs --tb="short"')
- tmpdir.join("pytest.ini").write(
+ tmp_path.joinpath("pytest.ini").write_text(
textwrap.dedent(
"""\
[pytest]
"""
)
)
- config = testdir.parseconfig(tmpdir)
+ config = pytester.parseconfig(tmp_path)
assert config.option.color == "no"
assert config.option.reportchars == "s"
assert config.option.tbstyle == "short"
assert config.option.verbose
- def test_tox_ini_wrong_version(self, testdir):
- testdir.makefile(
+ def test_tox_ini_wrong_version(self, pytester: Pytester) -> None:
+ pytester.makefile(
".ini",
tox="""
[pytest]
minversion=999.0
""",
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret != 0
result.stderr.fnmatch_lines(
["*tox.ini: 'minversion' requires pytest-999.0, actual pytest-*"]
"section, name",
[("tool:pytest", "setup.cfg"), ("pytest", "tox.ini"), ("pytest", "pytest.ini")],
)
- def test_ini_names(self, testdir, name, section):
- testdir.tmpdir.join(name).write(
+ def test_ini_names(self, pytester: Pytester, name, section) -> None:
+ pytester.path.joinpath(name).write_text(
textwrap.dedent(
"""
[{section}]
)
)
)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
assert config.getini("minversion") == "1.0"
- def test_pyproject_toml(self, testdir):
- testdir.makepyprojecttoml(
+ def test_pyproject_toml(self, pytester: Pytester) -> None:
+ pytester.makepyprojecttoml(
"""
[tool.pytest.ini_options]
minversion = "1.0"
"""
)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
assert config.getini("minversion") == "1.0"
- def test_toxini_before_lower_pytestini(self, testdir):
- sub = testdir.tmpdir.mkdir("sub")
- sub.join("tox.ini").write(
+ def test_toxini_before_lower_pytestini(self, pytester: Pytester) -> None:
+ sub = pytester.mkdir("sub")
+ sub.joinpath("tox.ini").write_text(
textwrap.dedent(
"""
[pytest]
"""
)
)
- testdir.tmpdir.join("pytest.ini").write(
+ pytester.path.joinpath("pytest.ini").write_text(
textwrap.dedent(
"""
[pytest]
"""
)
)
- config = testdir.parseconfigure(sub)
+ config = pytester.parseconfigure(sub)
assert config.getini("minversion") == "2.0"
- def test_ini_parse_error(self, testdir):
- testdir.tmpdir.join("pytest.ini").write("addopts = -x")
- result = testdir.runpytest()
+ def test_ini_parse_error(self, pytester: Pytester) -> None:
+ pytester.path.joinpath("pytest.ini").write_text("addopts = -x")
+ result = pytester.runpytest()
assert result.ret != 0
result.stderr.fnmatch_lines(["ERROR: *pytest.ini:1: no section header defined"])
@pytest.mark.xfail(reason="probably not needed")
- def test_confcutdir(self, testdir):
- sub = testdir.mkdir("sub")
- sub.chdir()
- testdir.makeini(
+ def test_confcutdir(self, pytester: Pytester) -> None:
+ sub = pytester.mkdir("sub")
+ os.chdir(sub)
+ pytester.makeini(
"""
[pytest]
addopts = --qwe
"""
)
- result = testdir.inline_run("--confcutdir=.")
+ result = pytester.inline_run("--confcutdir=.")
assert result.ret == 0
@pytest.mark.parametrize(
)
@pytest.mark.filterwarnings("default")
def test_invalid_config_options(
- self, testdir, ini_file_text, invalid_keys, warning_output, exception_text
- ):
- testdir.makeconftest(
+ self,
+ pytester: Pytester,
+ ini_file_text,
+ invalid_keys,
+ warning_output,
+ exception_text,
+ ) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("conftest_ini_key", "")
"""
)
- testdir.makepyfile("def test(): pass")
- testdir.makeini(ini_file_text)
+ pytester.makepyfile("def test(): pass")
+ pytester.makeini(ini_file_text)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
assert sorted(config._get_unknown_ini_keys()) == sorted(invalid_keys)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(warning_output)
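+ # With --strict-config, warnings raised while parsing the configuration
+ # file (such as unknown ini keys) escalate into usage errors.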
- result = testdir.runpytest("--strict-config")
+ result = pytester.runpytest("--strict-config")
if exception_text:
result.stderr.fnmatch_lines("ERROR: " + exception_text)
assert result.ret == pytest.ExitCode.USAGE_ERROR
assert result.ret == pytest.ExitCode.OK
@pytest.mark.filterwarnings("default")
- def test_silence_unknown_key_warning(self, testdir: Testdir) -> None:
+ def test_silence_unknown_key_warning(self, pytester: Pytester) -> None:
"""Unknown config key warnings can be silenced using filterwarnings (#7620)"""
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
filterwarnings =
foobar=1
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.no_fnmatch_line("*PytestConfigWarning*")
@pytest.mark.filterwarnings("default")
def test_disable_warnings_plugin_disables_config_warnings(
- self, testdir: Testdir
+ self, pytester: Pytester
) -> None:
"""Disabling 'warnings' plugin also disables config time warnings"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
def pytest_configure(config):
)
"""
)
- result = testdir.runpytest("-pno:warnings")
+ result = pytester.runpytest("-pno:warnings")
result.stdout.no_fnmatch_line("*PytestConfigWarning*")
@pytest.mark.parametrize(
],
)
def test_missing_required_plugins(
- self, testdir, monkeypatch, ini_file_text, exception_text
- ):
+ self,
+ pytester: Pytester,
+ monkeypatch: MonkeyPatch,
+ ini_file_text: str,
+ exception_text: str,
+ ) -> None:
"""Check 'required_plugins' option with various settings.
This test installs a mock "myplugin-1.5" which is used in the parametrized test cases.
def my_dists():
return [DummyDist(entry_points)]
- testdir.makepyfile(myplugin1_module="# my plugin module")
- testdir.syspathinsert()
+ pytester.makepyfile(myplugin1_module="# my plugin module")
+ pytester.syspathinsert()
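+ # Point importlib.metadata's distribution discovery at the fake
+ # distribution above, so the mock "myplugin-1.5" appears installed.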
monkeypatch.setattr(importlib_metadata, "distributions", my_dists)
- testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
+ monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
- testdir.makeini(ini_file_text)
+ pytester.makeini(ini_file_text)
if exception_text:
with pytest.raises(pytest.UsageError, match=exception_text):
- testdir.parseconfig()
+ pytester.parseconfig()
else:
- testdir.parseconfig()
+ pytester.parseconfig()
- def test_early_config_cmdline(self, testdir, monkeypatch):
+ def test_early_config_cmdline(
+ self, pytester: Pytester, monkeypatch: MonkeyPatch
+ ) -> None:
"""early_config contains options registered by third-party plugins.
This is a regression involving pytest-cov (and possibly others) introduced in #7700.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
myplugin="""
def pytest_addoption(parser):
parser.addoption('--foo', default=None, dest='foo')
"""
)
monkeypatch.setenv("PYTEST_PLUGINS", "myplugin")
- testdir.syspathinsert()
- result = testdir.runpytest("--foo=1")
+ pytester.syspathinsert()
+ result = pytester.runpytest("--foo=1")
result.stdout.fnmatch_lines("* no tests ran in *")
class TestConfigCmdlineParsing:
- def test_parsing_again_fails(self, testdir):
- config = testdir.parseconfig()
+ def test_parsing_again_fails(self, pytester: Pytester) -> None:
+ config = pytester.parseconfig()
pytest.raises(AssertionError, lambda: config.parse([]))
- def test_explicitly_specified_config_file_is_loaded(self, testdir):
- testdir.makeconftest(
+ def test_explicitly_specified_config_file_is_loaded(
+ self, pytester: Pytester
+ ) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("custom", "")
"""
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
custom = 0
"""
)
- testdir.makefile(
+ pytester.makefile(
".ini",
custom="""
[pytest]
custom = 1
""",
)
- config = testdir.parseconfig("-c", "custom.ini")
+ config = pytester.parseconfig("-c", "custom.ini")
assert config.getini("custom") == "1"
- testdir.makefile(
+ pytester.makefile(
".cfg",
custom_tool_pytest_section="""
[tool:pytest]
custom = 1
""",
)
- config = testdir.parseconfig("-c", "custom_tool_pytest_section.cfg")
+ config = pytester.parseconfig("-c", "custom_tool_pytest_section.cfg")
assert config.getini("custom") == "1"
- testdir.makefile(
+ pytester.makefile(
".toml",
custom="""
[tool.pytest.ini_options]
] # this is here on purpose, as it makes this an invalid '.ini' file
""",
)
- config = testdir.parseconfig("-c", "custom.toml")
+ config = pytester.parseconfig("-c", "custom.toml")
assert config.getini("custom") == "1"
- def test_absolute_win32_path(self, testdir):
- temp_ini_file = testdir.makefile(
+ def test_absolute_win32_path(self, pytester: Pytester) -> None:
+ temp_ini_file = pytester.makefile(
".ini",
custom="""
[pytest]
)
from os.path import normpath
- temp_ini_file = normpath(str(temp_ini_file))
- ret = pytest.main(["-c", temp_ini_file])
+ temp_ini_file_norm = normpath(str(temp_ini_file))
+ ret = pytest.main(["-c", temp_ini_file_norm])
assert ret == ExitCode.OK
class TestConfigAPI:
- def test_config_trace(self, testdir) -> None:
- config = testdir.parseconfig()
- values = [] # type: List[str]
+ def test_config_trace(self, pytester: Pytester) -> None:
+ config = pytester.parseconfig()
+ values: List[str] = []
config.trace.root.setwriter(values.append)
config.trace("hello")
assert len(values) == 1
assert values[0] == "hello [config]\n"
- def test_config_getoption(self, testdir):
- testdir.makeconftest(
+ def test_config_getoption(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addoption("--hello", "-X", dest="hello")
"""
)
- config = testdir.parseconfig("--hello=this")
+ config = pytester.parseconfig("--hello=this")
for x in ("hello", "--hello", "-X"):
assert config.getoption(x) == "this"
pytest.raises(ValueError, config.getoption, "qweqwe")
- def test_config_getoption_unicode(self, testdir):
- testdir.makeconftest(
+ def test_config_getoption_unicode(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addoption('--hello', type=str)
"""
)
- config = testdir.parseconfig("--hello=this")
+ config = pytester.parseconfig("--hello=this")
assert config.getoption("hello") == "this"
- def test_config_getvalueorskip(self, testdir):
- config = testdir.parseconfig()
+ def test_config_getvalueorskip(self, pytester: Pytester) -> None:
+ config = pytester.parseconfig()
pytest.raises(pytest.skip.Exception, config.getvalueorskip, "hello")
verbose = config.getvalueorskip("verbose")
assert verbose == config.option.verbose
- def test_config_getvalueorskip_None(self, testdir):
- testdir.makeconftest(
+ def test_config_getvalueorskip_None(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addoption("--hello")
"""
)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
with pytest.raises(pytest.skip.Exception):
config.getvalueorskip("hello")
- def test_getoption(self, testdir):
- config = testdir.parseconfig()
+ def test_getoption(self, pytester: Pytester) -> None:
+ config = pytester.parseconfig()
with pytest.raises(ValueError):
config.getvalue("x")
assert config.getoption("x", 1) == 1
- def test_getconftest_pathlist(self, testdir, tmpdir):
+ def test_getconftest_pathlist(self, pytester: Pytester, tmpdir) -> None:
somepath = tmpdir.join("x", "y", "z")
p = tmpdir.join("conftest.py")
p.write("pathlist = ['.', %r]" % str(somepath))
- config = testdir.parseconfigure(p)
+ config = pytester.parseconfigure(p)
assert config._getconftest_pathlist("notexist", path=tmpdir) is None
- pl = config._getconftest_pathlist("pathlist", path=tmpdir)
+ pl = config._getconftest_pathlist("pathlist", path=tmpdir) or []
print(pl)
assert len(pl) == 2
assert pl[0] == tmpdir
assert pl[1] == somepath
- def test_addini(self, testdir):
- testdir.makeconftest(
- """
+ @pytest.mark.parametrize("maybe_type", ["not passed", "None", '"string"'])
+ def test_addini(self, pytester: Pytester, maybe_type: str) -> None:
+ if maybe_type == "not passed":
+ type_string = ""
+ else:
+ type_string = f", {maybe_type}"
+
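+ # type=None and type="string" are expected to behave exactly like not
+ # passing a type at all: getini() returns the raw string value.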
+ pytester.makeconftest(
+ f"""
def pytest_addoption(parser):
- parser.addini("myname", "my new ini value")
+ parser.addini("myname", "my new ini value"{type_string})
"""
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
myname=hello
"""
)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
val = config.getini("myname")
assert val == "hello"
pytest.raises(ValueError, config.getini, "other")
- def make_conftest_for_pathlist(self, testdir):
- testdir.makeconftest(
+ def make_conftest_for_pathlist(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("paths", "my new ini value", type="pathlist")
"""
)
- def test_addini_pathlist_ini_files(self, testdir):
- self.make_conftest_for_pathlist(testdir)
- p = testdir.makeini(
+ def test_addini_pathlist_ini_files(self, pytester: Pytester) -> None:
+ self.make_conftest_for_pathlist(pytester)
+ p = pytester.makeini(
"""
[pytest]
paths=hello world/sub.py
"""
)
- self.check_config_pathlist(testdir, p)
+ self.check_config_pathlist(pytester, p)
- def test_addini_pathlist_pyproject_toml(self, testdir):
- self.make_conftest_for_pathlist(testdir)
- p = testdir.makepyprojecttoml(
+ def test_addini_pathlist_pyproject_toml(self, pytester: Pytester) -> None:
+ self.make_conftest_for_pathlist(pytester)
+ p = pytester.makepyprojecttoml(
"""
[tool.pytest.ini_options]
paths=["hello", "world/sub.py"]
"""
)
- self.check_config_pathlist(testdir, p)
+ self.check_config_pathlist(pytester, p)
- def check_config_pathlist(self, testdir, config_path):
- config = testdir.parseconfig()
+ def check_config_pathlist(self, pytester: Pytester, config_path: Path) -> None:
+ config = pytester.parseconfig()
values = config.getini("paths")
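+ # "pathlist" values are resolved relative to the directory of the
+ # configuration file that defines them, hence config_path.parent below.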
assert len(values) == 2
- assert values[0] == config_path.dirpath("hello")
- assert values[1] == config_path.dirpath("world/sub.py")
+ assert values[0] == config_path.parent.joinpath("hello")
+ assert values[1] == config_path.parent.joinpath("world/sub.py")
pytest.raises(ValueError, config.getini, "other")
- def make_conftest_for_args(self, testdir):
- testdir.makeconftest(
+ def make_conftest_for_args(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("args", "new args", type="args")
"""
)
- def test_addini_args_ini_files(self, testdir):
- self.make_conftest_for_args(testdir)
- testdir.makeini(
+ def test_addini_args_ini_files(self, pytester: Pytester) -> None:
+ self.make_conftest_for_args(pytester)
+ pytester.makeini(
"""
[pytest]
args=123 "123 hello" "this"
"""
)
- self.check_config_args(testdir)
+ self.check_config_args(pytester)
- def test_addini_args_pyproject_toml(self, testdir):
- self.make_conftest_for_args(testdir)
- testdir.makepyprojecttoml(
+ def test_addini_args_pyproject_toml(self, pytester: Pytester) -> None:
+ self.make_conftest_for_args(pytester)
+ pytester.makepyprojecttoml(
"""
[tool.pytest.ini_options]
args = ["123", "123 hello", "this"]
"""
)
- self.check_config_args(testdir)
+ self.check_config_args(pytester)
- def check_config_args(self, testdir):
- config = testdir.parseconfig()
+ def check_config_args(self, pytester: Pytester) -> None:
+ config = pytester.parseconfig()
values = config.getini("args")
assert values == ["123", "123 hello", "this"]
values = config.getini("a2")
assert values == list("123")
- def make_conftest_for_linelist(self, testdir):
- testdir.makeconftest(
+ def make_conftest_for_linelist(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
"""
)
- def test_addini_linelist_ini_files(self, testdir):
- self.make_conftest_for_linelist(testdir)
- testdir.makeini(
+ def test_addini_linelist_ini_files(self, pytester: Pytester) -> None:
+ self.make_conftest_for_linelist(pytester)
+ pytester.makeini(
"""
[pytest]
xy= 123 345
second line
"""
)
- self.check_config_linelist(testdir)
+ self.check_config_linelist(pytester)
- def test_addini_linelist_pprojecttoml(self, testdir):
- self.make_conftest_for_linelist(testdir)
- testdir.makepyprojecttoml(
+ def test_addini_linelist_pyproject_toml(self, pytester: Pytester) -> None:
+ self.make_conftest_for_linelist(pytester)
+ pytester.makepyprojecttoml(
"""
[tool.pytest.ini_options]
xy = ["123 345", "second line"]
"""
)
- self.check_config_linelist(testdir)
+ self.check_config_linelist(pytester)
- def check_config_linelist(self, testdir):
- config = testdir.parseconfig()
+ def check_config_linelist(self, pytester: Pytester) -> None:
+ config = pytester.parseconfig()
values = config.getini("xy")
assert len(values) == 2
assert values == ["123 345", "second line"]
@pytest.mark.parametrize(
"str_val, bool_val", [("True", True), ("no", False), ("no-ini", True)]
)
- def test_addini_bool(self, testdir, str_val, bool_val):
- testdir.makeconftest(
+ def test_addini_bool(
+ self, pytester: Pytester, str_val: str, bool_val: bool
+ ) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("strip", "", type="bool", default=True)
"""
)
if str_val != "no-ini":
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
strip=%s
"""
% str_val
)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
assert config.getini("strip") is bool_val
- def test_addinivalue_line_existing(self, testdir):
- testdir.makeconftest(
+ def test_addinivalue_line_existing(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
"""
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
xy= 123
"""
)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
values = config.getini("xy")
assert len(values) == 1
assert values == ["123"]
assert len(values) == 2
assert values == ["123", "456"]
- def test_addinivalue_line_new(self, testdir):
- testdir.makeconftest(
+ def test_addinivalue_line_new(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("xy", "", type="linelist")
"""
)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
assert not config.getini("xy")
config.addinivalue_line("xy", "456")
values = config.getini("xy")
assert len(values) == 2
assert values == ["456", "123"]
- def test_confcutdir_check_isdir(self, testdir):
+ def test_confcutdir_check_isdir(self, pytester: Pytester) -> None:
"""Give an error if --confcutdir is not a valid directory (#2078)"""
exp_match = r"^--confcutdir must be a directory, given: "
with pytest.raises(pytest.UsageError, match=exp_match):
- testdir.parseconfig(
- "--confcutdir", testdir.tmpdir.join("file").ensure(file=1)
- )
+ pytester.parseconfig("--confcutdir", pytester.path.joinpath("file"))
with pytest.raises(pytest.UsageError, match=exp_match):
- testdir.parseconfig("--confcutdir", testdir.tmpdir.join("inexistant"))
- config = testdir.parseconfig(
- "--confcutdir", testdir.tmpdir.join("dir").ensure(dir=1)
- )
- assert config.getoption("confcutdir") == str(testdir.tmpdir.join("dir"))
+ pytester.parseconfig("--confcutdir", pytester.path.joinpath("inexistant"))
+
+ p = pytester.mkdir("dir")
+ config = pytester.parseconfig("--confcutdir", p)
+ assert config.getoption("confcutdir") == str(p)
@pytest.mark.parametrize(
"names, expected",
(["source/python/bar/__init__.py", "setup.py"], ["bar"]),
],
)
- def test_iter_rewritable_modules(self, names, expected):
+ def test_iter_rewritable_modules(self, names, expected) -> None:
assert list(_iter_rewritable_modules(names)) == expected
class TestConfigFromdictargs:
- def test_basic_behavior(self, _sys_snapshot):
+ def test_basic_behavior(self, _sys_snapshot) -> None:
option_dict = {"verbose": 444, "foo": "bar", "capture": "no"}
args = ["a", "b"]
def test_invocation_params_args(self, _sys_snapshot) -> None:
"""Show that fromdictargs can handle args in their "orig" format"""
- option_dict = {} # type: Dict[str, object]
+ option_dict: Dict[str, object] = {}
args = ["-vvvv", "-s", "a", "b"]
config = Config.fromdictargs(option_dict, args)
assert config.option.verbose == 4
assert config.option.capture == "no"
- def test_inifilename(self, tmpdir):
- tmpdir.join("foo/bar.ini").ensure().write(
+ def test_inifilename(self, tmp_path: Path) -> None:
+ d1 = tmp_path.joinpath("foo")
+ d1.mkdir()
+ p1 = d1.joinpath("bar.ini")
+ p1.write_text(
textwrap.dedent(
"""\
[pytest]
inifile = "../../foo/bar.ini"
option_dict = {"inifilename": inifile, "capture": "no"}
- cwd = tmpdir.join("a/b")
- cwd.join("pytest.ini").ensure().write(
+ cwd = tmp_path.joinpath("a/b")
+ cwd.mkdir(parents=True)
+ p2 = cwd.joinpath("pytest.ini")
+ p2.write_text(
textwrap.dedent(
"""\
[pytest]
"""
)
)
- with cwd.ensure(dir=True).as_cwd():
+ with MonkeyPatch.context() as mp:
+ mp.chdir(cwd)
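+ # MonkeyPatch.context() restores the previous working directory when
+ # the block exits, replacing the old py.path as_cwd() helper.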
config = Config.fromdictargs(option_dict, ())
inipath = py.path.local(inifile)
assert config.inicfg.get("should_not_be_set") is None
-def test_options_on_small_file_do_not_blow_up(testdir) -> None:
+def test_options_on_small_file_do_not_blow_up(pytester: Pytester) -> None:
def runfiletest(opts: Sequence[str]) -> None:
- reprec = testdir.inline_run(*opts)
+ reprec = pytester.inline_run(*opts)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 2
assert skipped == passed == 0
- path = testdir.makepyfile(
- """
+ path = str(
+ pytester.makepyfile(
+ """
def test_f1(): assert 0
def test_f2(): assert 0
"""
+ )
)
runfiletest([path])
runfiletest(["-v", "-v", path])
-def test_preparse_ordering_with_setuptools(testdir, monkeypatch):
+def test_preparse_ordering_with_setuptools(
+ pytester: Pytester, monkeypatch: MonkeyPatch
+) -> None:
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
class EntryPoint:
return (Dist,)
monkeypatch.setattr(importlib_metadata, "distributions", my_dists)
- testdir.makeconftest(
+ pytester.makeconftest(
"""
pytest_plugins = "mytestplugin",
"""
)
monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin")
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
plugin = config.pluginmanager.getplugin("mytestplugin")
assert plugin.x == 42
-def test_setuptools_importerror_issue1479(testdir, monkeypatch):
+def test_setuptools_importerror_issue1479(
+ pytester: Pytester, monkeypatch: MonkeyPatch
+) -> None:
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
class DummyEntryPoint:
monkeypatch.setattr(importlib_metadata, "distributions", distributions)
with pytest.raises(ImportError):
- testdir.parseconfig()
+ pytester.parseconfig()
-def test_importlib_metadata_broken_distribution(testdir, monkeypatch):
+def test_importlib_metadata_broken_distribution(
+ pytester: Pytester, monkeypatch: MonkeyPatch
+) -> None:
"""Integration test for broken distributions with 'files' metadata being None (#5389)"""
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
return (Distribution(),)
monkeypatch.setattr(importlib_metadata, "distributions", distributions)
- testdir.parseconfig()
+ pytester.parseconfig()
@pytest.mark.parametrize("block_it", [True, False])
-def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch, block_it):
+def test_plugin_preparse_prevents_setuptools_loading(
+ pytester: Pytester, monkeypatch: MonkeyPatch, block_it: bool
+) -> None:
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
plugin_module_placeholder = object()
monkeypatch.setattr(importlib_metadata, "distributions", distributions)
args = ("-p", "no:mytestplugin") if block_it else ()
- config = testdir.parseconfig(*args)
+ config = pytester.parseconfig(*args)
config.pluginmanager.import_plugin("mytestplugin")
if block_it:
assert "mytestplugin" not in sys.modules
@pytest.mark.parametrize(
"parse_args,should_load", [(("-p", "mytestplugin"), True), ((), False)]
)
-def test_disable_plugin_autoload(testdir, monkeypatch, parse_args, should_load):
+def test_disable_plugin_autoload(
+ pytester: Pytester,
+ monkeypatch: MonkeyPatch,
+ parse_args: Union[Tuple[str, str], Tuple[()]],
+ should_load: bool,
+) -> None:
class DummyEntryPoint:
project_name = name = "mytestplugin"
group = "pytest11"
monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
monkeypatch.setattr(importlib_metadata, "distributions", distributions)
- monkeypatch.setitem(sys.modules, "mytestplugin", PseudoPlugin())
- config = testdir.parseconfig(*parse_args)
+ monkeypatch.setitem(sys.modules, "mytestplugin", PseudoPlugin()) # type: ignore[misc]
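+ # The ignore is needed because sys.modules maps names to ModuleType,
+ # and the PseudoPlugin instance is not a module.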
+ config = pytester.parseconfig(*parse_args)
has_loaded = config.pluginmanager.get_plugin("mytestplugin") is not None
assert has_loaded == should_load
if should_load:
assert PseudoPlugin.attrs_used == []
-def test_plugin_loading_order(testdir):
+def test_plugin_loading_order(pytester: Pytester) -> None:
"""Test order of plugin loading with `-p`."""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
def test_terminal_plugin(request):
import myplugin
"""
},
)
- testdir.syspathinsert()
- result = testdir.runpytest("-p", "myplugin", str(p1))
+ pytester.syspathinsert()
+ result = pytester.runpytest("-p", "myplugin", str(p1))
assert result.ret == 0
-def test_cmdline_processargs_simple(testdir):
- testdir.makeconftest(
+def test_cmdline_processargs_simple(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_cmdline_preparse(args):
args.append("-h")
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*pytest*", "*-h*"])
-def test_invalid_options_show_extra_information(testdir):
+def test_invalid_options_show_extra_information(pytester: Pytester) -> None:
"""Display extra information when pytest exits due to unrecognized
options on the command line."""
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
addopts = --invalid-option
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stderr.fnmatch_lines(
[
"*error: unrecognized arguments: --invalid-option*",
- "* inifile: %s*" % testdir.tmpdir.join("tox.ini"),
- "* rootdir: %s*" % testdir.tmpdir,
+ "* inifile: %s*" % pytester.path.joinpath("tox.ini"),
+ "* rootdir: %s*" % pytester.path,
]
)
["-v", "dir2", "dir1"],
],
)
-def test_consider_args_after_options_for_rootdir(testdir, args):
+def test_consider_args_after_options_for_rootdir(
+ pytester: Pytester, args: List[str]
+) -> None:
"""
Consider all arguments on the command line for rootdir
discovery, even if they happen to occur after an option. #949
"""
# replace "dir1" and "dir2" from "args" into their real directory
- root = testdir.tmpdir.mkdir("myroot")
- d1 = root.mkdir("dir1")
- d2 = root.mkdir("dir2")
+ root = pytester.mkdir("myroot")
+ d1 = root.joinpath("dir1")
+ d1.mkdir()
+ d2 = root.joinpath("dir2")
+ d2.mkdir()
for i, arg in enumerate(args):
if arg == "dir1":
- args[i] = d1
+ args[i] = str(d1)
elif arg == "dir2":
- args[i] = d2
- with root.as_cwd():
- result = testdir.runpytest(*args)
+ args[i] = str(d2)
+ with MonkeyPatch.context() as mp:
+ mp.chdir(root)
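+ # Run from inside "myroot" so the dir1/dir2 arguments stay relative;
+ # the chdir is undone when the context manager exits.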
+ result = pytester.runpytest(*args)
result.stdout.fnmatch_lines(["*rootdir: *myroot"])
-def test_toolongargs_issue224(testdir):
- result = testdir.runpytest("-m", "hello" * 500)
+def test_toolongargs_issue224(pytester: Pytester) -> None:
+ result = pytester.runpytest("-m", "hello" * 500)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
-def test_config_in_subdirectory_colon_command_line_issue2148(testdir):
+def test_config_in_subdirectory_colon_command_line_issue2148(
+ pytester: Pytester,
+) -> None:
conftest_source = """
def pytest_addoption(parser):
parser.addini('foo', 'foo')
"""
- testdir.makefile(
+ pytester.makefile(
".ini",
**{"pytest": "[pytest]\nfoo = root", "subdir/pytest": "[pytest]\nfoo = subdir"},
)
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"conftest": conftest_source,
"subdir/conftest": conftest_source,
}
)
- result = testdir.runpytest("subdir/test_foo.py::test_foo")
+ result = pytester.runpytest("subdir/test_foo.py::test_foo")
assert result.ret == 0
-def test_notify_exception(testdir, capfd):
- config = testdir.parseconfig()
+def test_notify_exception(pytester: Pytester, capfd) -> None:
+ config = pytester.parseconfig()
with pytest.raises(ValueError) as excinfo:
raise ValueError(1)
config.notify_exception(excinfo, config.option)
_, err = capfd.readouterr()
assert not err
- config = testdir.parseconfig("-p", "no:terminal")
+ config = pytester.parseconfig("-p", "no:terminal")
with pytest.raises(ValueError) as excinfo:
raise ValueError(1)
config.notify_exception(excinfo, config.option)
assert "ValueError" in err
-def test_no_terminal_discovery_error(testdir):
- testdir.makepyfile("raise TypeError('oops!')")
- result = testdir.runpytest("-p", "no:terminal", "--collect-only")
+def test_no_terminal_discovery_error(pytester: Pytester) -> None:
+ pytester.makepyfile("raise TypeError('oops!')")
+ result = pytester.runpytest("-p", "no:terminal", "--collect-only")
assert result.ret == ExitCode.INTERRUPTED
assert _get_plugin_specs_as_list(("foo", "bar")) == ["foo", "bar"]
-def test_collect_pytest_prefix_bug_integration(testdir):
+def test_collect_pytest_prefix_bug_integration(pytester: Pytester) -> None:
"""Integration test for issue #3775"""
- p = testdir.copy_example("config/collect_pytest_prefix")
- result = testdir.runpytest(p)
+ p = pytester.copy_example("config/collect_pytest_prefix")
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["* 1 passed *"])
class TestOverrideIniArgs:
@pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
- def test_override_ini_names(self, testdir, name):
+ def test_override_ini_names(self, pytester: Pytester, name: str) -> None:
section = "[pytest]" if name != "setup.cfg" else "[tool:pytest]"
- testdir.tmpdir.join(name).write(
+ pytester.path.joinpath(name).write_text(
textwrap.dedent(
"""
{section}
)
)
)
- testdir.makeconftest(
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("custom", "")"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_pass(pytestconfig):
ini_val = pytestconfig.getini("custom")
print('\\ncustom_option:%s\\n' % ini_val)"""
)
- result = testdir.runpytest("--override-ini", "custom=2.0", "-s")
+ result = pytester.runpytest("--override-ini", "custom=2.0", "-s")
assert result.ret == 0
result.stdout.fnmatch_lines(["custom_option:2.0"])
- result = testdir.runpytest(
+ result = pytester.runpytest(
"--override-ini", "custom=2.0", "--override-ini=custom=3.0", "-s"
)
assert result.ret == 0
result.stdout.fnmatch_lines(["custom_option:3.0"])
- def test_override_ini_pathlist(self, testdir):
- testdir.makeconftest(
+ def test_override_ini_pathlist(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("paths", "my new ini value", type="pathlist")"""
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
paths=blah.py"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import py.path
def test_pathlist(pytestconfig):
for cpf in config_paths:
print('\\nuser_path:%s' % cpf.basename)"""
)
- result = testdir.runpytest(
+ result = pytester.runpytest(
"--override-ini", "paths=foo/bar1.py foo/bar2.py", "-s"
)
result.stdout.fnmatch_lines(["user_path:bar1.py", "user_path:bar2.py"])
- def test_override_multiple_and_default(self, testdir):
- testdir.makeconftest(
+ def test_override_multiple_and_default(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
addini = parser.addini
addini("custom_option_3", "", default=False, type="bool")
addini("custom_option_4", "", default=True, type="bool")"""
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
custom_option_1=custom_option_1
custom_option_2=custom_option_2
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_multiple_options(pytestconfig):
prefix = "custom_option"
print('\\nini%d:%s' % (x, ini_value))
"""
)
- result = testdir.runpytest(
+ result = pytester.runpytest(
"--override-ini",
"custom_option_1=fulldir=/tmp/user1",
"-o",
]
)
- def test_override_ini_usage_error_bad_style(self, testdir):
- testdir.makeini(
+ def test_override_ini_usage_error_bad_style(self, pytester: Pytester) -> None:
+ pytester.makeini(
"""
[pytest]
xdist_strict=False
"""
)
- result = testdir.runpytest("--override-ini", "xdist_strict", "True")
+ result = pytester.runpytest("--override-ini", "xdist_strict", "True")
result.stderr.fnmatch_lines(
[
"ERROR: -o/--override-ini expects option=value style (got: 'xdist_strict').",
)
@pytest.mark.parametrize("with_ini", [True, False])
- def test_override_ini_handled_asap(self, testdir, with_ini):
+ def test_override_ini_handled_asap(
+ self, pytester: Pytester, with_ini: bool
+ ) -> None:
"""-o should be handled as soon as possible and always override what's in ini files (#2238)"""
if with_ini:
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
python_files=test_*.py
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
unittest_ini_handle="""
def test():
pass
"""
)
- result = testdir.runpytest("--override-ini", "python_files=unittest_*.py")
+ result = pytester.runpytest("--override-ini", "python_files=unittest_*.py")
result.stdout.fnmatch_lines(["*1 passed in*"])
- def test_addopts_before_initini(self, monkeypatch, _config_for_test, _sys_snapshot):
+ def test_addopts_before_initini(
+ self, monkeypatch: MonkeyPatch, _config_for_test, _sys_snapshot
+ ) -> None:
cache_dir = ".custom_cache"
monkeypatch.setenv("PYTEST_ADDOPTS", "-o cache_dir=%s" % cache_dir)
config = _config_for_test
config._preparse([], addopts=True)
assert config._override_ini == ["cache_dir=%s" % cache_dir]
- def test_addopts_from_env_not_concatenated(self, monkeypatch, _config_for_test):
+ def test_addopts_from_env_not_concatenated(
+ self, monkeypatch: MonkeyPatch, _config_for_test
+ ) -> None:
"""PYTEST_ADDOPTS should not take values from normal args (#4265)."""
monkeypatch.setenv("PYTEST_ADDOPTS", "-o")
config = _config_for_test
in excinfo.value.args[0]
)
- def test_addopts_from_ini_not_concatenated(self, testdir):
+ def test_addopts_from_ini_not_concatenated(self, pytester: Pytester) -> None:
"""`addopts` from ini should not take values from normal args (#4265)."""
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
addopts=-o
"""
)
- result = testdir.runpytest("cache_dir=ignored")
+ result = pytester.runpytest("cache_dir=ignored")
result.stderr.fnmatch_lines(
[
"%s: error: argument -o/--override-ini: expected one argument (via addopts config)"
- % (testdir.request.config._parser.optparser.prog,)
+ % (pytester._request.config._parser.optparser.prog,)
]
)
assert result.ret == _pytest.config.ExitCode.USAGE_ERROR
- def test_override_ini_does_not_contain_paths(self, _config_for_test, _sys_snapshot):
+ def test_override_ini_does_not_contain_paths(
+ self, _config_for_test, _sys_snapshot
+ ) -> None:
"""Check that -o no longer swallows all options after it (#3103)"""
config = _config_for_test
config._preparse(["-o", "cache_dir=/cache", "/some/test/path"])
assert config._override_ini == ["cache_dir=/cache"]
- def test_multiple_override_ini_options(self, testdir):
+ def test_multiple_override_ini_options(self, pytester: Pytester) -> None:
"""Ensure a file path following a '-o' option does not generate an error (#3103)"""
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"conftest.py": """
def pytest_addoption(parser):
""",
}
)
- result = testdir.runpytest("-o", "foo=1", "-o", "bar=0", "test_foo.py")
+ result = pytester.runpytest("-o", "foo=1", "-o", "bar=0", "test_foo.py")
assert "ERROR:" not in result.stderr.str()
result.stdout.fnmatch_lines(["collected 1 item", "*= 1 passed in *="])
-def test_help_via_addopts(testdir):
- testdir.makeini(
+def test_help_via_addopts(pytester: Pytester) -> None:
+ pytester.makeini(
"""
[pytest]
addopts = --unknown-option-should-allow-for-help --help
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(
[
)
-def test_help_and_version_after_argument_error(testdir):
- testdir.makeconftest(
+def test_help_and_version_after_argument_error(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def validate(arg):
raise argparse.ArgumentTypeError("argerror")
)
"""
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
addopts = --invalid-option-should-allow-for-help
"""
)
- result = testdir.runpytest("--help")
+ result = pytester.runpytest("--help")
result.stdout.fnmatch_lines(
[
"usage: *",
[
"ERROR: usage: *",
"%s: error: argument --invalid-option-should-allow-for-help: expected one argument"
- % (testdir.request.config._parser.optparser.prog,),
+ % (pytester._request.config._parser.optparser.prog,),
]
)
# Does not display full/default help.
assert "to see available markers type: pytest --markers" not in result.stdout.lines
assert result.ret == ExitCode.USAGE_ERROR
- result = testdir.runpytest("--version")
- result.stderr.fnmatch_lines(["pytest {}".format(pytest.__version__)])
+ result = pytester.runpytest("--version")
+ result.stderr.fnmatch_lines([f"pytest {pytest.__version__}"])
assert result.ret == ExitCode.USAGE_ERROR
-def test_help_formatter_uses_py_get_terminal_width(monkeypatch):
+def test_help_formatter_uses_py_get_terminal_width(monkeypatch: MonkeyPatch) -> None:
from _pytest.config.argparsing import DropShorterLongHelpFormatter
monkeypatch.setenv("COLUMNS", "90")
assert formatter._width == 42
-def test_config_does_not_load_blocked_plugin_from_args(testdir):
+def test_config_does_not_load_blocked_plugin_from_args(pytester: Pytester) -> None:
"""This tests that pytest's config setup handles "-p no:X"."""
- p = testdir.makepyfile("def test(capfd): pass")
- result = testdir.runpytest(str(p), "-pno:capture")
+ p = pytester.makepyfile("def test(capfd): pass")
+ result = pytester.runpytest(str(p), "-pno:capture")
result.stdout.fnmatch_lines(["E fixture 'capfd' not found"])
assert result.ret == ExitCode.TESTS_FAILED
- result = testdir.runpytest(str(p), "-pno:capture", "-s")
+ result = pytester.runpytest(str(p), "-pno:capture", "-s")
result.stderr.fnmatch_lines(["*: error: unrecognized arguments: -s"])
assert result.ret == ExitCode.USAGE_ERROR
-def test_invocation_args(testdir):
+def test_invocation_args(pytester: Pytester) -> None:
"""Ensure that Config.invocation_* arguments are correctly defined"""
class DummyPlugin:
pass
- p = testdir.makepyfile("def test(): pass")
+ p = pytester.makepyfile("def test(): pass")
plugin = DummyPlugin()
- rec = testdir.inline_run(p, "-v", plugins=[plugin])
+ rec = pytester.inline_run(p, "-v", plugins=[plugin])
calls = rec.getcalls("pytest_runtest_protocol")
assert len(calls) == 1
call = calls[0]
config = call.item.config
- assert config.invocation_params.args == (p, "-v")
- assert config.invocation_params.dir == Path(str(testdir.tmpdir))
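+ # Pytester.inline_run() forwards the arguments as strings, so
+ # invocation_params.args holds str(p) rather than the Path object.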
+ assert config.invocation_params.args == (str(p), "-v")
+ assert config.invocation_params.dir == pytester.path
plugins = config.invocation_params.plugins
assert len(plugins) == 2
assert plugins[0] is plugin
- assert type(plugins[1]).__name__ == "Collect" # installed by testdir.inline_run()
+ assert type(plugins[1]).__name__ == "Collect" # installed by pytester.inline_run()
# args cannot be None
with pytest.raises(TypeError):
if x not in _pytest.config.essential_plugins
],
)
-def test_config_blocked_default_plugins(testdir, plugin):
+def test_config_blocked_default_plugins(pytester: Pytester, plugin: str) -> None:
if plugin == "debugging":
# Fixed in xdist master (after 1.27.0).
# https://github.com/pytest-dev/pytest-xdist/pull/422
else:
pytest.skip("does not work with xdist currently")
- p = testdir.makepyfile("def test(): pass")
- result = testdir.runpytest(str(p), "-pno:%s" % plugin)
+ p = pytester.makepyfile("def test(): pass")
+ result = pytester.runpytest(str(p), "-pno:%s" % plugin)
if plugin == "python":
assert result.ret == ExitCode.USAGE_ERROR
if plugin != "terminal":
result.stdout.fnmatch_lines(["* 1 passed in *"])
- p = testdir.makepyfile("def test(): assert 0")
- result = testdir.runpytest(str(p), "-pno:%s" % plugin)
+ p = pytester.makepyfile("def test(): assert 0")
+ result = pytester.runpytest(str(p), "-pno:%s" % plugin)
assert result.ret == ExitCode.TESTS_FAILED
if plugin != "terminal":
result.stdout.fnmatch_lines(["* 1 failed in *"])
class TestSetupCfg:
- def test_pytest_setup_cfg_unsupported(self, testdir):
- testdir.makefile(
+ def test_pytest_setup_cfg_unsupported(self, pytester: Pytester) -> None:
+ pytester.makefile(
".cfg",
setup="""
[pytest]
""",
)
with pytest.raises(pytest.fail.Exception):
- testdir.runpytest()
+ pytester.runpytest()
- def test_pytest_custom_cfg_unsupported(self, testdir):
- testdir.makefile(
+ def test_pytest_custom_cfg_unsupported(self, pytester: Pytester) -> None:
+ pytester.makefile(
".cfg",
custom="""
[pytest]
""",
)
with pytest.raises(pytest.fail.Exception):
- testdir.runpytest("-c", "custom.cfg")
+ pytester.runpytest("-c", "custom.cfg")
class TestPytestPluginsVariable:
- def test_pytest_plugins_in_non_top_level_conftest_unsupported(self, testdir):
- testdir.makepyfile(
+ def test_pytest_plugins_in_non_top_level_conftest_unsupported(
+ self, pytester: Pytester
+ ) -> None:
+ pytester.makepyfile(
**{
"subdirectory/conftest.py": """
pytest_plugins=['capture']
"""
}
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_func():
pass
"""
)
- res = testdir.runpytest()
+ res = pytester.runpytest()
assert res.ret == 2
msg = "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported"
- res.stdout.fnmatch_lines(
- [
- "*{msg}*".format(msg=msg),
- "*subdirectory{sep}conftest.py*".format(sep=os.sep),
- ]
- )
+ res.stdout.fnmatch_lines([f"*{msg}*", f"*subdirectory{os.sep}conftest.py*"])
@pytest.mark.parametrize("use_pyargs", [True, False])
def test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs(
- self, testdir, use_pyargs
- ):
+ self, pytester: Pytester, use_pyargs: bool
+ ) -> None:
"""When using --pyargs, do not emit the warning about non-top-level conftest warnings (#4039, #4044)"""
files = {
"src/pkg/sub/conftest.py": "pytest_plugins=['capture']",
"src/pkg/sub/test_bar.py": "def test(): pass",
}
- testdir.makepyfile(**files)
- testdir.syspathinsert(testdir.tmpdir.join("src"))
+ pytester.makepyfile(**files)
+ pytester.syspathinsert(pytester.path.joinpath("src"))
args = ("--pyargs", "pkg") if use_pyargs else ()
- res = testdir.runpytest(*args)
+ res = pytester.runpytest(*args)
assert res.ret == (0 if use_pyargs else 2)
msg = (
msg
if use_pyargs:
assert msg not in res.stdout.str()
else:
- res.stdout.fnmatch_lines(["*{msg}*".format(msg=msg)])
+ res.stdout.fnmatch_lines([f"*{msg}*"])
def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest(
- self, testdir
- ):
- subdirectory = testdir.tmpdir.join("subdirectory")
+ self, pytester: Pytester
+ ) -> None:
+ subdirectory = pytester.path.joinpath("subdirectory")
subdirectory.mkdir()
- testdir.makeconftest(
+ pytester.makeconftest(
"""
pytest_plugins=['capture']
"""
)
- testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
+ pytester.path.joinpath("conftest.py").rename(
+ subdirectory.joinpath("conftest.py")
+ )
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_func():
pass
"""
)
- res = testdir.runpytest_subprocess()
+ res = pytester.runpytest_subprocess()
assert res.ret == 2
msg = "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported"
- res.stdout.fnmatch_lines(
- [
- "*{msg}*".format(msg=msg),
- "*subdirectory{sep}conftest.py*".format(sep=os.sep),
- ]
- )
+ res.stdout.fnmatch_lines([f"*{msg}*", f"*subdirectory{os.sep}conftest.py*"])
def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives(
- self, testdir
- ):
- testdir.makepyfile(
+ self, pytester: Pytester
+ ) -> None:
+ pytester.makepyfile(
"def test_func(): pass",
**{
"subdirectory/conftest": "pass",
""",
},
)
- res = testdir.runpytest_subprocess()
+ res = pytester.runpytest_subprocess()
assert res.ret == 0
msg = "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported"
assert msg not in res.stdout.str()
-def test_conftest_import_error_repr(tmpdir):
+def test_conftest_import_error_repr(tmpdir: py.path.local) -> None:
"""`ConftestImportFailure` should use a short error message and readable
path to the failed conftest.py file."""
path = tmpdir.join("foo/conftest.py")
with pytest.raises(
ConftestImportFailure,
- match=re.escape("RuntimeError: some error (from {})".format(path)),
+ match=re.escape(f"RuntimeError: some error (from {path})"),
):
try:
raise RuntimeError("some error")
raise ConftestImportFailure(path, exc_info) from exc
-def test_strtobool():
+def test_strtobool() -> None:
assert _strtobool("YES")
assert not _strtobool("NO")
with pytest.raises(ValueError):
],
)
def test_parse_warning_filter(
- arg: str, escape: bool, expected: "Tuple[str, str, Type[Warning], str, int]"
+ arg: str, escape: bool, expected: Tuple[str, str, Type[Warning], str, int]
) -> None:
assert parse_warning_filter(arg, escape=escape) == expected
+import argparse
import os
import textwrap
+from pathlib import Path
+from typing import cast
+from typing import Dict
+from typing import List
+from typing import Optional
import py
import pytest
from _pytest.config import ExitCode
from _pytest.config import PytestPluginManager
-from _pytest.pathlib import Path
+from _pytest.monkeypatch import MonkeyPatch
from _pytest.pathlib import symlink_or_skip
+from _pytest.pytester import Pytester
+from _pytest.pytester import Testdir
-def ConftestWithSetinitial(path):
+def ConftestWithSetinitial(path) -> PytestPluginManager:
conftest = PytestPluginManager()
conftest_setinitial(conftest, [path])
return conftest
-def conftest_setinitial(conftest, args, confcutdir=None):
+def conftest_setinitial(
+ conftest: PytestPluginManager, args, confcutdir: Optional[py.path.local] = None
+) -> None:
class Namespace:
- def __init__(self):
+ def __init__(self) -> None:
self.file_or_dir = args
self.confcutdir = str(confcutdir)
self.noconftest = False
self.pyargs = False
self.importmode = "prepend"
- conftest._set_initial_conftests(Namespace())
+ namespace = cast(argparse.Namespace, Namespace())
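+ # cast() only informs the type checker; Namespace merely provides the
+ # attributes that _set_initial_conftests() reads.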
+ conftest._set_initial_conftests(namespace)
@pytest.mark.usefixtures("_sys_snapshot")
assert path.purebasename.startswith("conftest")
-def test_conftest_in_nonpkg_with_init(tmpdir, _sys_snapshot):
- tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
- tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
- tmpdir.ensure("adir-1.0/b/__init__.py")
- tmpdir.ensure("adir-1.0/__init__.py")
- ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))
+def test_conftest_in_nonpkg_with_init(tmp_path: Path, _sys_snapshot) -> None:
+ tmp_path.joinpath("adir-1.0/b").mkdir(parents=True)
+ tmp_path.joinpath("adir-1.0/conftest.py").write_text("a=1 ; Directory = 3")
+ tmp_path.joinpath("adir-1.0/b/conftest.py").write_text("b=2 ; a = 1.5")
+ tmp_path.joinpath("adir-1.0/b/__init__.py").touch()
+ tmp_path.joinpath("adir-1.0/__init__.py").touch()
+ ConftestWithSetinitial(tmp_path.joinpath("adir-1.0", "b"))
-def test_doubledash_considered(testdir):
+def test_doubledash_considered(testdir: Testdir) -> None:
conf = testdir.mkdir("--option")
- conf.ensure("conftest.py")
+ conf.join("conftest.py").ensure()
conftest = PytestPluginManager()
conftest_setinitial(conftest, [conf.basename, conf.basename])
- values = conftest._getconftestmodules(conf, importmode="prepend")
+ values = conftest._getconftestmodules(py.path.local(conf), importmode="prepend")
assert len(values) == 1
-def test_issue151_load_all_conftests(testdir):
+def test_issue151_load_all_conftests(pytester: Pytester) -> None:
names = "code proj src".split()
for name in names:
- p = testdir.mkdir(name)
- p.ensure("conftest.py")
+ p = pytester.mkdir(name)
+ p.joinpath("conftest.py").touch()
conftest = PytestPluginManager()
conftest_setinitial(conftest, names)
assert len(d) == len(names)
-def test_conftest_global_import(testdir):
- testdir.makeconftest("x=3")
- p = testdir.makepyfile(
+def test_conftest_global_import(pytester: Pytester) -> None:
+ pytester.makeconftest("x=3")
+ p = pytester.makepyfile(
"""
import py, pytest
from _pytest.config import PytestPluginManager
assert conftest is mod2, (conftest, mod)
"""
)
- res = testdir.runpython(p)
+ res = pytester.runpython(p)
assert res.ret == 0
-def test_conftestcutdir(testdir):
+def test_conftestcutdir(testdir: Testdir) -> None:
conf = testdir.makeconftest("")
p = testdir.mkdir("x")
conftest = PytestPluginManager()
assert len(values) == 0
values = conftest._getconftestmodules(conf.dirpath(), importmode="prepend")
assert len(values) == 0
- assert conf not in conftest._conftestpath2mod
+ assert Path(conf) not in conftest._conftestpath2mod
# but we can still import a conftest directly
conftest._importconftest(conf, importmode="prepend")
values = conftest._getconftestmodules(conf.dirpath(), importmode="prepend")
assert values[0].__file__.startswith(str(conf))
-def test_conftestcutdir_inplace_considered(testdir):
- conf = testdir.makeconftest("")
+def test_conftestcutdir_inplace_considered(pytester: Pytester) -> None:
+ conf = pytester.makeconftest("")
conftest = PytestPluginManager()
- conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())
- values = conftest._getconftestmodules(conf.dirpath(), importmode="prepend")
+ conftest_setinitial(conftest, [conf.parent], confcutdir=py.path.local(conf.parent))
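+ # _getconftestmodules() apparently still expects py.path.local at this
+ # stage of the pathlib migration, hence the explicit conversions.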
+ values = conftest._getconftestmodules(
+ py.path.local(conf.parent), importmode="prepend"
+ )
assert len(values) == 1
assert values[0].__file__.startswith(str(conf))
@pytest.mark.parametrize("name", "test tests whatever .dotdir".split())
-def test_setinitial_conftest_subdirs(testdir, name):
- sub = testdir.mkdir(name)
- subconftest = sub.ensure("conftest.py")
+def test_setinitial_conftest_subdirs(pytester: Pytester, name: str) -> None:
+ sub = pytester.mkdir(name)
+ subconftest = sub.joinpath("conftest.py")
+ subconftest.touch()
conftest = PytestPluginManager()
- conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)
- key = Path(str(subconftest)).resolve()
+ conftest_setinitial(conftest, [sub.parent], confcutdir=py.path.local(pytester.path))
+ key = subconftest.resolve()
if name not in ("whatever", ".dotdir"):
assert key in conftest._conftestpath2mod
assert len(conftest._conftestpath2mod) == 1
assert len(conftest._conftestpath2mod) == 0
-def test_conftest_confcutdir(testdir):
- testdir.makeconftest("assert 0")
- x = testdir.mkdir("x")
- x.join("conftest.py").write(
+def test_conftest_confcutdir(pytester: Pytester) -> None:
+ pytester.makeconftest("assert 0")
+ x = pytester.mkdir("x")
+ x.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
def pytest_addoption(parser):
"""
)
)
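+ # --confcutdir=x stops the conftest scan at x: the root conftest with
+ # "assert 0" is never imported, while x's conftest still adds --xyz.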
- result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
+ result = pytester.runpytest("-h", "--confcutdir=%s" % x, x)
result.stdout.fnmatch_lines(["*--xyz*"])
result.stdout.no_fnmatch_line("*warning: could not load initial*")
-def test_conftest_symlink(testdir):
+def test_conftest_symlink(pytester: Pytester) -> None:
"""`conftest.py` discovery follows normal path resolution and does not resolve symlinks."""
# Structure:
# /real
# /symlinktests -> /real/app/tests (running at symlinktests should fail)
# /symlink -> /real (running at /symlink should work)
- real = testdir.tmpdir.mkdir("real")
- realtests = real.mkdir("app").mkdir("tests")
- symlink_or_skip(realtests, testdir.tmpdir.join("symlinktests"))
- symlink_or_skip(real, testdir.tmpdir.join("symlink"))
- testdir.makepyfile(
+ real = pytester.mkdir("real")
+ realtests = real.joinpath("app/tests")
+ realtests.mkdir(parents=True)
+ symlink_or_skip(realtests, pytester.path.joinpath("symlinktests"))
+ symlink_or_skip(real, pytester.path.joinpath("symlink"))
+ pytester.makepyfile(
**{
"real/app/tests/test_foo.py": "def test1(fixture): pass",
"real/conftest.py": textwrap.dedent(
)
# Should fail because conftest cannot be found from the link structure.
- result = testdir.runpytest("-vs", "symlinktests")
+ result = pytester.runpytest("-vs", "symlinktests")
result.stdout.fnmatch_lines(["*fixture 'fixture' not found*"])
assert result.ret == ExitCode.TESTS_FAILED
# Should not cause "ValueError: Plugin already registered" (#4174).
- result = testdir.runpytest("-vs", "symlink")
+ result = pytester.runpytest("-vs", "symlink")
assert result.ret == ExitCode.OK
-def test_conftest_symlink_files(testdir):
+def test_conftest_symlink_files(pytester: Pytester) -> None:
"""Symlinked conftest.py are found when pytest is executed in a directory with symlinked
files."""
- real = testdir.tmpdir.mkdir("real")
+ real = pytester.mkdir("real")
source = {
"app/test_foo.py": "def test1(fixture): pass",
"app/__init__.py": "",
"""
),
}
- testdir.makepyfile(**{"real/%s" % k: v for k, v in source.items()})
+ pytester.makepyfile(**{"real/%s" % k: v for k, v in source.items()})
# Create a build directory that contains symlinks to actual files
# but doesn't symlink actual directories.
- build = testdir.tmpdir.mkdir("build")
- build.mkdir("app")
+ build = pytester.mkdir("build")
+ build.joinpath("app").mkdir()
for f in source:
- symlink_or_skip(real.join(f), build.join(f))
- build.chdir()
- result = testdir.runpytest("-vs", "app/test_foo.py")
+ symlink_or_skip(real.joinpath(f), build.joinpath(f))
+ os.chdir(build)
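+ # Pytester chdirs into a fresh directory for each test and restores the
+ # original working directory on teardown, so a bare os.chdir() is safe.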
+ result = pytester.runpytest("-vs", "app/test_foo.py")
result.stdout.fnmatch_lines(["*conftest_loaded*", "PASSED"])
assert result.ret == ExitCode.OK
os.path.normcase("x") != os.path.normcase("X"),
reason="only relevant for case insensitive file systems",
)
-def test_conftest_badcase(testdir):
+def test_conftest_badcase(pytester: Pytester) -> None:
"""Check conftest.py loading when directory casing is wrong (#5792)."""
- testdir.tmpdir.mkdir("JenkinsRoot").mkdir("test")
+ pytester.path.joinpath("JenkinsRoot/test").mkdir(parents=True)
source = {"setup.py": "", "test/__init__.py": "", "test/conftest.py": ""}
- testdir.makepyfile(**{"JenkinsRoot/%s" % k: v for k, v in source.items()})
+ pytester.makepyfile(**{"JenkinsRoot/%s" % k: v for k, v in source.items()})
- testdir.tmpdir.join("jenkinsroot/test").chdir()
- result = testdir.runpytest()
+ os.chdir(pytester.path.joinpath("jenkinsroot/test"))
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
-def test_conftest_uppercase(testdir):
+def test_conftest_uppercase(pytester: Pytester) -> None:
"""Check conftest.py whose qualified name contains uppercase characters (#5819)"""
source = {"__init__.py": "", "Foo/conftest.py": "", "Foo/__init__.py": ""}
- testdir.makepyfile(**source)
+ pytester.makepyfile(**source)
- testdir.tmpdir.chdir()
- result = testdir.runpytest()
+ os.chdir(pytester.path)
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
-def test_no_conftest(testdir):
- testdir.makeconftest("assert 0")
- result = testdir.runpytest("--noconftest")
+def test_no_conftest(pytester: Pytester) -> None:
+ pytester.makeconftest("assert 0")
+ result = pytester.runpytest("--noconftest")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == ExitCode.USAGE_ERROR
-def test_conftest_existing_junitxml(testdir):
- x = testdir.mkdir("tests")
- x.join("conftest.py").write(
+def test_conftest_existing_junitxml(pytester: Pytester) -> None:
+ x = pytester.mkdir("tests")
+ x.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
def pytest_addoption(parser):
"""
)
)
- testdir.makefile(ext=".xml", junit="") # Writes junit.xml
- result = testdir.runpytest("-h", "--junitxml", "junit.xml")
+ pytester.makefile(ext=".xml", junit="") # Writes junit.xml
+ result = pytester.runpytest("-h", "--junitxml", "junit.xml")
result.stdout.fnmatch_lines(["*--xyz*"])
-def test_conftest_import_order(testdir, monkeypatch):
+def test_conftest_import_order(testdir: Testdir, monkeypatch: MonkeyPatch) -> None:
ct1 = testdir.makeconftest("")
sub = testdir.mkdir("sub")
ct2 = sub.join("conftest.py")
conftest = PytestPluginManager()
conftest._confcutdir = testdir.tmpdir
monkeypatch.setattr(conftest, "_importconftest", impct)
- assert conftest._getconftestmodules(sub, importmode="prepend") == [ct1, ct2]
+ mods = cast(
+ List[py.path.local],
+ conftest._getconftestmodules(py.path.local(sub), importmode="prepend"),
+ )
+ expected = [ct1, ct2]
+ assert mods == expected
-def test_fixture_dependency(testdir):
- ct1 = testdir.makeconftest("")
- ct1 = testdir.makepyfile("__init__.py")
- ct1.write("")
- sub = testdir.mkdir("sub")
- sub.join("__init__.py").write("")
- sub.join("conftest.py").write(
+def test_fixture_dependency(pytester: Pytester) -> None:
+ ct1 = pytester.makeconftest("")
+ ct1 = pytester.makepyfile("__init__.py")
+ ct1.write_text("")
+ sub = pytester.mkdir("sub")
+ sub.joinpath("__init__.py").write_text("")
+ sub.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
"""
)
)
- subsub = sub.mkdir("subsub")
- subsub.join("__init__.py").write("")
- subsub.join("test_bar.py").write(
+ subsub = sub.joinpath("subsub")
+ subsub.mkdir()
+ subsub.joinpath("__init__.py").write_text("")
+ subsub.joinpath("test_bar.py").write_text(
textwrap.dedent(
"""\
import pytest
"""
)
)
- result = testdir.runpytest("sub")
+ result = pytester.runpytest("sub")
result.stdout.fnmatch_lines(["*1 passed*"])
-def test_conftest_found_with_double_dash(testdir):
- sub = testdir.mkdir("sub")
- sub.join("conftest.py").write(
+def test_conftest_found_with_double_dash(pytester: Pytester) -> None:
+ sub = pytester.mkdir("sub")
+ sub.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
def pytest_addoption(parser):
"""
)
)
- p = sub.join("test_hello.py")
- p.write("def test_hello(): pass")
- result = testdir.runpytest(str(p) + "::test_hello", "-h")
+ p = sub.joinpath("test_hello.py")
+ p.write_text("def test_hello(): pass")
+ result = pytester.runpytest(str(p) + "::test_hello", "-h")
result.stdout.fnmatch_lines(
"""
*--hello-world*
class TestConftestVisibility:
- def _setup_tree(self, testdir): # for issue616
+ def _setup_tree(self, pytester: Pytester) -> Dict[str, Path]: # for issue616
# example mostly taken from:
# https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
- runner = testdir.mkdir("empty")
- package = testdir.mkdir("package")
+ runner = pytester.mkdir("empty")
+ package = pytester.mkdir("package")
- package.join("conftest.py").write(
+ package.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
"""
)
)
- package.join("test_pkgroot.py").write(
+ package.joinpath("test_pkgroot.py").write_text(
textwrap.dedent(
"""\
def test_pkgroot(fxtr):
)
)
- swc = package.mkdir("swc")
- swc.join("__init__.py").ensure()
- swc.join("conftest.py").write(
+ swc = package.joinpath("swc")
+ swc.mkdir()
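+ # Path.touch() stands in for py.path.local's ensure() to create empty files.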
+ swc.joinpath("__init__.py").touch()
+ swc.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
"""
)
)
- swc.join("test_with_conftest.py").write(
+ swc.joinpath("test_with_conftest.py").write_text(
textwrap.dedent(
"""\
def test_with_conftest(fxtr):
)
)
- snc = package.mkdir("snc")
- snc.join("__init__.py").ensure()
- snc.join("test_no_conftest.py").write(
+ snc = package.joinpath("snc")
+ snc.mkdir()
+ snc.joinpath("__init__.py").touch()
+ snc.joinpath("test_no_conftest.py").write_text(
textwrap.dedent(
"""\
def test_no_conftest(fxtr):
)
)
print("created directory structure:")
- tmppath = Path(str(testdir.tmpdir))
- for x in tmppath.rglob(""):
- print(" " + str(x.relative_to(tmppath)))
+ for x in pytester.path.rglob(""):
+ print(" " + str(x.relative_to(pytester.path)))
return {"runner": runner, "package": package, "swc": swc, "snc": snc}
],
)
def test_parsefactories_relative_node_ids(
- self, testdir, chdir, testarg, expect_ntests_passed
- ):
+ self, pytester: Pytester, chdir: str, testarg: str, expect_ntests_passed: int
+ ) -> None:
"""#616"""
- dirs = self._setup_tree(testdir)
- print("pytest run in cwd: %s" % (dirs[chdir].relto(testdir.tmpdir)))
+ dirs = self._setup_tree(pytester)
+ print("pytest run in cwd: %s" % (dirs[chdir].relative_to(pytester.path)))
print("pytestarg : %s" % (testarg))
print("expected pass : %s" % (expect_ntests_passed))
- with dirs[chdir].as_cwd():
- reprec = testdir.inline_run(testarg, "-q", "--traceconfig")
- reprec.assertoutcome(passed=expect_ntests_passed)
+ os.chdir(dirs[chdir])
+ reprec = pytester.inline_run(testarg, "-q", "--traceconfig")
+ reprec.assertoutcome(passed=expect_ntests_passed)
@pytest.mark.parametrize(
"confcutdir,passed,error", [(".", 2, 0), ("src", 1, 1), (None, 1, 1)]
)
-def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error):
+def test_search_conftest_up_to_inifile(
+ pytester: Pytester, confcutdir: str, passed: int, error: int
+) -> None:
"""Test that conftest files are detected only up to an ini file, unless
an explicit --confcutdir option is given.
"""
- root = testdir.tmpdir
- src = root.join("src").ensure(dir=1)
- src.join("pytest.ini").write("[pytest]")
- src.join("conftest.py").write(
+ root = pytester.path
+ src = root.joinpath("src")
+ src.mkdir()
+ src.joinpath("pytest.ini").write_text("[pytest]")
+ src.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
"""
)
)
- src.join("test_foo.py").write(
+ src.joinpath("test_foo.py").write_text(
textwrap.dedent(
"""\
def test_1(fix1):
"""
)
)
- root.join("conftest.py").write(
+ root.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
args = [str(src)]
if confcutdir:
- args = ["--confcutdir=%s" % root.join(confcutdir)]
- result = testdir.runpytest(*args)
+ args = ["--confcutdir=%s" % root.joinpath(confcutdir)]
+ result = pytester.runpytest(*args)
match = ""
if passed:
match += "*%d passed*" % passed
result.stdout.fnmatch_lines(match)
-def test_issue1073_conftest_special_objects(testdir):
- testdir.makeconftest(
+def test_issue1073_conftest_special_objects(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""\
class DontTouchMe(object):
def __getattr__(self, x):
x = DontTouchMe()
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
def test_some():
pass
"""
)
- res = testdir.runpytest()
+ res = pytester.runpytest()
assert res.ret == 0
-def test_conftest_exception_handling(testdir):
- testdir.makeconftest(
+def test_conftest_exception_handling(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""\
raise ValueError()
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
def test_some():
pass
"""
)
- res = testdir.runpytest()
+ res = pytester.runpytest()
assert res.ret == 4
assert "raise ValueError()" in [line.strip() for line in res.errlines]
-def test_hook_proxy(testdir):
+def test_hook_proxy(pytester: Pytester) -> None:
"""Session's gethookproxy() would cache conftests incorrectly (#2016).
It was decided to remove the cache altogether.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"root/demo-0/test_foo1.py": "def test1(): pass",
"root/demo-a/test_foo2.py": "def test1(): pass",
"root/demo-c/test_foo4.py": "def test1(): pass",
}
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
["*test_foo1.py*", "*test_foo3.py*", "*test_foo4.py*", "*3 passed*"]
)
-def test_required_option_help(testdir):
- testdir.makeconftest("assert 0")
- x = testdir.mkdir("x")
- x.join("conftest.py").write(
+def test_required_option_help(pytester: Pytester) -> None:
+ pytester.makeconftest("assert 0")
+ x = pytester.mkdir("x")
+ x.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
def pytest_addoption(parser):
"""
)
)
- result = testdir.runpytest("-h", x)
+ result = pytester.runpytest("-h", x)
result.stdout.no_fnmatch_line("*argument --xyz is required*")
assert "general:" in result.stdout.str()
import os
import sys
+from typing import List
import _pytest._code
import pytest
from _pytest.debugging import _validate_usepdb_cls
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.pytester import Pytester
try:
# Type ignored for Python <= 3.6.
@pytest.fixture(autouse=True)
def pdb_env(request):
- if "testdir" in request.fixturenames:
+ if "pytester" in request.fixturenames:
# Disable pdb++ with inner tests.
- testdir = request.getfixturevalue("testdir")
- testdir.monkeypatch.setenv("PDBPP_HIJACK_PDB", "0")
+ pytester: Pytester = request.getfixturevalue("pytester")
+ pytester._monkeypatch.setenv("PDBPP_HIJACK_PDB", "0")
-def runpdb_and_get_report(testdir, source):
- p = testdir.makepyfile(source)
- result = testdir.runpytest_inprocess("--pdb", p)
- reports = result.reprec.getreports("pytest_runtest_logreport")
+def runpdb_and_get_report(pytester: Pytester, source: str):
+ p = pytester.makepyfile(source)
+ result = pytester.runpytest_inprocess("--pdb", p)
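+ # reprec is attached to the result only by runpytest_inprocess and is not
+ # part of RunResult's declared type, hence the attr-defined ignore.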
+ reports = result.reprec.getreports("pytest_runtest_logreport") # type: ignore[attr-defined]
assert len(reports) == 3, reports # setup/call/teardown
return reports[1]
@pytest.fixture
-def custom_pdb_calls():
+def custom_pdb_calls() -> List[str]:
called = []
# install dummy debugger class and track which methods were called on it
monkeypatch.setattr(plugin, "post_mortem", mypdb)
return pdblist
- def test_pdb_on_fail(self, testdir, pdblist):
+ def test_pdb_on_fail(self, pytester: Pytester, pdblist) -> None:
rep = runpdb_and_get_report(
- testdir,
+ pytester,
"""
def test_func():
assert 0
tb = _pytest._code.Traceback(pdblist[0][0])
assert tb[-1].name == "test_func"
- def test_pdb_on_xfail(self, testdir, pdblist):
+ def test_pdb_on_xfail(self, pytester: Pytester, pdblist) -> None:
rep = runpdb_and_get_report(
- testdir,
+ pytester,
"""
import pytest
@pytest.mark.xfail
assert "xfail" in rep.keywords
assert not pdblist
- def test_pdb_on_skip(self, testdir, pdblist):
+ def test_pdb_on_skip(self, pytester: Pytester, pdblist) -> None:
rep = runpdb_and_get_report(
- testdir,
+ pytester,
"""
import pytest
def test_func():
assert rep.skipped
assert len(pdblist) == 0
- def test_pdb_on_BdbQuit(self, testdir, pdblist):
+ def test_pdb_on_BdbQuit(self, pytester: Pytester, pdblist) -> None:
rep = runpdb_and_get_report(
- testdir,
+ pytester,
"""
import bdb
def test_func():
assert rep.failed
assert len(pdblist) == 0
- def test_pdb_on_KeyboardInterrupt(self, testdir, pdblist):
+ def test_pdb_on_KeyboardInterrupt(self, pytester: Pytester, pdblist) -> None:
rep = runpdb_and_get_report(
- testdir,
+ pytester,
"""
def test_func():
raise KeyboardInterrupt
child.wait()
assert not child.isalive()
- def test_pdb_unittest_postmortem(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_unittest_postmortem(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import unittest
class Blub(unittest.TestCase):
assert 0
"""
)
- child = testdir.spawn_pytest("--pdb %s" % p1)
+ child = pytester.spawn_pytest(f"--pdb {p1}")
child.expect("Pdb")
child.sendline("p self.filename")
child.sendeof()
assert "debug.me" in rest
self.flush(child)
- def test_pdb_unittest_skip(self, testdir):
+ def test_pdb_unittest_skip(self, pytester: Pytester) -> None:
"""Test for issue #2137"""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
import unittest
@unittest.skipIf(True, 'Skipping also with pdb active')
assert 0
"""
)
- child = testdir.spawn_pytest("-rs --pdb %s" % p1)
+ child = pytester.spawn_pytest(f"-rs --pdb {p1}")
child.expect("Skipping also with pdb active")
child.expect_exact("= 1 skipped in")
child.sendeof()
self.flush(child)
- def test_pdb_print_captured_stdout_and_stderr(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_print_captured_stdout_and_stderr(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test_1():
import sys
pass
"""
)
- child = testdir.spawn_pytest("--pdb %s" % p1)
+ child = pytester.spawn_pytest("--pdb %s" % p1)
child.expect("captured stdout")
child.expect("get rekt")
child.expect("captured stderr")
assert "get rekt" not in rest
self.flush(child)
- def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_dont_print_empty_captured_stdout_and_stderr(
+ self, pytester: Pytester
+ ) -> None:
+ p1 = pytester.makepyfile(
"""
def test_1():
assert False
"""
)
- child = testdir.spawn_pytest("--pdb %s" % p1)
+ child = pytester.spawn_pytest("--pdb %s" % p1)
child.expect("Pdb")
output = child.before.decode("utf8")
child.sendeof()
self.flush(child)
@pytest.mark.parametrize("showcapture", ["all", "no", "log"])
- def test_pdb_print_captured_logs(self, testdir, showcapture):
- p1 = testdir.makepyfile(
+ def test_pdb_print_captured_logs(
+ self, pytester: Pytester, showcapture: str
+ ) -> None:
+ p1 = pytester.makepyfile(
"""
def test_1():
import logging
assert False
"""
)
- child = testdir.spawn_pytest(
- "--show-capture={} --pdb {}".format(showcapture, p1)
- )
+ child = pytester.spawn_pytest(f"--show-capture={showcapture} --pdb {p1}")
if showcapture in ("all", "log"):
child.expect("captured log")
child.expect("get rekt")
assert "1 failed" in rest
self.flush(child)
- def test_pdb_print_captured_logs_nologging(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_print_captured_logs_nologging(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test_1():
import logging
assert False
"""
)
- child = testdir.spawn_pytest("--show-capture=all --pdb -p no:logging %s" % p1)
+ child = pytester.spawn_pytest("--show-capture=all --pdb -p no:logging %s" % p1)
child.expect("get rekt")
output = child.before.decode("utf8")
assert "captured log" not in output
assert "1 failed" in rest
self.flush(child)
- def test_pdb_interaction_exception(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_interaction_exception(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
def globalfunc():
pytest.raises(ValueError, globalfunc)
"""
)
- child = testdir.spawn_pytest("--pdb %s" % p1)
+ child = pytester.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*pytest.raises.*globalfunc")
child.expect("Pdb")
child.expect("1 failed")
self.flush(child)
- def test_pdb_interaction_on_collection_issue181(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_interaction_on_collection_issue181(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
xxx
"""
)
- child = testdir.spawn_pytest("--pdb %s" % p1)
+ child = pytester.spawn_pytest("--pdb %s" % p1)
# child.expect(".*import pytest.*")
child.expect("Pdb")
child.sendline("c")
child.expect("1 error")
self.flush(child)
- def test_pdb_interaction_on_internal_error(self, testdir):
- testdir.makeconftest(
+ def test_pdb_interaction_on_internal_error(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_runtest_protocol():
0/0
"""
)
- p1 = testdir.makepyfile("def test_func(): pass")
- child = testdir.spawn_pytest("--pdb %s" % p1)
+ p1 = pytester.makepyfile("def test_func(): pass")
+ child = pytester.spawn_pytest("--pdb %s" % p1)
child.expect("Pdb")
# INTERNALERROR is only displayed once via terminal reporter.
child.sendeof()
self.flush(child)
- def test_pdb_prevent_ConftestImportFailure_hiding_exception(self, testdir):
- testdir.makepyfile("def test_func(): pass")
- sub_dir = testdir.tmpdir.join("ns").ensure_dir()
- sub_dir.join("conftest").new(ext=".py").write("import unknown")
- sub_dir.join("test_file").new(ext=".py").write("def test_func(): pass")
+ def test_pdb_prevent_ConftestImportFailure_hiding_exception(
+ self, pytester: Pytester
+ ) -> None:
+ pytester.makepyfile("def test_func(): pass")
+ sub_dir = pytester.path.joinpath("ns")
+ sub_dir.mkdir()
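+ # with_suffix(".py") mirrors the py.path.local .new(ext=".py") calls used before.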
+ sub_dir.joinpath("conftest").with_suffix(".py").write_text(
+ "import unknown", "utf-8"
+ )
+ sub_dir.joinpath("test_file").with_suffix(".py").write_text(
+ "def test_func(): pass", "utf-8"
+ )
- result = testdir.runpytest_subprocess("--pdb", ".")
+ result = pytester.runpytest_subprocess("--pdb", ".")
result.stdout.fnmatch_lines(["-> import unknown"])
- def test_pdb_interaction_capturing_simple(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_interaction_capturing_simple(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
def test_1():
assert 0
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect(r"test_1\(\)")
child.expect("i == 1")
child.expect("Pdb")
assert "hello17" in rest # out is captured
self.flush(child)
- def test_pdb_set_trace_kwargs(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_set_trace_kwargs(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
def test_1():
assert 0
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect("== my_header ==")
assert "PDB set_trace" not in child.before.decode()
child.expect("Pdb")
assert "hello17" in rest # out is captured
self.flush(child)
- def test_pdb_set_trace_interception(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_set_trace_interception(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pdb
def test_1():
pdb.set_trace()
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendline("q")
assert "BdbQuit" not in rest
self.flush(child)
- def test_pdb_and_capsys(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_and_capsys(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
def test_1(capsys):
pytest.set_trace()
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.send("capsys.readouterr()\n")
child.expect("hello1")
child.read()
self.flush(child)
- def test_pdb_with_caplog_on_pdb_invocation(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_with_caplog_on_pdb_invocation(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test_1(capsys, caplog):
import logging
assert 0
"""
)
- child = testdir.spawn_pytest("--pdb %s" % str(p1))
+ child = pytester.spawn_pytest("--pdb %s" % str(p1))
child.send("caplog.record_tuples\n")
child.expect_exact(
"[('test_pdb_with_caplog_on_pdb_invocation', 30, 'some_warning')]"
child.read()
self.flush(child)
- def test_set_trace_capturing_afterwards(self, testdir):
- p1 = testdir.makepyfile(
+ def test_set_trace_capturing_afterwards(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pdb
def test_1():
assert 0
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.send("c\n")
child.expect("test_2")
child.read()
self.flush(child)
- def test_pdb_interaction_doctest(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_interaction_doctest(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def function_1():
'''
'''
"""
)
- child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
+ child = pytester.spawn_pytest("--doctest-modules --pdb %s" % p1)
child.expect("Pdb")
assert "UNEXPECTED EXCEPTION: AssertionError()" in child.before.decode("utf8")
assert "1 failed" in rest
self.flush(child)
- def test_doctest_set_trace_quit(self, testdir):
- p1 = testdir.makepyfile(
+ def test_doctest_set_trace_quit(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def function_1():
'''
)
# NOTE: does not use pytest.set_trace, but Python's patched pdb,
# therefore "-s" is required.
- child = testdir.spawn_pytest("--doctest-modules --pdb -s %s" % p1)
+ child = pytester.spawn_pytest("--doctest-modules --pdb -s %s" % p1)
child.expect("Pdb")
child.sendline("q")
rest = child.read().decode("utf8")
assert "BdbQuit" not in rest
assert "UNEXPECTED EXCEPTION" not in rest
- def test_pdb_interaction_capturing_twice(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_interaction_capturing_twice(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
def test_1():
assert 0
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect(r"PDB set_trace \(IO-capturing turned off\)")
child.expect("test_1")
child.expect("x = 3")
assert "1 failed" in rest
self.flush(child)
- def test_pdb_with_injected_do_debug(self, testdir):
+ def test_pdb_with_injected_do_debug(self, pytester: Pytester) -> None:
"""Simulates pdbpp, which injects Pdb into do_debug, and uses
self.__class__ in do_continue.
"""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
mytest="""
import pdb
import pytest
pytest.fail("expected_failure")
"""
)
- child = testdir.spawn_pytest("--pdbcls=mytest:CustomPdb %s" % str(p1))
+ child = pytester.spawn_pytest("--pdbcls=mytest:CustomPdb %s" % str(p1))
child.expect(r"PDB set_trace \(IO-capturing turned off\)")
child.expect(r"\n\(Pdb")
child.sendline("debug foo()")
assert "AssertionError: unexpected_failure" not in rest
self.flush(child)
- def test_pdb_without_capture(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_without_capture(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
def test_1():
pytest.set_trace()
"""
)
- child = testdir.spawn_pytest("-s %s" % p1)
+ child = pytester.spawn_pytest("-s %s" % p1)
child.expect(r">>> PDB set_trace >>>")
child.expect("Pdb")
child.sendline("c")
self.flush(child)
@pytest.mark.parametrize("capture_arg", ("", "-s", "-p no:capture"))
- def test_pdb_continue_with_recursive_debug(self, capture_arg, testdir):
+ def test_pdb_continue_with_recursive_debug(
+ self, capture_arg, pytester: Pytester
+ ) -> None:
"""Full coverage for do_debug without capturing.
This is very similar to test_pdb_interaction_continue_recursive in general,
but mocks out ``pdb.set_trace`` for providing more coverage.
"""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
try:
input = raw_input
set_trace()
"""
)
- child = testdir.spawn_pytest("--tb=short {} {}".format(p1, capture_arg))
+ child = pytester.spawn_pytest(f"--tb=short {p1} {capture_arg}")
child.expect("=== SET_TRACE ===")
before = child.before.decode("utf8")
if not capture_arg:
assert "> PDB continue >" in rest
assert "= 1 passed in" in rest
- def test_pdb_used_outside_test(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_used_outside_test(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
pytest.set_trace()
x = 5
"""
)
- child = testdir.spawn("{} {}".format(sys.executable, p1))
+ child = pytester.spawn(f"{sys.executable} {p1}")
child.expect("x = 5")
child.expect("Pdb")
child.sendeof()
self.flush(child)
- def test_pdb_used_in_generate_tests(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_used_in_generate_tests(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
def pytest_generate_tests(metafunc):
pass
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect("x = 5")
child.expect("Pdb")
child.sendeof()
self.flush(child)
- def test_pdb_collection_failure_is_shown(self, testdir):
- p1 = testdir.makepyfile("xxx")
- result = testdir.runpytest_subprocess("--pdb", p1)
+ def test_pdb_collection_failure_is_shown(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("xxx")
+ result = pytester.runpytest_subprocess("--pdb", p1)
result.stdout.fnmatch_lines(
["E NameError: *xxx*", "*! *Exit: Quitting debugger !*"] # due to EOF
)
@pytest.mark.parametrize("post_mortem", (False, True))
- def test_enter_leave_pdb_hooks_are_called(self, post_mortem, testdir):
- testdir.makeconftest(
+ def test_enter_leave_pdb_hooks_are_called(
+ self, post_mortem, pytester: Pytester
+ ) -> None:
+ pytester.makeconftest(
"""
mypdb = None
assert mypdb.set_attribute == "bar"
"""
)
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
import pytest
"""
)
if post_mortem:
- child = testdir.spawn_pytest(str(p1) + " --pdb -s -k test_post_mortem")
+ child = pytester.spawn_pytest(str(p1) + " --pdb -s -k test_post_mortem")
else:
- child = testdir.spawn_pytest(str(p1) + " -k test_set_trace")
+ child = pytester.spawn_pytest(str(p1) + " -k test_set_trace")
child.expect("enter_pdb_hook")
child.sendline("c")
if post_mortem:
assert "1 failed" in rest
self.flush(child)
- def test_pdb_custom_cls(self, testdir, custom_pdb_calls):
- p1 = testdir.makepyfile("""xxx """)
- result = testdir.runpytest_inprocess("--pdb", "--pdbcls=_pytest:_CustomPdb", p1)
+ def test_pdb_custom_cls(
+ self, pytester: Pytester, custom_pdb_calls: List[str]
+ ) -> None:
+ p1 = pytester.makepyfile("""xxx """)
+ result = pytester.runpytest_inprocess(
+ "--pdb", "--pdbcls=_pytest:_CustomPdb", p1
+ )
result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
assert custom_pdb_calls == ["init", "reset", "interaction"]
- def test_pdb_custom_cls_invalid(self, testdir):
- result = testdir.runpytest_inprocess("--pdbcls=invalid")
+ def test_pdb_custom_cls_invalid(self, pytester: Pytester) -> None:
+ result = pytester.runpytest_inprocess("--pdbcls=invalid")
result.stderr.fnmatch_lines(
[
"*: error: argument --pdbcls: 'invalid' is not in the format 'modname:classname'"
assert _validate_usepdb_cls("pdb:DoesNotExist") == ("pdb", "DoesNotExist")
- def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
- p1 = testdir.makepyfile("""xxx """)
- result = testdir.runpytest_inprocess("--pdbcls=_pytest:_CustomPdb", p1)
+ def test_pdb_custom_cls_without_pdb(
+ self, pytester: Pytester, custom_pdb_calls: List[str]
+ ) -> None:
+ p1 = pytester.makepyfile("""xxx """)
+ result = pytester.runpytest_inprocess("--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
assert custom_pdb_calls == []
- def test_pdb_custom_cls_with_set_trace(self, testdir, monkeypatch):
- testdir.makepyfile(
+ def test_pdb_custom_cls_with_set_trace(
+ self, pytester: Pytester, monkeypatch: MonkeyPatch,
+ ) -> None:
+ pytester.makepyfile(
custom_pdb="""
class CustomPdb(object):
def __init__(self, *args, **kwargs):
print('custom set_trace>')
"""
)
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
import pytest
pytest.set_trace(skip=['foo.*'])
"""
)
- monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir))
- child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))
+ monkeypatch.setenv("PYTHONPATH", str(pytester.path))
+ child = pytester.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))
child.expect("__init__")
child.expect("custom set_trace>")
class TestDebuggingBreakpoints:
- def test_supports_breakpoint_module_global(self):
- """
- Test that supports breakpoint global marks on Python 3.7+ and not on
- CPython 3.5, 2.7
- """
+ def test_supports_breakpoint_module_global(self) -> None:
+ """Test that supports breakpoint global marks on Python 3.7+."""
if sys.version_info >= (3, 7):
assert SUPPORTS_BREAKPOINT_BUILTIN is True
- if sys.version_info.major == 3 and sys.version_info.minor == 5:
- assert SUPPORTS_BREAKPOINT_BUILTIN is False # type: ignore[comparison-overlap]
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
@pytest.mark.parametrize("arg", ["--pdb", ""])
- def test_sys_breakpointhook_configure_and_unconfigure(self, testdir, arg):
+ def test_sys_breakpointhook_configure_and_unconfigure(
+ self, pytester: Pytester, arg: str
+ ) -> None:
"""
Test that sys.breakpointhook is set to the custom Pdb class once configured,
and that the hook is reset to the system value once pytest is unconfigured.
"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import sys
from pytest import hookimpl
assert sys.breakpointhook == pytestPDB.set_trace
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_nothing(): pass
"""
)
args = (arg,) if arg else ()
- result = testdir.runpytest_subprocess(*args)
+ result = pytester.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*1 passed in *"])
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
- def test_pdb_custom_cls(self, testdir, custom_debugger_hook):
- p1 = testdir.makepyfile(
+ def test_pdb_custom_cls(self, pytester: Pytester, custom_debugger_hook) -> None:
+ p1 = pytester.makepyfile(
"""
def test_nothing():
breakpoint()
"""
)
- result = testdir.runpytest_inprocess(
+ result = pytester.runpytest_inprocess(
"--pdb", "--pdbcls=_pytest:_CustomDebugger", p1
)
result.stdout.fnmatch_lines(["*CustomDebugger*", "*1 passed*"])
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
- def test_environ_custom_class(self, testdir, custom_debugger_hook, arg):
- testdir.makeconftest(
+ def test_environ_custom_class(
+ self, pytester: Pytester, custom_debugger_hook, arg: str
+ ) -> None:
+ pytester.makeconftest(
"""
import os
import sys
assert sys.breakpointhook is _pytest._CustomDebugger.set_trace
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_nothing(): pass
"""
)
args = (arg,) if arg else ()
- result = testdir.runpytest_subprocess(*args)
+ result = pytester.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*1 passed in *"])
@pytest.mark.skipif(
not _ENVIRON_PYTHONBREAKPOINT == "",
reason="Requires breakpoint() default value",
)
- def test_sys_breakpoint_interception(self, testdir):
- p1 = testdir.makepyfile(
+ def test_sys_breakpoint_interception(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test_1():
breakpoint()
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendline("quit")
@pytest.mark.skipif(
not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin"
)
- def test_pdb_not_altered(self, testdir):
- p1 = testdir.makepyfile(
+ def test_pdb_not_altered(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
import pdb
def test_1():
assert 0
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendline("c")
class TestTraceOption:
- def test_trace_sets_breakpoint(self, testdir):
- p1 = testdir.makepyfile(
+ def test_trace_sets_breakpoint(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test_1():
assert True
pass
"""
)
- child = testdir.spawn_pytest("--trace " + str(p1))
+ child = pytester.spawn_pytest("--trace " + str(p1))
child.expect("test_1")
child.expect("Pdb")
child.sendline("c")
assert "Exit: Quitting debugger" not in child.before.decode("utf8")
TestPDB.flush(child)
- def test_trace_with_parametrize_handles_shared_fixtureinfo(self, testdir):
- p1 = testdir.makepyfile(
+ def test_trace_with_parametrize_handles_shared_fixtureinfo(
+ self, pytester: Pytester
+ ) -> None:
+ p1 = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('myparam', [1,2])
assert request.function.__name__ == "test_func_kw"
"""
)
- child = testdir.spawn_pytest("--trace " + str(p1))
+ child = pytester.spawn_pytest("--trace " + str(p1))
for func, argname in [
("test_1", "myparam"),
("test_func", "func"),
child.expect_exact(func)
child.expect_exact("Pdb")
child.sendline("args")
- child.expect_exact("{} = 1\r\n".format(argname))
+ child.expect_exact(f"{argname} = 1\r\n")
child.expect_exact("Pdb")
child.sendline("c")
child.expect_exact("Pdb")
child.sendline("args")
- child.expect_exact("{} = 2\r\n".format(argname))
+ child.expect_exact(f"{argname} = 2\r\n")
child.expect_exact("Pdb")
child.sendline("c")
child.expect_exact("> PDB continue (IO-capturing resumed) >")
TestPDB.flush(child)
-def test_trace_after_runpytest(testdir):
+def test_trace_after_runpytest(pytester: Pytester) -> None:
"""Test that debugging's pytest_configure is re-entrant."""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
from _pytest.debugging import pytestPDB
- def test_outer(testdir):
+ def test_outer(pytester) -> None:
assert len(pytestPDB._saved) == 1
- testdir.makepyfile(
+ pytester.makepyfile(
\"""
from _pytest.debugging import pytestPDB
\"""
)
- result = testdir.runpytest("-s", "-k", "test_inner")
+ result = pytester.runpytest("-s", "-k", "test_inner")
assert result.ret == 0
assert len(pytestPDB._saved) == 1
"""
)
- result = testdir.runpytest_subprocess("-s", "-p", "pytester", str(p1))
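+ # The inner test file uses the pytester fixture itself, so the plugin is
+ # enabled explicitly with -p pytester.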
+ result = pytester.runpytest_subprocess("-s", "-p", "pytester", str(p1))
result.stdout.fnmatch_lines(["test_inner_end"])
assert result.ret == 0
-def test_quit_with_swallowed_SystemExit(testdir):
+def test_quit_with_swallowed_SystemExit(pytester: Pytester) -> None:
"""Test that debugging's pytest_configure is re-entrant."""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
def call_pdb_set_trace():
__import__('pdb').set_trace()
pass
"""
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
child.expect("Pdb")
child.sendline("q")
child.expect_exact("Exit: Quitting debugger")
@pytest.mark.parametrize("fixture", ("capfd", "capsys"))
-def test_pdb_suspends_fixture_capturing(testdir, fixture):
+def test_pdb_suspends_fixture_capturing(pytester: Pytester, fixture: str) -> None:
"""Using "-s" with pytest should suspend/resume fixture capturing."""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
def test_inner({fixture}):
import sys
)
)
- child = testdir.spawn_pytest(str(p1) + " -s")
+ child = pytester.spawn_pytest(str(p1) + " -s")
child.expect("Pdb")
before = child.before.decode("utf8")
assert "> PDB continue (IO-capturing resumed for fixture %s) >" % (fixture) in rest
-def test_pdbcls_via_local_module(testdir):
+def test_pdbcls_via_local_module(pytester: Pytester) -> None:
"""It should be imported in pytest_configure or later only."""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
def test():
print("before_set_trace")
print("runcall_called", args, kwds)
""",
)
- result = testdir.runpytest(
+ result = pytester.runpytest(
str(p1), "--pdbcls=really.invalid:Value", syspathinsert=True
)
result.stdout.fnmatch_lines(
)
assert result.ret == 1
- result = testdir.runpytest(
+ result = pytester.runpytest(
str(p1), "--pdbcls=mypdb:Wrapped.MyPdb", syspathinsert=True
)
assert result.ret == 0
result.stdout.fnmatch_lines(["*set_trace_called*", "* 1 passed in *"])
# Ensure that it also works with --trace.
- result = testdir.runpytest(
+ result = pytester.runpytest(
str(p1), "--pdbcls=mypdb:Wrapped.MyPdb", "--trace", syspathinsert=True
)
assert result.ret == 0
result.stdout.fnmatch_lines(["*runcall_called*", "* 1 passed in *"])
-def test_raises_bdbquit_with_eoferror(testdir):
+def test_raises_bdbquit_with_eoferror(pytester: Pytester) -> None:
"""It is not guaranteed that DontReadFromInput's read is called."""
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
def input_without_read(*args, **kwargs):
raise EOFError()
__import__('pdb').set_trace()
"""
)
- result = testdir.runpytest(str(p1))
+ result = pytester.runpytest(str(p1))
result.stdout.fnmatch_lines(["E *BdbQuit", "*= 1 failed in*"])
assert result.ret == 1
-def test_pdb_wrapper_class_is_reused(testdir):
- p1 = testdir.makepyfile(
+def test_pdb_wrapper_class_is_reused(pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test():
__import__("pdb").set_trace()
print("set_trace_called", args)
""",
)
- result = testdir.runpytest(str(p1), "--pdbcls=mypdb:MyPdb", syspathinsert=True)
+ result = pytester.runpytest(str(p1), "--pdbcls=mypdb:MyPdb", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(
["*set_trace_called*", "*set_trace_called*", "* 1 passed in *"]
from typing import Callable
from typing import Optional
+import py
+
import pytest
-from _pytest.compat import MODULE_NOT_FOUND_ERROR
from _pytest.doctest import _get_checker
from _pytest.doctest import _is_mocked
from _pytest.doctest import _is_setup_py
from _pytest.doctest import DoctestItem
from _pytest.doctest import DoctestModule
from _pytest.doctest import DoctestTextfile
+from _pytest.pytester import Pytester
class TestDoctests:
- def test_collect_testtextfile(self, testdir):
- w = testdir.maketxtfile(whatever="")
- checkfile = testdir.maketxtfile(
+ def test_collect_testtextfile(self, pytester: Pytester):
+ w = pytester.maketxtfile(whatever="")
+ checkfile = pytester.maketxtfile(
test_something="""
alskdjalsdk
>>> i = 5
"""
)
- for x in (testdir.tmpdir, checkfile):
+ for x in (pytester.path, checkfile):
# print "checking that %s returns custom items" % (x,)
- items, reprec = testdir.inline_genitems(x)
+ items, reprec = pytester.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestTextfile)
# Empty file has no items.
- items, reprec = testdir.inline_genitems(w)
+ items, reprec = pytester.inline_genitems(w)
assert len(items) == 0
- def test_collect_module_empty(self, testdir):
- path = testdir.makepyfile(whatever="#")
- for p in (path, testdir.tmpdir):
- items, reprec = testdir.inline_genitems(p, "--doctest-modules")
+ def test_collect_module_empty(self, pytester: Pytester):
+ path = pytester.makepyfile(whatever="#")
+ for p in (path, pytester.path):
+ items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 0
- def test_collect_module_single_modulelevel_doctest(self, testdir):
- path = testdir.makepyfile(whatever='""">>> pass"""')
- for p in (path, testdir.tmpdir):
- items, reprec = testdir.inline_genitems(p, "--doctest-modules")
+ def test_collect_module_single_modulelevel_doctest(self, pytester: Pytester):
+ path = pytester.makepyfile(whatever='""">>> pass"""')
+ for p in (path, pytester.path):
+ items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
- def test_collect_module_two_doctest_one_modulelevel(self, testdir):
- path = testdir.makepyfile(
+ def test_collect_module_two_doctest_one_modulelevel(self, pytester: Pytester):
+ path = pytester.makepyfile(
whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
"""
)
- for p in (path, testdir.tmpdir):
- items, reprec = testdir.inline_genitems(p, "--doctest-modules")
+ for p in (path, pytester.path):
+ items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
- def test_collect_module_two_doctest_no_modulelevel(self, testdir):
- path = testdir.makepyfile(
- whatever="""
+ @pytest.mark.parametrize("filename", ["__init__", "whatever"])
+ def test_collect_module_two_doctest_no_modulelevel(
+ self, pytester: Pytester, filename: str,
+ ) -> None:
+ path = pytester.makepyfile(
+ **{
+ filename: """
'# Empty'
def my_func():
">>> magic = 42 "
# This is another function
>>> import os # this one does have a doctest
'''
- """
+ """,
+ },
)
- for p in (path, testdir.tmpdir):
- items, reprec = testdir.inline_genitems(p, "--doctest-modules")
+ for p in (path, pytester.path):
+ items, reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
- def test_simple_doctestfile(self, testdir):
- p = testdir.maketxtfile(
+ def test_simple_doctestfile(self, pytester: Pytester):
+ p = pytester.maketxtfile(
test_doc="""
>>> x = 1
>>> x == 1
False
"""
)
- reprec = testdir.inline_run(p)
+ reprec = pytester.inline_run(p)
reprec.assertoutcome(failed=1)
- def test_new_pattern(self, testdir):
- p = testdir.maketxtfile(
+ def test_new_pattern(self, pytester: Pytester):
+ p = pytester.maketxtfile(
xdoc="""
>>> x = 1
>>> x == 1
False
"""
)
- reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
+ reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
- def test_multiple_patterns(self, testdir):
+ def test_multiple_patterns(self, pytester: Pytester):
"""Test support for multiple --doctest-glob arguments (#1255)."""
- testdir.maketxtfile(
+ pytester.maketxtfile(
xdoc="""
>>> 1
1
"""
)
- testdir.makefile(
+ pytester.makefile(
".foo",
test="""
>>> 1
1
""",
)
- testdir.maketxtfile(
+ pytester.maketxtfile(
test_normal="""
>>> 1
1
"""
)
expected = {"xdoc.txt", "test.foo", "test_normal.txt"}
- assert {x.basename for x in testdir.tmpdir.listdir()} == expected
+ assert {x.name for x in pytester.path.iterdir()} == expected
args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
- result = testdir.runpytest(*args)
+ result = pytester.runpytest(*args)
result.stdout.fnmatch_lines(["*test.foo *", "*xdoc.txt *", "*2 passed*"])
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*test_normal.txt *", "*1 passed*"])
@pytest.mark.parametrize(
" test_string, encoding",
[("foo", "ascii"), ("öäü", "latin1"), ("öäü", "utf-8")],
)
- def test_encoding(self, testdir, test_string, encoding):
+ def test_encoding(self, pytester, test_string, encoding):
"""Test support for doctest_encoding ini option."""
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
doctest_encoding={}
""".format(
test_string, repr(test_string)
)
- testdir._makefile(".txt", [doctest], {}, encoding=encoding)
+ fn = pytester.path / "test_encoding.txt"
+ fn.write_text(doctest, encoding=encoding)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
- def test_doctest_unexpected_exception(self, testdir):
- testdir.maketxtfile(
+ def test_doctest_unexpected_exception(self, pytester: Pytester):
+ pytester.maketxtfile(
"""
>>> i = 0
>>> 0 / i
2
"""
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"test_doctest_unexpected_exception.txt F *",
consecutive=True,
)
- def test_doctest_outcomes(self, testdir):
- testdir.maketxtfile(
+ def test_doctest_outcomes(self, pytester: Pytester):
+ pytester.maketxtfile(
test_skip="""
>>> 1
1
bar
""",
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"collected 3 items",
]
)
- def test_docstring_partial_context_around_error(self, testdir):
+ def test_docstring_partial_context_around_error(self, pytester: Pytester):
"""Test that we show some context before the actual line of a failing
doctest.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
'''
def foo():
"""
"""
'''
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_partial_context_around_error*",
result.stdout.no_fnmatch_line("*text-line-2*")
result.stdout.no_fnmatch_line("*text-line-after*")
- def test_docstring_full_context_around_error(self, testdir):
+ def test_docstring_full_context_around_error(self, pytester: Pytester):
"""Test that we show the whole context before the actual line of a failing
doctest, provided that the context is up to 10 lines long.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
'''
def foo():
"""
"""
'''
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_full_context_around_error*",
]
)
- def test_doctest_linedata_missing(self, testdir):
- testdir.tmpdir.join("hello.py").write(
+ def test_doctest_linedata_missing(self, pytester: Pytester):
+ pytester.path.joinpath("hello.py").write_text(
textwrap.dedent(
"""\
class Fun(object):
"""
)
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
["*hello*", "006*>>> 1/0*", "*UNEXPECTED*ZeroDivision*", "*1 failed*"]
)
- def test_doctest_linedata_on_property(self, testdir):
- testdir.makepyfile(
+ def test_doctest_linedata_on_property(self, pytester: Pytester):
+ pytester.makepyfile(
"""
class Sample(object):
@property
return 'something'
"""
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
]
)
- def test_doctest_no_linedata_on_overriden_property(self, testdir):
- testdir.makepyfile(
+ def test_doctest_no_linedata_on_overriden_property(self, pytester: Pytester):
+ pytester.makepyfile(
"""
class Sample(object):
@property
some_property = property(some_property.__get__, None, None, some_property.__doc__)
"""
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
]
)
- def test_doctest_unex_importerror_only_txt(self, testdir):
- testdir.maketxtfile(
+ def test_doctest_unex_importerror_only_txt(self, pytester: Pytester):
+ pytester.maketxtfile(
"""
>>> import asdalsdkjaslkdjasd
>>>
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*>>> import asdals*",
- "*UNEXPECTED*{e}*".format(e=MODULE_NOT_FOUND_ERROR),
- "{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR),
+ "*UNEXPECTED*ModuleNotFoundError*",
+ "ModuleNotFoundError: No module named *asdal*",
]
)
- def test_doctest_unex_importerror_with_module(self, testdir):
- testdir.tmpdir.join("hello.py").write(
+ def test_doctest_unex_importerror_with_module(self, pytester: Pytester):
+ pytester.path.joinpath("hello.py").write_text(
textwrap.dedent(
"""\
import asdalsdkjaslkdjasd
"""
)
)
- testdir.maketxtfile(
+ pytester.maketxtfile(
"""
>>> import hello
>>>
"""
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*ERROR collecting hello.py*",
- "*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
+ "*ModuleNotFoundError: No module named *asdals*",
"*Interrupted: 1 error during collection*",
]
)
- def test_doctestmodule(self, testdir):
- p = testdir.makepyfile(
+ def test_doctestmodule(self, pytester: Pytester):
+ p = pytester.makepyfile(
"""
'''
>>> x = 1
'''
"""
)
- reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
- def test_doctestmodule_external_and_issue116(self, testdir):
- p = testdir.mkpydir("hello")
- p.join("__init__.py").write(
+ def test_doctestmodule_external_and_issue116(self, pytester: Pytester):
+ p = pytester.mkpydir("hello")
+ p.joinpath("__init__.py").write_text(
textwrap.dedent(
"""\
def somefunc():
"""
)
)
- result = testdir.runpytest(p, "--doctest-modules")
+ result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(
[
"003 *>>> i = 0",
]
)
- def test_txtfile_failing(self, testdir):
- p = testdir.maketxtfile(
+ def test_txtfile_failing(self, pytester: Pytester):
+ p = pytester.maketxtfile(
"""
>>> i = 0
>>> i + 1
2
"""
)
- result = testdir.runpytest(p, "-s")
+ result = pytester.runpytest(p, "-s")
result.stdout.fnmatch_lines(
[
"001 >>> i = 0",
]
)
- def test_txtfile_with_fixtures(self, testdir):
- p = testdir.maketxtfile(
+ def test_txtfile_with_fixtures(self, pytester: Pytester):
+ p = pytester.maketxtfile(
"""
- >>> dir = getfixture('tmpdir')
- >>> type(dir).__name__
- 'LocalPath'
+ >>> p = getfixture('tmp_path')
+ >>> p.is_dir()
+ True
"""
)
- reprec = testdir.inline_run(p)
+ reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
- def test_txtfile_with_usefixtures_in_ini(self, testdir):
- testdir.makeini(
+ def test_txtfile_with_usefixtures_in_ini(self, pytester: Pytester):
+ pytester.makeini(
"""
[pytest]
usefixtures = myfixture
"""
)
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
@pytest.fixture
"""
)
- p = testdir.maketxtfile(
+ p = pytester.maketxtfile(
"""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
"""
)
- reprec = testdir.inline_run(p)
+ reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
- def test_doctestmodule_with_fixtures(self, testdir):
- p = testdir.makepyfile(
+ def test_doctestmodule_with_fixtures(self, pytester: Pytester):
+ p = pytester.makepyfile(
"""
'''
- >>> dir = getfixture('tmpdir')
- >>> type(dir).__name__
- 'LocalPath'
+ >>> p = getfixture('tmp_path')
+ >>> p.is_dir()
+ True
'''
"""
)
- reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
- def test_doctestmodule_three_tests(self, testdir):
- p = testdir.makepyfile(
+ def test_doctestmodule_three_tests(self, pytester: Pytester):
+ p = pytester.makepyfile(
"""
'''
- >>> dir = getfixture('tmpdir')
- >>> type(dir).__name__
- 'LocalPath'
+ >>> p = getfixture('tmp_path')
+ >>> p.is_dir()
+ True
'''
def my_func():
'''
'''
"""
)
- reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
- def test_doctestmodule_two_tests_one_fail(self, testdir):
- p = testdir.makepyfile(
+ def test_doctestmodule_two_tests_one_fail(self, pytester: Pytester):
+ p = pytester.makepyfile(
"""
class MyClass(object):
def bad_meth(self):
'''
"""
)
- reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
- def test_ignored_whitespace(self, testdir):
- testdir.makeini(
+ def test_ignored_whitespace(self, pytester: Pytester):
+ pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
class MyClass(object):
'''
pass
"""
)
- reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
- def test_non_ignored_whitespace(self, testdir):
- testdir.makeini(
+ def test_non_ignored_whitespace(self, pytester: Pytester):
+ pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
class MyClass(object):
'''
pass
"""
)
- reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
- def test_ignored_whitespace_glob(self, testdir):
- testdir.makeini(
+ def test_ignored_whitespace_glob(self, pytester: Pytester):
+ pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
- p = testdir.maketxtfile(
+ p = pytester.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
- reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
+ reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
- def test_non_ignored_whitespace_glob(self, testdir):
- testdir.makeini(
+ def test_non_ignored_whitespace_glob(self, pytester: Pytester):
+ pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
- p = testdir.maketxtfile(
+ p = pytester.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
- reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
+ reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
- def test_contains_unicode(self, testdir):
+ def test_contains_unicode(self, pytester: Pytester):
"""Fix internal error with docstrings containing non-ascii characters."""
- testdir.makepyfile(
+ pytester.makepyfile(
'''\
def foo():
"""
"""
'''
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["Got nothing", "* 1 failed in*"])
- def test_ignore_import_errors_on_doctest(self, testdir):
- p = testdir.makepyfile(
+ def test_ignore_import_errors_on_doctest(self, pytester: Pytester):
+ p = pytester.makepyfile(
"""
import asdf
"""
)
- reprec = testdir.inline_run(
+ reprec = pytester.inline_run(
p, "--doctest-modules", "--doctest-ignore-import-errors"
)
reprec.assertoutcome(skipped=1, failed=1, passed=0)
- def test_junit_report_for_doctest(self, testdir):
+ def test_junit_report_for_doctest(self, pytester: Pytester):
"""#713: Fix --junit-xml option when used with --doctest-modules."""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
def foo():
'''
pass
"""
)
- reprec = testdir.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml")
+ reprec = pytester.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
- def test_unicode_doctest(self, testdir):
+ def test_unicode_doctest(self, pytester: Pytester):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters.
"""
- p = testdir.maketxtfile(
+ p = pytester.maketxtfile(
test_unicode_doctest="""
.. doctest::
1
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
["*UNEXPECTED EXCEPTION: ZeroDivisionError*", "*1 failed*"]
)
- def test_unicode_doctest_module(self, testdir):
+ def test_unicode_doctest_module(self, pytester: Pytester):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters.
"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
test_unicode_doctest_module="""
def fix_bad_unicode(text):
'''
return "único"
"""
)
- result = testdir.runpytest(p, "--doctest-modules")
+ result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
- def test_print_unicode_value(self, testdir):
+ def test_print_unicode_value(self, pytester: Pytester):
"""
Test case for issue 3583: Printing Unicode in doctest under Python 2.7
doesn't work
"""
- p = testdir.maketxtfile(
+ p = pytester.maketxtfile(
test_print_unicode_value=r"""
Here is a doctest::
åéîøü
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["* 1 passed *"])
- def test_reportinfo(self, testdir):
+ def test_reportinfo(self, pytester: Pytester):
"""Make sure that DoctestItem.reportinfo() returns lineno."""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
test_reportinfo="""
def foo(x):
'''
return 'c'
"""
)
- items, reprec = testdir.inline_genitems(p, "--doctest-modules")
+ items, reprec = pytester.inline_genitems(p, "--doctest-modules")
reportinfo = items[0].reportinfo()
assert reportinfo[1] == 1
- def test_valid_setup_py(self, testdir):
+ def test_valid_setup_py(self, pytester: Pytester):
"""
Test to make sure that pytest ignores valid setup.py files when run
with --doctest-modules
"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
setup="""
from setuptools import setup, find_packages
setup(name='sample',
)
"""
)
- result = testdir.runpytest(p, "--doctest-modules")
+ result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 0 items*"])
- def test_invalid_setup_py(self, testdir):
+ def test_invalid_setup_py(self, pytester: Pytester):
"""
Test to make sure that pytest reads setup.py files that are not used
for Python packages when run with --doctest-modules
"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
setup="""
def test_foo():
return 'bar'
"""
)
- result = testdir.runpytest(p, "--doctest-modules")
+ result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 1 item*"])
class TestLiterals:
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
- def test_allow_unicode(self, testdir, config_mode):
+ def test_allow_unicode(self, pytester, config_mode):
"""Test that doctests which output unicode work in all python versions
tested by pytest when the ALLOW_UNICODE option is used (either in
the ini file or by an inline comment).
"""
if config_mode == "ini":
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_UNICODE
else:
comment = "#doctest: +ALLOW_UNICODE"
- testdir.maketxtfile(
+ pytester.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii') {comment}
'12'
comment=comment
)
)
- testdir.makepyfile(
+ pytester.makepyfile(
foo="""
def foo():
'''
comment=comment
)
)
- reprec = testdir.inline_run("--doctest-modules")
+ reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
- def test_allow_bytes(self, testdir, config_mode):
+ def test_allow_bytes(self, pytester, config_mode):
"""Test that doctests which output bytes work in all python versions
tested by pytest when the ALLOW_BYTES option is used (either in
the ini file or by an inline comment)(#1287).
"""
if config_mode == "ini":
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_BYTES
else:
comment = "#doctest: +ALLOW_BYTES"
- testdir.maketxtfile(
+ pytester.maketxtfile(
test_doc="""
>>> b'foo' {comment}
'foo'
comment=comment
)
)
- testdir.makepyfile(
+ pytester.makepyfile(
foo="""
def foo():
'''
comment=comment
)
)
- reprec = testdir.inline_run("--doctest-modules")
+ reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
- def test_unicode_string(self, testdir):
+ def test_unicode_string(self, pytester: Pytester):
"""Test that doctests which output unicode fail in Python 2 when
the ALLOW_UNICODE option is not used. The same test should pass
in Python 3.
"""
- testdir.maketxtfile(
+ pytester.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii')
'12'
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
- def test_bytes_literal(self, testdir):
+ def test_bytes_literal(self, pytester: Pytester):
"""Test that doctests which output bytes fail in Python 3 when
the ALLOW_BYTES option is not used. (#1287).
"""
- testdir.maketxtfile(
+ pytester.maketxtfile(
test_doc="""
>>> b'foo'
'foo'
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(failed=1)
def test_number_re(self) -> None:
assert _number_re.match(s) is None
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
- def test_number_precision(self, testdir, config_mode):
+ def test_number_precision(self, pytester, config_mode):
"""Test the NUMBER option."""
if config_mode == "ini":
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
doctest_optionflags = NUMBER
else:
comment = "#doctest: +NUMBER"
- testdir.maketxtfile(
+ pytester.maketxtfile(
test_doc="""
Scalars:
comment=comment
)
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
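For context, NUMBER relaxes floating-point comparison to the precision written in the expected output, so a doctest such as the sketch below passes even though the actual value carries more digits (value illustrative):

    >>> import math
    >>> math.pi  # doctest: +NUMBER
    3.14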
@pytest.mark.parametrize(
("1e3", "999"),
# The current implementation doesn't understand that numbers inside
# strings shouldn't be treated as numbers:
- pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail), # type: ignore
+ pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail),
],
)
- def test_number_non_matches(self, testdir, expression, output):
- testdir.maketxtfile(
+ def test_number_non_matches(self, pytester, expression, output):
+ pytester.maketxtfile(
test_doc="""
>>> {expression} #doctest: +NUMBER
{output}
expression=expression, output=output
)
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=0, failed=1)
- def test_number_and_allow_unicode(self, testdir):
- testdir.maketxtfile(
+ def test_number_and_allow_unicode(self, pytester: Pytester):
+ pytester.maketxtfile(
test_doc="""
>>> from collections import namedtuple
>>> T = namedtuple('T', 'a b c')
T(a=0.233, b=u'str', c='bytes')
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
"""
@pytest.fixture(params=["text", "module"])
- def makedoctest(self, testdir, request):
+ def makedoctest(self, pytester, request):
def makeit(doctest):
mode = request.param
if mode == "text":
- testdir.maketxtfile(doctest)
+ pytester.maketxtfile(doctest)
else:
assert mode == "module"
- testdir.makepyfile('"""\n%s"""' % doctest)
+ pytester.makepyfile('"""\n%s"""' % doctest)
return makeit
- def test_one_skipped(self, testdir, makedoctest):
+ def test_one_skipped(self, pytester, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
4
"""
)
- reprec = testdir.inline_run("--doctest-modules")
+ reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=1)
- def test_one_skipped_failed(self, testdir, makedoctest):
+ def test_one_skipped_failed(self, pytester, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
200
"""
)
- reprec = testdir.inline_run("--doctest-modules")
+ reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(failed=1)
- def test_all_skipped(self, testdir, makedoctest):
+ def test_all_skipped(self, pytester, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
200
"""
)
- reprec = testdir.inline_run("--doctest-modules")
+ reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(skipped=1)
- def test_vacuous_all_skipped(self, testdir, makedoctest):
+ def test_vacuous_all_skipped(self, pytester, makedoctest):
makedoctest("")
- reprec = testdir.inline_run("--doctest-modules")
+ reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=0, skipped=0)
- def test_continue_on_failure(self, testdir):
- testdir.maketxtfile(
+ def test_continue_on_failure(self, pytester: Pytester):
+ pytester.maketxtfile(
test_something="""
>>> i = 5
>>> def foo():
>>> i + 1
"""
)
- result = testdir.runpytest("--doctest-modules", "--doctest-continue-on-failure")
+ result = pytester.runpytest(
+ "--doctest-modules", "--doctest-continue-on-failure"
+ )
result.assert_outcomes(passed=0, failed=1)
# The lines that contain the failure are 4, 5, and 8. The first one
# is a stack trace and the other two are mismatches.
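Without --doctest-continue-on-failure a doctest stops at its first mismatch; with the flag the remaining examples in the same doctest still execute, which is why the comment above can point at two separate mismatches plus a stack trace. A minimal sketch of a doctest that reports both mismatches in a single run under the flag (values illustrative):

    >>> 1 + 1
    3
    >>> 2 + 2
    5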
SCOPES = ["module", "session", "class", "function"]
- def test_doctest_module_session_fixture(self, testdir):
+ def test_doctest_module_session_fixture(self, pytester: Pytester):
"""Test that session fixtures are initialized for doctest modules (#768)."""
# session fixture which changes some global data, which will
# be accessed by doctests in a module
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
import sys
del sys.pytest_session_data
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
foo="""
import sys
'''
"""
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["*2 passed*"])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("enable_doctest", [True, False])
- def test_fixture_scopes(self, testdir, scope, enable_doctest):
+ def test_fixture_scopes(self, pytester, scope, enable_doctest):
"""Test that auto-use fixtures work properly with doctest modules.
See #1057 and #1100.
"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
scope=scope
)
)
- testdir.makepyfile(
+ pytester.makepyfile(
test_1='''
def test_foo():
"""
)
params = ("--doctest-modules",) if enable_doctest else ()
passes = 3 if enable_doctest else 2
- result = testdir.runpytest(*params)
+ result = pytester.runpytest(*params)
result.stdout.fnmatch_lines(["*=== %d passed in *" % passes])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("autouse", [True, False])
@pytest.mark.parametrize("use_fixture_in_doctest", [True, False])
def test_fixture_module_doctest_scopes(
- self, testdir, scope, autouse, use_fixture_in_doctest
+ self, pytester, scope, autouse, use_fixture_in_doctest
):
"""Test that auto-use fixtures work properly with doctest files.
See #1057 and #1100.
"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
)
)
if use_fixture_in_doctest:
- testdir.maketxtfile(
+ pytester.maketxtfile(
test_doc="""
>>> getfixture('auto')
99
"""
)
else:
- testdir.maketxtfile(
+ pytester.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
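The getfixture('auto') call above uses the helper pytest injects into every doctest namespace, which lets a doctest request a fixture by name. A minimal usage sketch, reusing the fixture name and value from the test above:

    >>> getfixture('auto')
    99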
@pytest.mark.parametrize("scope", SCOPES)
- def test_auto_use_request_attributes(self, testdir, scope):
+ def test_auto_use_request_attributes(self, pytester, scope):
"""Check that all attributes of a request in an autouse fixture
behave as expected when requested for a doctest item.
"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
scope=scope
)
)
- testdir.maketxtfile(
+ pytester.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
str(result.stdout.no_fnmatch_line("*FAILURES*"))
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
SCOPES = ["module", "session", "class", "function"]
@pytest.mark.parametrize("scope", SCOPES)
- def test_namespace_doctestfile(self, testdir, scope):
+ def test_namespace_doctestfile(self, pytester, scope):
"""
Check that inserting something into the namespace works in a
simple text file doctest
"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
import contextlib
scope=scope
)
)
- p = testdir.maketxtfile(
+ p = pytester.maketxtfile(
"""
>>> print(cl.__name__)
contextlib
"""
)
- reprec = testdir.inline_run(p)
+ reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("scope", SCOPES)
- def test_namespace_pyfile(self, testdir, scope):
+ def test_namespace_pyfile(self, pytester, scope):
"""
Check that inserting something into the namespace works in a
simple Python file docstring doctest
"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
import contextlib
scope=scope
)
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
def foo():
'''
'''
"""
)
- reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
class TestDoctestReportingOption:
- def _run_doctest_report(self, testdir, format):
- testdir.makepyfile(
+ def _run_doctest_report(self, pytester, format):
+ pytester.makepyfile(
"""
def foo():
'''
'2 3 6')
"""
)
- return testdir.runpytest("--doctest-modules", "--doctest-report", format)
+ return pytester.runpytest("--doctest-modules", "--doctest-report", format)
@pytest.mark.parametrize("format", ["udiff", "UDIFF", "uDiFf"])
- def test_doctest_report_udiff(self, testdir, format):
- result = self._run_doctest_report(testdir, format)
+ def test_doctest_report_udiff(self, pytester, format):
+ result = self._run_doctest_report(pytester, format)
result.stdout.fnmatch_lines(
[" 0 1 4", " -1 2 4", " +1 2 5", " 2 3 6"]
)
- def test_doctest_report_cdiff(self, testdir):
- result = self._run_doctest_report(testdir, "cdiff")
+ def test_doctest_report_cdiff(self, pytester: Pytester):
+ result = self._run_doctest_report(pytester, "cdiff")
result.stdout.fnmatch_lines(
[
" a b",
]
)
- def test_doctest_report_ndiff(self, testdir):
- result = self._run_doctest_report(testdir, "ndiff")
+ def test_doctest_report_ndiff(self, pytester: Pytester):
+ result = self._run_doctest_report(pytester, "ndiff")
result.stdout.fnmatch_lines(
[
" a b",
)
@pytest.mark.parametrize("format", ["none", "only_first_failure"])
- def test_doctest_report_none_or_only_first_failure(self, testdir, format):
- result = self._run_doctest_report(testdir, format)
+ def test_doctest_report_none_or_only_first_failure(self, pytester, format):
+ result = self._run_doctest_report(pytester, format)
result.stdout.fnmatch_lines(
[
"Expected:",
]
)
- def test_doctest_report_invalid(self, testdir):
- result = self._run_doctest_report(testdir, "obviously_invalid_format")
+ def test_doctest_report_invalid(self, pytester: Pytester):
+ result = self._run_doctest_report(pytester, "obviously_invalid_format")
result.stderr.fnmatch_lines(
[
"*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*"
@pytest.mark.parametrize("mock_module", ["mock", "unittest.mock"])
-def test_doctest_mock_objects_dont_recurse_missbehaved(mock_module, testdir):
+def test_doctest_mock_objects_dont_recurse_missbehaved(mock_module, pytester: Pytester):
pytest.importorskip(mock_module)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
from {mock_module} import call
class Example(object):
mock_module=mock_module
)
)
- result = testdir.runpytest("--doctest-modules")
+ result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
assert inspect.unwrap.__module__ == "inspect"
-def test_is_setup_py_not_named_setup_py(tmpdir):
- not_setup_py = tmpdir.join("not_setup.py")
- not_setup_py.write('from setuptools import setup; setup(name="foo")')
- assert not _is_setup_py(not_setup_py)
+def test_is_setup_py_not_named_setup_py(tmp_path):
+ not_setup_py = tmp_path.joinpath("not_setup.py")
+ not_setup_py.write_text('from setuptools import setup; setup(name="foo")')
+ assert not _is_setup_py(py.path.local(str(not_setup_py)))
@pytest.mark.parametrize("mod", ("setuptools", "distutils.core"))
def test_is_setup_py_is_a_setup_py(tmpdir, mod):
setup_py = tmpdir.join("setup.py")
- setup_py.write('from {} import setup; setup(name="foo")'.format(mod))
+ setup_py.write(f'from {mod} import setup; setup(name="foo")')
assert _is_setup_py(setup_py)
@pytest.mark.parametrize("mod", ("setuptools", "distutils.core"))
-def test_is_setup_py_different_encoding(tmpdir, mod):
- setup_py = tmpdir.join("setup.py")
+def test_is_setup_py_different_encoding(tmp_path, mod):
+ setup_py = tmp_path.joinpath("setup.py")
contents = (
"# -*- coding: cp1252 -*-\n"
'from {} import setup; setup(name="foo", description="€")\n'.format(mod)
)
- setup_py.write_binary(contents.encode("cp1252"))
- assert _is_setup_py(setup_py)
+ setup_py.write_bytes(contents.encode("cp1252"))
+ assert _is_setup_py(py.path.local(str(setup_py)))
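Taken together, these cases outline the _is_setup_py contract: only files literally named setup.py qualify, the content is read as raw bytes so encodings such as cp1252 survive, and a setuptools or distutils reference must appear. A rough sketch consistent with the cases above, not the shipped implementation:

    import py

    def _is_setup_py_sketch(path: py.path.local) -> bool:
        # Only files literally named setup.py are candidates.
        if path.basename != "setup.py":
            return False
        # Read raw bytes so the source encoding does not matter.
        contents = path.read_binary()
        return b"setuptools" in contents or b"distutils" in contents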
import sys
import pytest
+from _pytest.pytester import Pytester
TESTCASES = [
@pytest.mark.parametrize("code, expected", TESTCASES)
-def test_error_diff(code, expected, testdir):
- expected = [line.lstrip() for line in expected.splitlines()]
- p = testdir.makepyfile(code)
- result = testdir.runpytest(p, "-vv")
- result.stdout.fnmatch_lines(expected)
+def test_error_diff(code: str, expected: str, pytester: Pytester) -> None:
+ expected_lines = [line.lstrip() for line in expected.splitlines()]
+ p = pytester.makepyfile(code)
+ result = pytester.runpytest(p, "-vv")
+ result.stdout.fnmatch_lines(expected_lines)
assert result.ret == 1
import sys
import pytest
+from _pytest.pytester import Pytester
-def test_enabled(testdir):
+def test_enabled(pytester: Pytester) -> None:
"""Test single crashing test displays a traceback."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import faulthandler
def test_crash():
faulthandler._sigabrt()
"""
)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stderr.fnmatch_lines(["*Fatal Python error*"])
assert result.ret != 0
-def test_crash_near_exit(testdir):
+def test_crash_near_exit(pytester: Pytester) -> None:
"""Test that fault handler displays crashes that happen even after
pytest is exiting (for example, when the interpreter is shutting down)."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import faulthandler
import atexit
atexit.register(faulthandler._sigabrt)
"""
)
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
result.stderr.fnmatch_lines(["*Fatal Python error*"])
assert result.ret != 0
-def test_disabled(testdir):
+def test_disabled(pytester: Pytester) -> None:
"""Test option to disable fault handler in the command line."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import faulthandler
def test_disabled():
assert not faulthandler.is_enabled()
"""
)
- result = testdir.runpytest_subprocess("-p", "no:faulthandler")
+ result = pytester.runpytest_subprocess("-p", "no:faulthandler")
result.stdout.fnmatch_lines(["*1 passed*"])
assert result.ret == 0
False,
],
)
-def test_timeout(testdir, enabled: bool) -> None:
+def test_timeout(pytester: Pytester, enabled: bool) -> None:
"""Test option to dump tracebacks after a certain timeout.
If faulthandler is disabled, no traceback will be dumped.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import os, time
def test_timeout():
time.sleep(1 if "CI" in os.environ else 0.1)
"""
)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
faulthandler_timeout = 0.01
)
args = ["-p", "no:faulthandler"] if not enabled else []
- result = testdir.runpytest_subprocess(*args)
+ result = pytester.runpytest_subprocess(*args)
tb_output = "most recent call first"
if enabled:
result.stderr.fnmatch_lines(["*%s*" % tb_output])
@pytest.mark.parametrize("hook_name", ["pytest_enter_pdb", "pytest_exception_interact"])
-def test_cancel_timeout_on_hook(monkeypatch, hook_name):
+def test_cancel_timeout_on_hook(monkeypatch, hook_name) -> None:
"""Make sure that we are cancelling any scheduled traceback dumping due
to timeout before entering pdb (pytest-dev/pytest-faulthandler#12) or any
other interactive exception (pytest-dev/pytest-faulthandler#14)."""
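The timeout machinery under test is the stdlib's deferred dump: the plugin schedules a traceback dump when a test starts and must cancel it before handing control to pdb or an interactive handler. A minimal sketch of the schedule/cancel pair (timeout value illustrative):

    import faulthandler

    # Schedule a traceback dump if the current test outlives the timeout ...
    faulthandler.dump_traceback_later(timeout=0.01)
    # ... and cancel it once pdb or an interactive exception handler takes over.
    faulthandler.cancel_dump_traceback_later()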
@pytest.mark.parametrize("faulthandler_timeout", [0, 2])
-def test_already_initialized(faulthandler_timeout, testdir):
+def test_already_initialized(faulthandler_timeout: int, pytester: Pytester) -> None:
"""Test for faulthandler being initialized earlier than pytest (#6575)."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test():
import faulthandler
assert faulthandler.is_enabled()
"""
)
- result = testdir.run(
+ result = pytester.run(
sys.executable,
"-X",
"faulthandler",
"-mpytest",
- testdir.tmpdir,
+ pytester.path,
"-o",
- "faulthandler_timeout={}".format(faulthandler_timeout),
+ f"faulthandler_timeout={faulthandler_timeout}",
)
# ensure warning is emitted if faulthandler_timeout is configured
warning_line = "*faulthandler.py*faulthandler module enabled before*"
+from pathlib import Path
from textwrap import dedent
import pytest
from _pytest.config.findpaths import get_common_ancestor
from _pytest.config.findpaths import get_dirs_from_args
from _pytest.config.findpaths import load_config_dict_from_file
-from _pytest.pathlib import Path
class TestLoadConfigDictFromFile:
import pytest
from _pytest.config import ExitCode
+from _pytest.pytester import Pytester
-def test_version_verbose(testdir, pytestconfig):
- testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
- result = testdir.runpytest("--version", "--version")
+def test_version_verbose(pytester: Pytester, pytestconfig, monkeypatch) -> None:
+ monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
+ result = pytester.runpytest("--version", "--version")
assert result.ret == 0
- result.stderr.fnmatch_lines(
- ["*pytest*{}*imported from*".format(pytest.__version__)]
- )
+ result.stderr.fnmatch_lines([f"*pytest*{pytest.__version__}*imported from*"])
if pytestconfig.pluginmanager.list_plugin_distinfo():
result.stderr.fnmatch_lines(["*setuptools registered plugins:", "*at*"])
-def test_version_less_verbose(testdir, pytestconfig):
- testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
- result = testdir.runpytest("--version")
+def test_version_less_verbose(pytester: Pytester, pytestconfig, monkeypatch) -> None:
+ monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
+ result = pytester.runpytest("--version")
assert result.ret == 0
# p = py.path.local(py.__file__).dirpath()
- result.stderr.fnmatch_lines(["pytest {}".format(pytest.__version__)])
+ result.stderr.fnmatch_lines([f"pytest {pytest.__version__}"])
-def test_help(testdir):
- result = testdir.runpytest("--help")
+def test_help(pytester: Pytester) -> None:
+ result = pytester.runpytest("--help")
assert result.ret == 0
result.stdout.fnmatch_lines(
"""
)
-def test_none_help_param_raises_exception(testdir):
+def test_none_help_param_raises_exception(pytester: Pytester) -> None:
"""Test that a None help param raises a TypeError."""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("test_ini", None, default=True, type="bool")
"""
)
- result = testdir.runpytest("--help")
+ result = pytester.runpytest("--help")
result.stderr.fnmatch_lines(
["*TypeError: help argument cannot be None for test_ini*"]
)
-def test_empty_help_param(testdir):
+def test_empty_help_param(pytester: Pytester) -> None:
"""Test that an empty help param is displayed correctly."""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("test_ini", "", default=True, type="bool")
"""
)
- result = testdir.runpytest("--help")
+ result = pytester.runpytest("--help")
assert result.ret == 0
lines = [
" required_plugins (args):",
result.stdout.fnmatch_lines(lines, consecutive=True)
-def test_hookvalidation_unknown(testdir):
- testdir.makeconftest(
+def test_hookvalidation_unknown(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_hello(xyz):
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret != 0
result.stdout.fnmatch_lines(["*unknown hook*pytest_hello*"])
-def test_hookvalidation_optional(testdir):
- testdir.makeconftest(
+def test_hookvalidation_optional(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
@pytest.hookimpl(optionalhook=True)
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
-def test_traceconfig(testdir):
- result = testdir.runpytest("--traceconfig")
+def test_traceconfig(pytester: Pytester) -> None:
+ result = pytester.runpytest("--traceconfig")
result.stdout.fnmatch_lines(["*using*pytest*py*", "*active plugins*"])
-def test_debug(testdir):
- result = testdir.runpytest_subprocess("--debug")
+def test_debug(pytester: Pytester) -> None:
+ result = pytester.runpytest_subprocess("--debug")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
- p = testdir.tmpdir.join("pytestdebug.log")
- assert "pytest_sessionstart" in p.read()
+ p = pytester.path.joinpath("pytestdebug.log")
+ assert "pytest_sessionstart" in p.read_text("utf-8")
-def test_PYTEST_DEBUG(testdir, monkeypatch):
+def test_PYTEST_DEBUG(pytester: Pytester, monkeypatch) -> None:
monkeypatch.setenv("PYTEST_DEBUG", "1")
- result = testdir.runpytest_subprocess()
+ result = pytester.runpytest_subprocess()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stderr.fnmatch_lines(
["*pytest_plugin_registered*", "*manager*PluginManager*"]
import os
import platform
from datetime import datetime
+from pathlib import Path
from typing import cast
from typing import List
from typing import Tuple
+from typing import TYPE_CHECKING
from xml.dom import minidom
import py
import xmlschema
import pytest
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.junitxml import bin_xml_escape
from _pytest.junitxml import LogXML
-from _pytest.pathlib import Path
from _pytest.reports import BaseReport
from _pytest.reports import TestReport
from _pytest.store import Store
pass
"""
)
- result, dom = run_and_parse(
- "-o", "junit_duration_report={}".format(duration_report)
- )
+ result, dom = run_and_parse("-o", f"junit_duration_report={duration_report}")
node = dom.find_first_by_tag("testsuite")
tnode = node.find_first_by_tag("testcase")
val = float(tnode["time"])
def test_dont_configure_on_workers(tmpdir) -> None:
- gotten = [] # type: List[object]
+ gotten: List[object] = []
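This hunk, and several below, apply the same mechanical migration: PEP 484 type comments become PEP 526 variable annotations. A minimal before/after sketch:

    from typing import List

    gotten = []  # type: List[object]   # old style: type comment
    gotten: List[object] = []           # new style: inline annotation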
class FakeConfig:
if TYPE_CHECKING:
class Report(BaseReport):
longrepr = ustr
- sections = [] # type: List[Tuple[str, str]]
+ sections: List[Tuple[str, str]] = []
nodeid = "something"
location = "tests/filename.py", 42, "TestClass.method"
+ when = "teardown"
test_report = cast(TestReport, Report())
log = LogXML(str(path), None, family=xunit_family)
class Report(BaseReport):
- sections = [] # type: List[Tuple[str, str]]
+ sections: List[Tuple[str, str]] = []
nodeid = "test_node_id"
log.pytest_sessionstart()
class Report(BaseReport):
longrepr = "FooBarBaz"
- sections = [] # type: List[Tuple[str, str]]
+ sections: List[Tuple[str, str]] = []
nodeid = "something"
location = "tests/filename.py", 42, "TestClass.method"
url = test_url
# i.e.: Expect the drive on Windows because we just have drive:filename, whereas
# we expect a relative path on Linux.
- expect = (
- "*{}*".format(subst_p) if sys.platform == "win32" else "*sub2/test_foo.py*"
- )
+ expect = f"*{subst_p}*" if sys.platform == "win32" else "*sub2/test_foo.py*"
result.stdout.fnmatch_lines([expect])
import argparse
import os
import re
+from pathlib import Path
from typing import Optional
import py.path
from _pytest.config import UsageError
from _pytest.main import resolve_collection_argument
from _pytest.main import validate_basetemp
-from _pytest.pathlib import Path
from _pytest.pytester import Testdir
if exc == SystemExit:
assert result.stdout.lines[-3:] == [
- 'INTERNALERROR> File "{}", line 4, in pytest_sessionstart'.format(c1),
+ f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
'INTERNALERROR> raise SystemExit("boom")',
"INTERNALERROR> SystemExit: boom",
]
else:
assert result.stdout.lines[-3:] == [
- 'INTERNALERROR> File "{}", line 4, in pytest_sessionstart'.format(c1),
+ f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
'INTERNALERROR> raise ValueError("boom")',
"INTERNALERROR> ValueError: boom",
]
if returncode is False:
assert result.stderr.lines == ["mainloop: caught unexpected SystemExit!"]
else:
- assert result.stderr.lines == ["Exit: exiting after {}...".format(exc.__name__)]
+ assert result.stderr.lines == [f"Exit: exiting after {exc.__name__}..."]
@pytest.mark.parametrize("returncode", (None, 42))
import os
import sys
+from typing import List
+from typing import Optional
from unittest import mock
import pytest
from _pytest.config import ExitCode
-from _pytest.mark import MarkGenerator as Mark
+from _pytest.mark import MarkGenerator
from _pytest.mark.structures import EMPTY_PARAMETERSET_OPTION
from _pytest.nodes import Collector
from _pytest.nodes import Node
+from _pytest.pytester import Pytester
class TestMark:
assert attr in module.__all__ # type: ignore
def test_pytest_mark_notcallable(self) -> None:
- mark = Mark()
+ mark = MarkGenerator()
with pytest.raises(TypeError):
mark() # type: ignore[operator]
assert pytest.mark.foo(SomeClass) is SomeClass
assert pytest.mark.foo.with_args(SomeClass) is not SomeClass # type: ignore[comparison-overlap]
- def test_pytest_mark_name_starts_with_underscore(self):
- mark = Mark()
+ def test_pytest_mark_name_starts_with_underscore(self) -> None:
+ mark = MarkGenerator()
with pytest.raises(AttributeError):
mark._some_name
-def test_marked_class_run_twice(testdir):
+def test_marked_class_run_twice(pytester: Pytester) -> None:
"""Test fails file is run twice that contains marked class.
See issue#683.
"""
- py_file = testdir.makepyfile(
+ py_file = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('abc', [1, 2, 3])
assert abc in [1, 2, 3]
"""
)
- file_name = os.path.basename(py_file.strpath)
- rec = testdir.inline_run(file_name, file_name)
+ file_name = os.path.basename(py_file)
+ rec = pytester.inline_run(file_name, file_name)
rec.assertoutcome(passed=6)
-def test_ini_markers(testdir):
- testdir.makeini(
+def test_ini_markers(pytester: Pytester) -> None:
+ pytester.makeini(
"""
[pytest]
markers =
a2: this is a smoke marker
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_markers(pytestconfig):
markers = pytestconfig.getini("markers")
assert markers[1].startswith("a2:")
"""
)
- rec = testdir.inline_run()
+ rec = pytester.inline_run()
rec.assertoutcome(passed=1)
-def test_markers_option(testdir):
- testdir.makeini(
+def test_markers_option(pytester: Pytester) -> None:
+ pytester.makeini(
"""
[pytest]
markers =
nodescription
"""
)
- result = testdir.runpytest("--markers")
+ result = pytester.runpytest("--markers")
result.stdout.fnmatch_lines(
["*a1*this is a webtest*", "*a1some*another marker", "*nodescription*"]
)
-def test_ini_markers_whitespace(testdir):
- testdir.makeini(
+def test_ini_markers_whitespace(pytester: Pytester) -> None:
+ pytester.makeini(
"""
[pytest]
markers =
a1 : this is a whitespace marker
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
assert True
"""
)
- rec = testdir.inline_run("--strict-markers", "-m", "a1")
+ rec = pytester.inline_run("--strict-markers", "-m", "a1")
rec.assertoutcome(passed=1)
-def test_marker_without_description(testdir):
- testdir.makefile(
+def test_marker_without_description(pytester: Pytester) -> None:
+ pytester.makefile(
".cfg",
setup="""
[tool:pytest]
markers=slow
""",
)
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
pytest.mark.xfail('FAIL')
"""
)
- ftdir = testdir.mkdir("ft1_dummy")
- testdir.tmpdir.join("conftest.py").move(ftdir.join("conftest.py"))
- rec = testdir.runpytest("--strict-markers")
+ ftdir = pytester.mkdir("ft1_dummy")
+ pytester.path.joinpath("conftest.py").replace(ftdir.joinpath("conftest.py"))
+ rec = pytester.runpytest("--strict-markers")
rec.assert_outcomes()
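Note the filesystem move: py.path's .move() becomes pathlib.Path.replace(), which renames the file and silently overwrites any existing destination. A minimal sketch (paths illustrative):

    from pathlib import Path

    src = Path("conftest.py")
    dst = Path("ft1_dummy") / "conftest.py"
    src.replace(dst)  # rename src to dst, overwriting dst if it exists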
-def test_markers_option_with_plugin_in_current_dir(testdir):
- testdir.makeconftest('pytest_plugins = "flip_flop"')
- testdir.makepyfile(
+def test_markers_option_with_plugin_in_current_dir(pytester: Pytester) -> None:
+ pytester.makeconftest('pytest_plugins = "flip_flop"')
+ pytester.makepyfile(
flip_flop="""\
def pytest_configure(config):
config.addinivalue_line("markers", "flip:flop")
return
metafunc.parametrize("x", (10, 20))"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""\
import pytest
@pytest.mark.flipper
assert x"""
)
- result = testdir.runpytest("--markers")
+ result = pytester.runpytest("--markers")
result.stdout.fnmatch_lines(["*flip*flop*"])
-def test_mark_on_pseudo_function(testdir):
- testdir.makepyfile(
+def test_mark_on_pseudo_function(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
pass
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("option_name", ["--strict-markers", "--strict"])
-def test_strict_prohibits_unregistered_markers(testdir, option_name):
- testdir.makepyfile(
+def test_strict_prohibits_unregistered_markers(
+ pytester: Pytester, option_name: str
+) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.unregisteredmark
pass
"""
)
- result = testdir.runpytest(option_name)
+ result = pytester.runpytest(option_name)
assert result.ret != 0
result.stdout.fnmatch_lines(
["'unregisteredmark' not found in `markers` configuration option"]
("xyz or xyz2", ["test_one", "test_two"]),
],
)
-def test_mark_option(expr: str, expected_passed: str, testdir) -> None:
- testdir.makepyfile(
+def test_mark_option(
+ expr: str, expected_passed: List[Optional[str]], pytester: Pytester
+) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.xyz
pass
"""
)
- rec = testdir.inline_run("-m", expr)
+ rec = pytester.inline_run("-m", expr)
passed, skipped, fail = rec.listoutcomes()
- passed = [x.nodeid.split("::")[-1] for x in passed]
- assert passed == expected_passed
+ passed_str = [x.nodeid.split("::")[-1] for x in passed]
+ assert passed_str == expected_passed
@pytest.mark.parametrize(
("expr", "expected_passed"),
[("interface", ["test_interface"]), ("not interface", ["test_nointer"])],
)
-def test_mark_option_custom(expr: str, expected_passed: str, testdir) -> None:
- testdir.makeconftest(
+def test_mark_option_custom(
+ expr: str, expected_passed: List[str], pytester: Pytester
+) -> None:
+ pytester.makeconftest(
"""
import pytest
def pytest_collection_modifyitems(items):
item.add_marker(pytest.mark.interface)
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_interface():
pass
pass
"""
)
- rec = testdir.inline_run("-m", expr)
+ rec = pytester.inline_run("-m", expr)
passed, skipped, fail = rec.listoutcomes()
- passed = [x.nodeid.split("::")[-1] for x in passed]
- assert passed == expected_passed
+ passed_str = [x.nodeid.split("::")[-1] for x in passed]
+ assert passed_str == expected_passed
@pytest.mark.parametrize(
("not (1 or 2)", ["test_interface", "test_nointer", "test_pass"]),
],
)
-def test_keyword_option_custom(expr: str, expected_passed: str, testdir) -> None:
- testdir.makepyfile(
+def test_keyword_option_custom(
+ expr: str, expected_passed: List[str], pytester: Pytester
+) -> None:
+ pytester.makepyfile(
"""
def test_interface():
pass
pass
"""
)
- rec = testdir.inline_run("-k", expr)
+ rec = pytester.inline_run("-k", expr)
passed, skipped, fail = rec.listoutcomes()
- passed = [x.nodeid.split("::")[-1] for x in passed]
- assert passed == expected_passed
+ passed_str = [x.nodeid.split("::")[-1] for x in passed]
+ assert passed_str == expected_passed
-def test_keyword_option_considers_mark(testdir):
- testdir.copy_example("marks/marks_considered_keywords")
- rec = testdir.inline_run("-k", "foo")
+def test_keyword_option_considers_mark(pytester: Pytester) -> None:
+ pytester.copy_example("marks/marks_considered_keywords")
+ rec = pytester.inline_run("-k", "foo")
passed = rec.listoutcomes()[0]
assert len(passed) == 1
("2-3", ["test_func[2-3]"]),
],
)
-def test_keyword_option_parametrize(expr: str, expected_passed: str, testdir) -> None:
- testdir.makepyfile(
+def test_keyword_option_parametrize(
+ expr: str, expected_passed: List[str], pytester: Pytester
+) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
pass
"""
)
- rec = testdir.inline_run("-k", expr)
+ rec = pytester.inline_run("-k", expr)
passed, skipped, fail = rec.listoutcomes()
- passed = [x.nodeid.split("::")[-1] for x in passed]
- assert passed == expected_passed
+ passed_str = [x.nodeid.split("::")[-1] for x in passed]
+ assert passed_str == expected_passed
-def test_parametrize_with_module(testdir):
- testdir.makepyfile(
+def test_parametrize_with_module(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", [pytest,])
pass
"""
)
- rec = testdir.inline_run()
+ rec = pytester.inline_run()
passed, skipped, fail = rec.listoutcomes()
expected_id = "test_func[" + pytest.__name__ + "]"
assert passed[0].nodeid.split("::")[-1] == expected_id
],
)
def test_keyword_option_wrong_arguments(
- expr: str, expected_error: str, testdir, capsys
+ expr: str, expected_error: str, pytester: Pytester, capsys
) -> None:
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_func(arg):
pass
"""
)
- testdir.inline_run("-k", expr)
+ pytester.inline_run("-k", expr)
err = capsys.readouterr().err
assert expected_error in err
-def test_parametrized_collected_from_command_line(testdir):
+def test_parametrized_collected_from_command_line(pytester: Pytester) -> None:
"""Parametrized test not collected if test named specified in command
line issue#649."""
- py_file = testdir.makepyfile(
+ py_file = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
pass
"""
)
- file_name = os.path.basename(py_file.strpath)
- rec = testdir.inline_run(file_name + "::" + "test_func")
+ file_name = os.path.basename(py_file)
+ rec = pytester.inline_run(file_name + "::" + "test_func")
rec.assertoutcome(passed=3)
-def test_parametrized_collect_with_wrong_args(testdir):
+def test_parametrized_collect_with_wrong_args(pytester: Pytester) -> None:
"""Test collect parametrized func with wrong number of args."""
- py_file = testdir.makepyfile(
+ py_file = pytester.makepyfile(
"""
import pytest
"""
)
- result = testdir.runpytest(py_file)
+ result = pytester.runpytest(py_file)
result.stdout.fnmatch_lines(
[
'test_parametrized_collect_with_wrong_args.py::test_func: in "parametrize" the number of names (2):',
)
-def test_parametrized_with_kwargs(testdir):
+def test_parametrized_with_kwargs(pytester: Pytester) -> None:
"""Test collect parametrized func with wrong number of args."""
- py_file = testdir.makepyfile(
+ py_file = pytester.makepyfile(
"""
import pytest
"""
)
- result = testdir.runpytest(py_file)
+ result = pytester.runpytest(py_file)
assert result.ret == 0
-def test_parametrize_iterator(testdir):
+def test_parametrize_iterator(pytester: Pytester) -> None:
"""`parametrize` should work with generators (#5354)."""
- py_file = testdir.makepyfile(
+ py_file = pytester.makepyfile(
"""\
import pytest
assert a >= 1
"""
)
- result = testdir.runpytest(py_file)
+ result = pytester.runpytest(py_file)
assert result.ret == 0
# should not skip any tests
result.stdout.fnmatch_lines(["*3 passed*"])
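Issue #5354 concerns argvalues supplied as generators, which can be consumed only once; the three expected passes above show each parametrized case still materializes. A minimal sketch of the pattern (names and values illustrative):

    import pytest

    def gen():
        yield 1
        yield 2
        yield 3

    @pytest.mark.parametrize("a", gen())
    def test_a(a):
        assert a >= 1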
class TestFunctional:
- def test_merging_markers_deep(self, testdir):
+ def test_merging_markers_deep(self, pytester: Pytester) -> None:
# issue 199 - propagate markers into nested classes
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import pytest
class TestA(object):
assert True
"""
)
- items, rec = testdir.inline_genitems(p)
+ items, rec = pytester.inline_genitems(p)
for item in items:
print(item, item.keywords)
assert [x for x in item.iter_markers() if x.name == "a"]
- def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir):
- p = testdir.makepyfile(
+ def test_mark_decorator_subclass_does_not_propagate_to_base(
+ self, pytester: Pytester
+ ) -> None:
+ p = pytester.makepyfile(
"""
import pytest
def test_bar(self): pass
"""
)
- items, rec = testdir.inline_genitems(p)
+ items, rec = pytester.inline_genitems(p)
self.assert_markers(items, test_foo=("a", "b"), test_bar=("a",))
- def test_mark_should_not_pass_to_siebling_class(self, testdir):
+ def test_mark_should_not_pass_to_siebling_class(self, pytester: Pytester) -> None:
"""#568"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import pytest
"""
)
- items, rec = testdir.inline_genitems(p)
+ items, rec = pytester.inline_genitems(p)
base_item, sub_item, sub_item_other = items
print(items, [x.nodeid for x in items])
# new api segregates
assert not list(sub_item_other.iter_markers(name="b"))
assert list(sub_item.iter_markers(name="b"))
- def test_mark_decorator_baseclasses_merged(self, testdir):
- p = testdir.makepyfile(
+ def test_mark_decorator_baseclasses_merged(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
def test_bar(self): pass
"""
)
- items, rec = testdir.inline_genitems(p)
+ items, rec = pytester.inline_genitems(p)
self.assert_markers(items, test_foo=("a", "b", "c"), test_bar=("a", "b", "d"))
- def test_mark_closest(self, testdir):
- p = testdir.makepyfile(
+ def test_mark_closest(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
"""
)
- items, rec = testdir.inline_genitems(p)
+ items, rec = pytester.inline_genitems(p)
has_own, has_inherited = items
- assert has_own.get_closest_marker("c").kwargs == {"location": "function"}
- assert has_inherited.get_closest_marker("c").kwargs == {"location": "class"}
+ has_own_marker = has_own.get_closest_marker("c")
+ has_inherited_marker = has_inherited.get_closest_marker("c")
+ assert has_own_marker is not None
+ assert has_inherited_marker is not None
+ assert has_own_marker.kwargs == {"location": "function"}
+ assert has_inherited_marker.kwargs == {"location": "class"}
assert has_own.get_closest_marker("missing") is None
- def test_mark_with_wrong_marker(self, testdir):
- reprec = testdir.inline_runsource(
+ def test_mark_with_wrong_marker(self, pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
import pytest
class pytestmark(object):
assert len(values) == 1
assert "TypeError" in str(values[0].longrepr)
- def test_mark_dynamically_in_funcarg(self, testdir):
- testdir.makeconftest(
+ def test_mark_dynamically_in_funcarg(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
@pytest.fixture
terminalreporter._tw.line("keyword: %s" % values[0].keywords)
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_func(arg):
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["keyword: *hello*"])
- def test_no_marker_match_on_unmarked_names(self, testdir):
- p = testdir.makepyfile(
+ def test_no_marker_match_on_unmarked_names(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
@pytest.mark.shouldmatch
assert 1
"""
)
- reprec = testdir.inline_run("-m", "test_unmarked", p)
+ reprec = pytester.inline_run("-m", "test_unmarked", p)
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) + len(skipped) + len(failed) == 0
dlist = reprec.getcalls("pytest_deselected")
deselected_tests = dlist[0].items
assert len(deselected_tests) == 2
- def test_keywords_at_node_level(self, testdir):
- testdir.makepyfile(
+ def test_keywords_at_node_level(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope="session", autouse=True)
pass
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
- def test_keyword_added_for_session(self, testdir):
- testdir.makeconftest(
+ def test_keyword_added_for_session(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
def pytest_collection_modifyitems(session):
session.add_marker(10))
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_some(request):
assert "mark1" in request.keywords
assert marker.kwargs == {}
"""
)
- reprec = testdir.inline_run("-m", "mark1")
+ reprec = pytester.inline_run("-m", "mark1")
reprec.assertoutcome(passed=1)
- def assert_markers(self, items, **expected):
+ def assert_markers(self, items, **expected) -> None:
"""Assert that given items have expected marker names applied to them.
expected should be a dict of (item name -> seq of expected marker names).
- Note: this could be moved to ``testdir`` if proven to be useful
+ Note: this could be moved to ``pytester`` if proven to be useful
to other modules.
"""
items = {x.name: x for x in items}
assert markers == set(expected_markers)
@pytest.mark.filterwarnings("ignore")
- def test_mark_from_parameters(self, testdir):
+ def test_mark_from_parameters(self, pytester: Pytester) -> None:
"""#1540"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
assert True
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(skipped=1)
- def test_reevaluate_dynamic_expr(self, testdir):
+ def test_reevaluate_dynamic_expr(self, pytester: Pytester) -> None:
"""#7360"""
- py_file1 = testdir.makepyfile(
+ py_file1 = pytester.makepyfile(
test_reevaluate_dynamic_expr1="""
import pytest
assert True
"""
)
- py_file2 = testdir.makepyfile(
+ py_file2 = pytester.makepyfile(
test_reevaluate_dynamic_expr2="""
import pytest
"""
)
- file_name1 = os.path.basename(py_file1.strpath)
- file_name2 = os.path.basename(py_file2.strpath)
- reprec = testdir.inline_run(file_name1, file_name2)
+ file_name1 = os.path.basename(py_file1)
+ file_name2 = os.path.basename(py_file2)
+ reprec = pytester.inline_run(file_name1, file_name2)
reprec.assertoutcome(passed=1, skipped=1)
class TestKeywordSelection:
- def test_select_simple(self, testdir):
- file_test = testdir.makepyfile(
+ def test_select_simple(self, pytester: Pytester) -> None:
+ file_test = pytester.makepyfile(
"""
def test_one():
assert 0
)
def check(keyword, name):
- reprec = testdir.inline_run("-s", "-k", keyword, file_test)
+ reprec = pytester.inline_run("-s", "-k", keyword, file_test)
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
assert failed[0].nodeid.split("::")[-1] == name
"xxx and TestClass and test_2",
],
)
- def test_select_extra_keywords(self, testdir, keyword):
- p = testdir.makepyfile(
+ def test_select_extra_keywords(self, pytester: Pytester, keyword) -> None:
+ p = pytester.makepyfile(
test_select="""
def test_1():
pass
pass
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
conftest="""
import pytest
@pytest.hookimpl(hookwrapper=True)
item.extra_keyword_matches.add("xxx")
"""
)
- reprec = testdir.inline_run(p.dirpath(), "-s", "-k", keyword)
+ reprec = pytester.inline_run(p.parent, "-s", "-k", keyword)
print("keyword", repr(keyword))
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) == 1
assert len(dlist) == 1
assert dlist[0].items[0].name == "test_1"
- def test_select_starton(self, testdir):
- threepass = testdir.makepyfile(
+ def test_select_starton(self, pytester: Pytester) -> None:
+ threepass = pytester.makepyfile(
test_threepass="""
def test_one(): assert 1
def test_two(): assert 1
def test_three(): assert 1
"""
)
- reprec = testdir.inline_run("-k", "test_two:", threepass)
+ reprec = pytester.inline_run("-k", "test_two:", threepass)
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) == 2
assert not failed
item = dlist[0].items[0]
assert item.name == "test_one"
- def test_keyword_extra(self, testdir):
- p = testdir.makepyfile(
+ def test_keyword_extra(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def test_one():
assert 0
test_one.mykeyword = True
"""
)
- reprec = testdir.inline_run("-k", "mykeyword", p)
+ reprec = pytester.inline_run("-k", "mykeyword", p)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 1
@pytest.mark.xfail
- def test_keyword_extra_dash(self, testdir):
- p = testdir.makepyfile(
+ def test_keyword_extra_dash(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def test_one():
assert 0
)
# with argparse the argument to an option cannot
# start with '-'
- reprec = testdir.inline_run("-k", "-mykeyword", p)
+ reprec = pytester.inline_run("-k", "-mykeyword", p)
passed, skipped, failed = reprec.countoutcomes()
assert passed + skipped + failed == 0
@pytest.mark.parametrize(
"keyword", ["__", "+", ".."],
)
- def test_no_magic_values(self, testdir, keyword: str) -> None:
+ def test_no_magic_values(self, pytester: Pytester, keyword: str) -> None:
"""Make sure the tests do not match on magic values,
e.g. double-underscored values like '__dict__', or on '+'.
"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
def test_one(): assert 1
"""
)
- reprec = testdir.inline_run("-k", keyword, p)
+ reprec = pytester.inline_run("-k", keyword, p)
passed, skipped, failed = reprec.countoutcomes()
dlist = reprec.getcalls("pytest_deselected")
assert passed + skipped + failed == 0
deselected_tests = dlist[0].items
assert len(deselected_tests) == 1
- def test_no_match_directories_outside_the_suite(self, testdir):
+ def test_no_match_directories_outside_the_suite(self, pytester: Pytester) -> None:
"""`-k` should not match against directories containing the test suite (#7040)."""
test_contents = """
def test_aaa(): pass
def test_ddd(): pass
"""
- testdir.makepyfile(
+ pytester.makepyfile(
**{"ddd/tests/__init__.py": "", "ddd/tests/test_foo.py": test_contents}
)
def get_collected_names(*args):
- _, rec = testdir.inline_genitems(*args)
+ _, rec = pytester.inline_genitems(*args)
calls = rec.getcalls("pytest_collection_finish")
assert len(calls) == 1
return [x.name for x in calls[0].session.items]
assert get_collected_names() == ["test_aaa", "test_ddd"]
# do not collect anything based on names outside the collection tree
- assert get_collected_names("-k", testdir.tmpdir.basename) == []
+ assert get_collected_names("-k", pytester._name) == []
# "-k ddd" should only collect "test_ddd", but not
# 'test_aaa' just because one of its parent directories is named "ddd";
("foo", pytest.mark.bar(), False),
],
)
- def test__eq__(self, lhs, rhs, expected):
+ def test__eq__(self, lhs, rhs, expected) -> None:
assert (lhs == rhs) == expected
def test_aliases(self) -> None:
@pytest.mark.parametrize("mark", [None, "", "skip", "xfail"])
-def test_parameterset_for_parametrize_marks(testdir, mark):
+def test_parameterset_for_parametrize_marks(
+ pytester: Pytester, mark: Optional[str]
+) -> None:
if mark is not None:
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
{}={}
)
)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
from _pytest.mark import pytest_configure, get_empty_parameterset_mark
pytest_configure(config)
assert result_mark.kwargs.get("run") is False
-def test_parameterset_for_fail_at_collect(testdir):
- testdir.makeini(
+def test_parameterset_for_fail_at_collect(pytester: Pytester) -> None:
+ pytester.makeini(
"""
[pytest]
{}=fail_at_collect
)
)
- config = testdir.parseconfig()
+ config = pytester.parseconfig()
from _pytest.mark import pytest_configure, get_empty_parameterset_mark
pytest_configure(config)
):
get_empty_parameterset_mark(config, ["a"], pytest_configure)
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
import pytest
pass
"""
)
- result = testdir.runpytest(str(p1))
+ result = pytester.runpytest(str(p1))
result.stdout.fnmatch_lines(
[
"collected 0 items / 1 error",
assert result.ret == ExitCode.INTERRUPTED
-def test_parameterset_for_parametrize_bad_markname(testdir):
+def test_parameterset_for_parametrize_bad_markname(pytester: Pytester) -> None:
with pytest.raises(pytest.UsageError):
- test_parameterset_for_parametrize_marks(testdir, "bad")
+ test_parameterset_for_parametrize_marks(pytester, "bad")
-def test_mark_expressions_no_smear(testdir):
- testdir.makepyfile(
+def test_mark_expressions_no_smear(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
"""
)
- reprec = testdir.inline_run("-m", "FOO")
+ reprec = pytester.inline_run("-m", "FOO")
passed, skipped, failed = reprec.countoutcomes()
dlist = reprec.getcalls("pytest_deselected")
assert passed == 1
# todo: fixed
# keywords smear - expected behaviour
- # reprec_keywords = testdir.inline_run("-k", "FOO")
+ # reprec_keywords = pytester.inline_run("-k", "FOO")
# passed_k, skipped_k, failed_k = reprec_keywords.countoutcomes()
# assert passed_k == 2
# assert skipped_k == failed_k == 0
-def test_addmarker_order():
+def test_addmarker_order() -> None:
session = mock.Mock()
session.own_markers = []
session.parent = None
@pytest.mark.filterwarnings("ignore")
-def test_markers_from_parametrize(testdir):
+def test_markers_from_parametrize(pytester: Pytester) -> None:
"""#3605"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.assert_outcomes(passed=4)
@pytest.mark.parametrize("s", (None, "hello world"))
-def test_pytest_param_id_allows_none_or_string(s):
+def test_pytest_param_id_allows_none_or_string(s) -> None:
assert pytest.param(id=s)
@pytest.mark.parametrize("expr", ("NOT internal_err", "NOT (internal_err)", "bogus/"))
-def test_marker_expr_eval_failure_handling(testdir, expr):
- foo = testdir.makepyfile(
+def test_marker_expr_eval_failure_handling(pytester: Pytester, expr) -> None:
+ foo = pytester.makepyfile(
"""
import pytest
pass
"""
)
- expected = "ERROR: Wrong expression passed to '-m': {}: *".format(expr)
- result = testdir.runpytest(foo, "-m", expr)
+ expected = f"ERROR: Wrong expression passed to '-m': {expr}: *"
+ result = pytester.runpytest(foo, "-m", expr)
result.stderr.fnmatch_lines([expected])
assert result.ret == ExitCode.USAGE_ERROR
def _modules() -> List[str]:
- pytest_pkg = _pytest.__path__ # type: str # type: ignore
+ pytest_pkg: str = _pytest.__path__ # type: ignore
return sorted(
n
for _, n, _ in pkgutil.walk_packages(pytest_pkg, prefix=_pytest.__name__ + ".")
subprocess.check_call((
sys.executable,
"-W", "error",
- # https://github.com/pytest-dev/pytest/issues/5901
- "-W", "ignore:The usage of `cmp` is deprecated and will be removed on or after 2021-06-01. Please use `eq` and `order` instead.:DeprecationWarning", # noqa: E501
- "-c", "__import__({!r})".format(module),
+ "-c", f"__import__({module!r})",
))
# fmt: on
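The !r conversion in the new f-string reproduces the old "{!r}".format(module) behavior: the module name is embedded as a quoted Python literal, keeping the -c program valid source. A minimal sketch (module name illustrative):

    module = "_pytest.config"
    code = f"__import__({module!r})"  # -> "__import__('_pytest.config')"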
import textwrap
from typing import Dict
from typing import Generator
+from typing import Type
import py
import pytest
-from _pytest.compat import TYPE_CHECKING
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Testdir
-if TYPE_CHECKING:
- from typing import Type
-
@pytest.fixture
def mp() -> Generator[MonkeyPatch, None, None]:
def test_setitem_deleted_meanwhile() -> None:
- d = {} # type: Dict[str, object]
+ d: Dict[str, object] = {}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, "x", 2)
del d["x"]
def test_delitem() -> None:
- d = {"x": 1} # type: Dict[str, object]
+ d: Dict[str, object] = {"x": 1}
monkeypatch = MonkeyPatch()
monkeypatch.delitem(d, "x")
assert "x" not in d
@pytest.mark.parametrize(
"Sample", [Sample, SampleInherit], ids=["new", "new-inherit"],
)
-def test_issue156_undo_staticmethod(Sample: "Type[Sample]") -> None:
+def test_issue156_undo_staticmethod(Sample: Type[Sample]) -> None:
monkeypatch = MonkeyPatch()
monkeypatch.setattr(Sample, "hello", None)
assert inspect.isclass(functools.partial)
+def test_context_classmethod() -> None:
+ class A:
+ x = 1
+
+ with MonkeyPatch.context() as m:
+ m.setattr(A, "x", 2)
+ assert A.x == 2
+ assert A.x == 1
+
+
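The new test documents that MonkeyPatch.context() also covers plain class attributes: every patch made inside the with block is undone on exit, whether the block succeeds or raises. A short usage sketch (environment variable name is hypothetical):

    from _pytest.monkeypatch import MonkeyPatch

    with MonkeyPatch.context() as mp:
        mp.setenv("EXAMPLE_VAR", "1")  # hypothetical env var, restored on exit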
def test_syspath_prepend_with_namespace_packages(
testdir: Testdir, monkeypatch: MonkeyPatch
) -> None:
+from typing import List
+from typing import Type
+
import py
import pytest
from _pytest import nodes
-from _pytest.pytester import Testdir
+from _pytest.pytester import Pytester
+from _pytest.warning_types import PytestWarning
@pytest.mark.parametrize(
- "baseid, nodeid, expected",
+ ("nodeid", "expected"),
(
- ("", "", True),
- ("", "foo", True),
- ("", "foo/bar", True),
- ("", "foo/bar::TestBaz", True),
- ("foo", "food", False),
- ("foo/bar::TestBaz", "foo/bar", False),
- ("foo/bar::TestBaz", "foo/bar::TestBop", False),
- ("foo/bar", "foo/bar::TestBop", True),
+ ("", [""]),
+ ("a", ["", "a"]),
+ ("aa/b", ["", "aa", "aa/b"]),
+ ("a/b/c", ["", "a", "a/b", "a/b/c"]),
+ ("a/bbb/c::D", ["", "a", "a/bbb", "a/bbb/c", "a/bbb/c::D"]),
+ ("a/b/c::D::eee", ["", "a", "a/b", "a/b/c", "a/b/c::D", "a/b/c::D::eee"]),
+ # :: considered only at the last component.
+ ("::xx", ["", "::xx"]),
+ ("a/b/c::D/d::e", ["", "a", "a/b", "a/b/c::D", "a/b/c::D/d", "a/b/c::D/d::e"]),
+ # : alone is not a separator.
+ ("a/b::D:e:f::g", ["", "a", "a/b", "a/b::D:e:f", "a/b::D:e:f::g"]),
),
)
-def test_ischildnode(baseid: str, nodeid: str, expected: bool) -> None:
- result = nodes.ischildnode(baseid, nodeid)
- assert result is expected
+def test_iterparentnodeids(nodeid: str, expected: List[str]) -> None:
+ result = list(nodes.iterparentnodeids(nodeid))
+ assert result == expected
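The parametrized cases pin down the grammar iterparentnodeids must honor: '/' separates collection parents everywhere, while '::' only splits the final '/'-component. A rough re-implementation that satisfies every case above (a sketch, not the shipped code):

    from typing import Iterator

    def iterparentnodeids_sketch(nodeid: str) -> Iterator[str]:
        yield ""  # the session is every node's ancestor
        parts = nodeid.split("/")
        # Each "/"-separated prefix short of the full id ...
        for i in range(1, len(parts)):
            yield "/".join(parts[:i])
        # ... then grow the last component one "::" chunk at a time.
        prefix = "/".join(parts[:-1])
        chunks = parts[-1].split("::")
        acc = (prefix + "/" if prefix else "") + chunks[0]
        if acc:
            yield acc
        for chunk in chunks[1:]:
            acc += "::" + chunk
            yield acc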
def test_node_from_parent_disallowed_arguments() -> None:
nodes.Node.from_parent(None, config=None) # type: ignore[arg-type]
-def test_std_warn_not_pytestwarning(testdir: Testdir) -> None:
- items = testdir.getitems(
+@pytest.mark.parametrize(
+ "warn_type, msg", [(DeprecationWarning, "deprecated"), (PytestWarning, "pytest")]
+)
+def test_node_warn_is_no_longer_only_pytest_warnings(
+ pytester: Pytester, warn_type: Type[Warning], msg: str
+) -> None:
+ items = pytester.getitems(
+ """
+ def test():
+ pass
+ """
+ )
+ with pytest.warns(warn_type, match=msg):
+ items[0].warn(warn_type(msg))
+
+
+def test_node_warning_enforces_warning_types(pytester: Pytester) -> None:
+ items = pytester.getitems(
"""
def test():
pass
"""
)
- with pytest.raises(ValueError, match=".*instance of PytestWarning.*"):
- items[0].warn(UserWarning("some warning")) # type: ignore[arg-type]
+ with pytest.raises(
+ ValueError, match="warning must be an instance of Warning or subclass"
+ ):
+ items[0].warn(Exception("ok")) # type: ignore[arg-type]
def test__check_initialpaths_for_relpath() -> None:
assert nodes._check_initialpaths_for_relpath(FakeSession2, outside) is None
-def test_failure_with_changed_cwd(testdir):
+def test_failure_with_changed_cwd(pytester: Pytester) -> None:
"""
Test failure lines should use absolute paths if cwd has changed since
invocation, so the path is correct (#6428).
"""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import os
import pytest
assert False
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines([str(p) + ":*: AssertionError", "*1 failed in *"])
import pytest
+from _pytest.pytester import Pytester
def setup_module(mod):
mod.nose = pytest.importorskip("nose")
-def test_nose_setup(testdir):
- p = testdir.makepyfile(
+def test_nose_setup(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
values = []
from nose.tools import with_setup
test_hello.teardown = lambda: values.append(2)
"""
)
- result = testdir.runpytest(p, "-p", "nose")
+ result = pytester.runpytest(p, "-p", "nose")
result.assert_outcomes(passed=2)
-def test_setup_func_with_setup_decorator():
+def test_setup_func_with_setup_decorator() -> None:
from _pytest.nose import call_optional
values = []
assert not values
-def test_setup_func_not_callable():
+def test_setup_func_not_callable() -> None:
from _pytest.nose import call_optional
class A:
call_optional(A(), "f")
-def test_nose_setup_func(testdir):
- p = testdir.makepyfile(
+def test_nose_setup_func(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
from nose.tools import with_setup
"""
)
- result = testdir.runpytest(p, "-p", "nose")
+ result = pytester.runpytest(p, "-p", "nose")
result.assert_outcomes(passed=2)
-def test_nose_setup_func_failure(testdir):
- p = testdir.makepyfile(
+def test_nose_setup_func_failure(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
from nose.tools import with_setup
"""
)
- result = testdir.runpytest(p, "-p", "nose")
+ result = pytester.runpytest(p, "-p", "nose")
result.stdout.fnmatch_lines(["*TypeError: <lambda>()*"])
-def test_nose_setup_func_failure_2(testdir):
- testdir.makepyfile(
+def test_nose_setup_func_failure_2(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
values = []
test_hello.teardown = my_teardown
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
-def test_nose_setup_partial(testdir):
+def test_nose_setup_partial(pytester: Pytester) -> None:
pytest.importorskip("functools")
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
from functools import partial
test_hello.teardown = my_teardown_partial
"""
)
- result = testdir.runpytest(p, "-p", "nose")
+ result = pytester.runpytest(p, "-p", "nose")
result.stdout.fnmatch_lines(["*2 passed*"])
-def test_module_level_setup(testdir):
- testdir.makepyfile(
+def test_module_level_setup(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
from nose.tools import with_setup
items = {}
assert 1 not in items
"""
)
- result = testdir.runpytest("-p", "nose")
+ result = pytester.runpytest("-p", "nose")
result.stdout.fnmatch_lines(["*2 passed*"])
-def test_nose_style_setup_teardown(testdir):
- testdir.makepyfile(
+def test_nose_style_setup_teardown(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
values = []
assert values == [1]
"""
)
- result = testdir.runpytest("-p", "nose")
+ result = pytester.runpytest("-p", "nose")
result.stdout.fnmatch_lines(["*2 passed*"])
-def test_nose_setup_ordering(testdir):
- testdir.makepyfile(
+def test_nose_setup_ordering(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def setup_module(mod):
mod.visited = True
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
-def test_apiwrapper_problem_issue260(testdir):
+def test_apiwrapper_problem_issue260(pytester: Pytester) -> None:
    # this would end up trying to call an optional teardown on the class;
    # for plain unittests we don't want nose behaviour
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import unittest
class TestCase(unittest.TestCase):
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.assert_outcomes(passed=1)
-def test_setup_teardown_linking_issue265(testdir):
+def test_setup_teardown_linking_issue265(pytester: Pytester) -> None:
# we accidentally didn't integrate nose setupstate with normal setupstate
# this test ensures that won't happen again
- testdir.makepyfile(
+ pytester.makepyfile(
'''
import pytest
raise Exception("should not call teardown for skipped tests")
'''
)
- reprec = testdir.runpytest()
+ reprec = pytester.runpytest()
reprec.assert_outcomes(passed=1, skipped=1)
-def test_SkipTest_during_collection(testdir):
- p = testdir.makepyfile(
+def test_SkipTest_during_collection(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import nose
raise nose.SkipTest("during collection")
assert False
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.assert_outcomes(skipped=1)
-def test_SkipTest_in_test(testdir):
- testdir.makepyfile(
+def test_SkipTest_in_test(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import nose
raise nose.SkipTest("in test")
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(skipped=1)
-def test_istest_function_decorator(testdir):
- p = testdir.makepyfile(
+def test_istest_function_decorator(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import nose.tools
@nose.tools.istest
pass
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.assert_outcomes(passed=1)
-def test_nottest_function_decorator(testdir):
- testdir.makepyfile(
+def test_nottest_function_decorator(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import nose.tools
@nose.tools.nottest
pass
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls
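# A hedged sketch of what istest/nottest do: they set __test__ on the callable,
# which pytest's collector honors; the function names here are illustrative.
import nose.tools

@nose.tools.istest
def has_no_test_prefix():  # collected anyway: istest sets __test__ = True
    pass

@nose.tools.nottest
def test_not_collected():  # skipped at collection: nottest sets __test__ = False
    pass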
-def test_istest_class_decorator(testdir):
- p = testdir.makepyfile(
+def test_istest_class_decorator(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import nose.tools
@nose.tools.istest
pass
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.assert_outcomes(passed=1)
-def test_nottest_class_decorator(testdir):
- testdir.makepyfile(
+def test_nottest_class_decorator(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import nose.tools
@nose.tools.nottest
pass
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls
-def test_skip_test_with_unicode(testdir):
- testdir.makepyfile(
+def test_skip_test_with_unicode(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""\
import unittest
class TestClass():
raise unittest.SkipTest('😊')
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 skipped *"])
-def test_raises(testdir):
- testdir.makepyfile(
+def test_raises(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
from nose.tools import raises
raise BaseException
"""
)
- result = testdir.runpytest("-vv")
+ result = pytester.runpytest("-vv")
result.stdout.fnmatch_lines(
[
"test_raises.py::test_raises_runtimeerror PASSED*",
import pytest
from _pytest.config import argparsing as parseopt
from _pytest.config.exceptions import UsageError
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.pytester import Pytester
@pytest.fixture
assert "--preferences=value1 value2 value3" in help
-def test_argcomplete(testdir, monkeypatch) -> None:
+def test_argcomplete(pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
try:
bash_version = subprocess.run(
["bash", "--version"],
# See #7518.
pytest.skip("not a real bash")
- script = str(testdir.tmpdir.join("test_argcomplete"))
+ script = str(pytester.path.joinpath("test_argcomplete"))
with open(str(script), "w") as fp:
        # redirecting output from argcomplete to stdin and stderr is not trivial
arg = "--fu"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
- result = testdir.run("bash", str(script), arg)
+ result = pytester.run("bash", str(script), arg)
if result.ret == 255:
# argcomplete not found
pytest.skip("argcomplete not available")
arg = "test_argc"
monkeypatch.setenv("COMP_LINE", "pytest " + arg)
monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg)))
- result = testdir.run("bash", str(script), arg)
+ result = pytester.run("bash", str(script), arg)
result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
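# A minimal sketch of how the test drives bash completion: argcomplete reads
# COMP_LINE (the full command line) and COMP_POINT (the cursor offset) and
# prints matching candidates, e.g. "--funcargs"/"--fulltrace" for "--fu".
import os

arg = "--fu"
os.environ["COMP_LINE"] = "pytest " + arg             # what the user has typed
os.environ["COMP_POINT"] = str(len("pytest " + arg))  # cursor at end of line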
+import io
from typing import List
from typing import Union
import pytest
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.pytester import Pytester
class TestPasteCapture:
@pytest.fixture
def pastebinlist(self, monkeypatch, request) -> List[Union[str, bytes]]:
- pastebinlist = [] # type: List[Union[str, bytes]]
+ pastebinlist: List[Union[str, bytes]] = []
plugin = request.config.pluginmanager.getplugin("pastebin")
monkeypatch.setattr(plugin, "create_new_paste", pastebinlist.append)
return pastebinlist
- def test_failed(self, testdir, pastebinlist):
- testpath = testdir.makepyfile(
+ def test_failed(self, pytester: Pytester, pastebinlist) -> None:
+ testpath = pytester.makepyfile(
"""
import pytest
- def test_pass():
+ def test_pass() -> None:
pass
def test_fail():
assert 0
pytest.skip("")
"""
)
- reprec = testdir.inline_run(testpath, "--pastebin=failed")
+ reprec = pytester.inline_run(testpath, "--pastebin=failed")
assert len(pastebinlist) == 1
s = pastebinlist[0]
assert s.find("def test_fail") != -1
assert reprec.countoutcomes() == [1, 1, 1]
- def test_all(self, testdir, pastebinlist):
+ def test_all(self, pytester: Pytester, pastebinlist) -> None:
from _pytest.pytester import LineMatcher
- testpath = testdir.makepyfile(
+ testpath = pytester.makepyfile(
"""
import pytest
def test_pass():
pytest.skip("")
"""
)
- reprec = testdir.inline_run(testpath, "--pastebin=all", "-v")
+ reprec = pytester.inline_run(testpath, "--pastebin=all", "-v")
assert reprec.countoutcomes() == [1, 1, 1]
assert len(pastebinlist) == 1
contents = pastebinlist[0].decode("utf-8")
]
)
- def test_non_ascii_paste_text(self, testdir, pastebinlist):
+    def test_non_ascii_paste_text(self, pytester: Pytester, pastebinlist) -> None:
        """Make sure that text which contains non-ASCII characters is pasted
correctly. See #1219.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
test_unicode="""\
def test():
assert '☺' == 1
"""
)
- result = testdir.runpytest("--pastebin=all")
+ result = pytester.runpytest("--pastebin=all")
expected_msg = "*assert '☺' == 1*"
result.stdout.fnmatch_lines(
[
return request.config.pluginmanager.getplugin("pastebin")
@pytest.fixture
- def mocked_urlopen_fail(self, monkeypatch):
+    def mocked_urlopen_fail(self, monkeypatch: MonkeyPatch):
        """Monkeypatch the actual urlopen call to emulate an HTTP Error 400."""
calls = []
def mocked(url, data):
calls.append((url, data))
- raise urllib.error.HTTPError(url, 400, "Bad request", None, None)
+ raise urllib.error.HTTPError(url, 400, "Bad request", {}, io.BytesIO())
monkeypatch.setattr(urllib.request, "urlopen", mocked)
return calls
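# A hedged sketch of why the fixture now passes {} and io.BytesIO(): HTTPError
# takes (url, code, msg, hdrs, fp), and a file-like fp avoids breaking callers
# that try to read the error body.
import io
import urllib.error

err = urllib.error.HTTPError("https://bpaste.net", 400, "Bad request", {}, io.BytesIO())
assert err.code == 400
assert str(err) == "HTTP Error 400: Bad request"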
@pytest.fixture
- def mocked_urlopen_invalid(self, monkeypatch):
+ def mocked_urlopen_invalid(self, monkeypatch: MonkeyPatch):
"""Monkeypatch the actual urlopen calls done by the internal plugin
        function that connects to the bpaste service, but return a URL in an
unexpected format."""
return calls
@pytest.fixture
- def mocked_urlopen(self, monkeypatch):
+ def mocked_urlopen(self, monkeypatch: MonkeyPatch):
"""Monkeypatch the actual urlopen calls done by the internal plugin
        function that connects to the bpaste service."""
calls = []
monkeypatch.setattr(urllib.request, "urlopen", mocked)
return calls
- def test_pastebin_invalid_url(self, pastebin, mocked_urlopen_invalid):
+ def test_pastebin_invalid_url(self, pastebin, mocked_urlopen_invalid) -> None:
result = pastebin.create_new_paste(b"full-paste-contents")
assert (
result
)
assert len(mocked_urlopen_invalid) == 1
- def test_pastebin_http_error(self, pastebin, mocked_urlopen_fail):
+ def test_pastebin_http_error(self, pastebin, mocked_urlopen_fail) -> None:
result = pastebin.create_new_paste(b"full-paste-contents")
assert result == "bad response: HTTP Error 400: Bad request"
assert len(mocked_urlopen_fail) == 1
- def test_create_new_paste(self, pastebin, mocked_urlopen):
+ def test_create_new_paste(self, pastebin, mocked_urlopen) -> None:
result = pastebin.create_new_paste(b"full-paste-contents")
assert result == "https://bpaste.net/show/3c0c6750bd"
assert len(mocked_urlopen) == 1
assert "code=full-paste-contents" in data.decode()
assert "expiry=1week" in data.decode()
- def test_create_new_paste_failure(self, pastebin, monkeypatch):
+ def test_create_new_paste_failure(self, pastebin, monkeypatch: MonkeyPatch) -> None:
import io
import urllib.request
import os.path
import sys
import unittest.mock
+from pathlib import Path
from textwrap import dedent
import py
from _pytest.pathlib import import_path
from _pytest.pathlib import ImportPathMismatchError
from _pytest.pathlib import maybe_delete_a_numbered_dir
-from _pytest.pathlib import Path
from _pytest.pathlib import resolve_package_path
+from _pytest.pathlib import symlink_or_skip
+from _pytest.pathlib import visit
class TestFNMatcherPort:
assert commonpath(subpath, path) == path
assert commonpath(Path(str(path) + "suffix"), path) == path.parent
assert commonpath(path, path.parent.parent) == path.parent.parent
+
+
+def test_visit_ignores_errors(tmpdir) -> None:
+ symlink_or_skip("recursive", tmpdir.join("recursive"))
+ tmpdir.join("foo").write_binary(b"")
+ tmpdir.join("bar").write_binary(b"")
+
+ assert [entry.name for entry in visit(tmpdir, recurse=lambda entry: False)] == [
+ "bar",
+ "foo",
+ ]
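# An approximate sketch (an assumption, not _pytest.pathlib's actual code) of a
# sorted, error-tolerant directory visitor like visit(): it yields entries in
# name order and only descends where recurse() allows.
import os
from typing import Callable, Iterator

def visit_sketch(path: str, recurse: Callable[[os.DirEntry], bool]) -> Iterator[os.DirEntry]:
    entries = sorted(os.scandir(path), key=lambda entry: entry.name)
    yield from entries
    for entry in entries:
        if entry.is_dir() and recurse(entry):  # broken symlinks report False here
            yield from visit_sketch(entry.path, recurse)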
saveindent.append(pytestpm.trace.root.indent)
raise ValueError()
- values = [] # type: List[str]
+ values: List[str] = []
pytestpm.trace.root.setwriter(values.append)
undo = pytestpm.enable_tracing()
try:
from _pytest.pytester import CwdSnapshot
from _pytest.pytester import HookRecorder
from _pytest.pytester import LineMatcher
+from _pytest.pytester import Pytester
from _pytest.pytester import SysModulesSnapshot
from _pytest.pytester import SysPathsSnapshot
from _pytest.pytester import Testdir
def spy_factory(self):
class SysModulesSnapshotSpy:
- instances = [] # type: List[SysModulesSnapshotSpy]
+ instances: List["SysModulesSnapshotSpy"] = [] # noqa: F821
def __init__(self, preserve=None) -> None:
SysModulesSnapshotSpy.instances.append(self)
original_data = list(getattr(sys, path_type))
original_other = getattr(sys, other_path_type)
original_other_data = list(original_other)
- new = [] # type: List[object]
+ new: List[object] = []
snapshot = SysPathsSnapshot()
monkeypatch.setattr(sys, path_type, new)
snapshot.restore()
obtained = str(e.value).splitlines()
if function == "no_fnmatch_line":
assert obtained == [
- "nomatch: '{}'".format(good_pattern),
+ f"nomatch: '{good_pattern}'",
" and: 'cachedir: .pytest_cache'",
" and: 'collecting ... collected 1 item'",
" and: ''",
- "fnmatch: '{}'".format(good_pattern),
+ f"fnmatch: '{good_pattern}'",
" with: 'show_fixtures_per_test.py OK'",
]
else:
assert obtained == [
- " nomatch: '{}'".format(good_pattern),
+ f" nomatch: '{good_pattern}'",
" and: 'cachedir: .pytest_cache'",
" and: 'collecting ... collected 1 item'",
" and: ''",
- "re.match: '{}'".format(good_pattern),
+ f"re.match: '{good_pattern}'",
" with: 'show_fixtures_per_test.py OK'",
]
assert str(e.value).splitlines() == ["fnmatch: '*'", " with: '1'"]
+def test_linematcher_string_api() -> None:
+ lm = LineMatcher(["foo", "bar"])
+ assert str(lm) == "foo\nbar"
+
+
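# A small usage sketch of LineMatcher, including the __str__ behavior asserted
# above; the sample lines are illustrative.
from _pytest.pytester import LineMatcher

lm = LineMatcher(["cachedir: .pytest_cache", "collected 1 item", "1 passed"])
lm.fnmatch_lines(["*1 passed*"])            # glob-style, raises on no match
lm.re_match_lines([r"collected \d+ item"])  # regex variant
assert str(lm) == "cachedir: .pytest_cache\ncollected 1 item\n1 passed"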
def test_pytester_addopts_before_testdir(request, monkeypatch) -> None:
orig = os.environ.get("PYTEST_ADDOPTS", None)
monkeypatch.setenv("PYTEST_ADDOPTS", "--orig-unused")
assert result.ret == 0
-def test_spawn_uses_tmphome(testdir) -> None:
- tmphome = str(testdir.tmpdir)
+def test_spawn_uses_tmphome(pytester: Pytester) -> None:
+ tmphome = str(pytester.path)
assert os.environ.get("HOME") == tmphome
- testdir.monkeypatch.setenv("CUSTOMENV", "42")
+ pytester._monkeypatch.setenv("CUSTOMENV", "42")
- p1 = testdir.makepyfile(
+ p1 = pytester.makepyfile(
"""
import os
tmphome=tmphome
)
)
- child = testdir.spawn_pytest(str(p1))
+ child = pytester.spawn_pytest(str(p1))
out = child.read()
assert child.wait() == 0, out.decode("utf8")
def test_makefile_joins_absolute_path(testdir: Testdir) -> None:
absfile = testdir.tmpdir / "absfile"
- if sys.platform == "win32":
- with pytest.raises(OSError):
- testdir.makepyfile(**{str(absfile): ""})
- else:
- p1 = testdir.makepyfile(**{str(absfile): ""})
- assert str(p1) == (testdir.tmpdir / absfile) + ".py"
+ p1 = testdir.makepyfile(**{str(absfile): ""})
+ assert str(p1) == str(testdir.tmpdir / "absfile.py")
+
+
+def test_testtmproot(testdir):
+    """Check that test_tmproot is a py.path attribute, for backward compatibility."""
+ assert testdir.test_tmproot.check(dir=1)
from typing import Optional
import pytest
+from _pytest.pytester import Pytester
from _pytest.recwarn import WarningsRecorder
assert warn.filename == __file__
-def test_recwarn_functional(testdir) -> None:
- testdir.makepyfile(
+def test_recwarn_functional(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import warnings
def test_method(recwarn):
assert isinstance(warn.message, UserWarning)
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
class TestWarningsRecorderChecker:
def test_recording(self) -> None:
- rec = WarningsRecorder()
+ rec = WarningsRecorder(_ispytest=True)
with rec:
assert not rec.list
warnings.warn_explicit("hello", UserWarning, "xyz", 13)
def test_warn_stacklevel(self) -> None:
"""#4243"""
- rec = WarningsRecorder()
+ rec = WarningsRecorder(_ispytest=True)
with rec:
warnings.warn("test", DeprecationWarning, 2)
from _pytest.recwarn import WarningsChecker
with pytest.raises(TypeError):
- WarningsChecker(5) # type: ignore
+ WarningsChecker(5, _ispytest=True) # type: ignore[arg-type]
with pytest.raises(TypeError):
- WarningsChecker(("hi", RuntimeWarning)) # type: ignore
+ WarningsChecker(("hi", RuntimeWarning), _ispytest=True) # type: ignore[arg-type]
with pytest.raises(TypeError):
- WarningsChecker([DeprecationWarning, RuntimeWarning]) # type: ignore
+ WarningsChecker([DeprecationWarning, RuntimeWarning], _ispytest=True) # type: ignore[arg-type]
def test_invalid_enter_exit(self) -> None:
# wrap this test in WarningsRecorder to ensure warning state gets reset
- with WarningsRecorder():
+ with WarningsRecorder(_ispytest=True):
with pytest.raises(RuntimeError):
- rec = WarningsRecorder()
+ rec = WarningsRecorder(_ispytest=True)
rec.__exit__(None, None, None) # can't exit before entering
with pytest.raises(RuntimeError):
- rec = WarningsRecorder()
+ rec = WarningsRecorder(_ispytest=True)
with rec:
with rec:
pass # can't enter twice
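# WarningsRecorder is internal (hence _ispytest=True); the public equivalent is
# the recwarn fixture. A minimal sketch, with an illustrative warning message:
import warnings

def test_deprecation(recwarn):
    warnings.warn("use new_api() instead", DeprecationWarning)
    w = recwarn.pop(DeprecationWarning)
    assert "new_api" in str(w.message)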
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
- def test_double_test(self, testdir) -> None:
+ def test_double_test(self, pytester: Pytester) -> None:
"""If a test is run again, the warning should still be raised"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
import warnings
warnings.warn("runtime", RuntimeWarning)
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*2 passed in*"])
def test_match_regex(self) -> None:
-import sys
+from pathlib import Path
from typing import Sequence
from typing import Union
from _pytest._code.code import ExceptionChainRepr
from _pytest._code.code import ExceptionRepr
from _pytest.config import Config
-from _pytest.pathlib import Path
from _pytest.pytester import Testdir
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
reprec = testdir.inline_run()
if report_class is TestReport:
- reports = reprec.getreports(
- "pytest_runtest_logreport"
- ) # type: Union[Sequence[TestReport], Sequence[CollectReport]]
+ reports: Union[
+ Sequence[TestReport], Sequence[CollectReport]
+ ] = reprec.getreports("pytest_runtest_logreport")
# we have 3 reports: setup/call/teardown
assert len(reports) == 3
# get the call report
from subprocess to main process creates an artificial exception, which ExceptionInfo
can't obtain the ReprFileLocation from.
"""
- # somehow in Python 3.5 on Windows this test fails with:
- # File "c:\...\3.5.4\x64\Lib\multiprocessing\connection.py", line 302, in _recv_bytes
- # overlapped=True)
- # OSError: [WinError 6] The handle is invalid
- #
- # so in this platform we opted to use a mock traceback which is identical to the
- # one produced by the multiprocessing module
- if sys.version_info[:2] <= (3, 5) and sys.platform.startswith("win"):
- testdir.makepyfile(
- """
- # equivalent of multiprocessing.pool.RemoteTraceback
- class RemoteTraceback(Exception):
- def __init__(self, tb):
- self.tb = tb
- def __str__(self):
- return self.tb
- def test_a():
- try:
- raise ValueError('value error')
- except ValueError as e:
- # equivalent to how multiprocessing.pool.rebuild_exc does it
- e.__cause__ = RemoteTraceback('runtime error')
- raise e
+ testdir.makepyfile(
"""
- )
- else:
- testdir.makepyfile(
- """
- from concurrent.futures import ProcessPoolExecutor
+ from concurrent.futures import ProcessPoolExecutor
- def func():
- raise ValueError('value error')
+ def func():
+ raise ValueError('value error')
- def test_a():
- with ProcessPoolExecutor() as p:
- p.submit(func).result()
- """
- )
+ def test_a():
+ with ProcessPoolExecutor() as p:
+ p.submit(func).result()
+ """
+ )
testdir.syspathinsert()
reprec = testdir.inline_run()
from typing import Dict
from typing import List
from typing import Tuple
+from typing import Type
import py
from _pytest import outcomes
from _pytest import reports
from _pytest import runner
-from _pytest.compat import TYPE_CHECKING
from _pytest.config import ExitCode
from _pytest.outcomes import OutcomeException
-if TYPE_CHECKING:
- from typing import Type
-
class TestSetupState:
def test_setup(self, testdir) -> None:
assert reps[2].failed
assert reps[2].when == "teardown"
assert reps[2].longrepr.reprcrash.message in (
- # python3 error
"TypeError: teardown_method() missing 2 required positional arguments: 'y' and 'z'",
- # python2 error
- "TypeError: teardown_method() takes exactly 4 arguments (2 given)",
+ # Python >= 3.10
+ "TypeError: TestClass.teardown_method() missing 2 required positional arguments: 'y' and 'z'",
)
def test_failure_in_setup_function_ignores_custom_repr(self, testdir) -> None:
assert res[1].name == "TestClass"
-reporttypes = [
+reporttypes: List[Type[reports.BaseReport]] = [
reports.BaseReport,
reports.TestReport,
reports.CollectReport,
-] # type: List[Type[reports.BaseReport]]
+]
@pytest.mark.parametrize(
"reporttype", reporttypes, ids=[x.__name__ for x in reporttypes]
)
-def test_report_extra_parameters(reporttype: "Type[reports.BaseReport]") -> None:
+def test_report_extra_parameters(reporttype: Type[reports.BaseReport]) -> None:
args = list(inspect.signature(reporttype.__init__).parameters.keys())[1:]
- basekw = dict.fromkeys(args, []) # type: Dict[str, List[object]]
+ basekw: Dict[str, List[object]] = dict.fromkeys(args, [])
report = reporttype(newthing=1, **basekw)
assert report.newthing == 1
ci2 = runner.CallInfo.from_call(lambda: 0 / 0, "collect")
assert ci2.when == "collect"
assert not hasattr(ci2, "result")
- assert repr(ci2) == "<CallInfo when='collect' excinfo={!r}>".format(ci2.excinfo)
+ assert repr(ci2) == f"<CallInfo when='collect' excinfo={ci2.excinfo!r}>"
assert str(ci2) == repr(ci2)
assert ci2.excinfo
assert 0, "assert_msg"
ci3 = runner.CallInfo.from_call(raise_assertion, "call")
- assert repr(ci3) == "<CallInfo when='call' excinfo={!r}>".format(ci3.excinfo)
+ assert repr(ci3) == f"<CallInfo when='call' excinfo={ci3.excinfo!r}>"
assert "\n" not in repr(ci3)
def test_current_test_env_var(testdir, monkeypatch) -> None:
- pytest_current_test_vars = [] # type: List[Tuple[str, str]]
+ pytest_current_test_vars: List[Tuple[str, str]] = []
monkeypatch.setattr(
sys, "pytest_current_test_vars", pytest_current_test_vars, raising=False
)
from typing import List
import pytest
+from _pytest.pytester import Pytester
-def test_module_and_function_setup(testdir):
- reprec = testdir.inline_runsource(
+def test_module_and_function_setup(pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
modlevel = []
def setup_module(module):
assert rep.passed
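# A minimal sketch of the xunit-style hooks these tests exercise; for a module
# with one test the call order is: setup_module, setup_function, the test,
# teardown_function, teardown_module.
calls = []

def setup_module(module):
    calls.append("setup_module")

def setup_function(function):
    calls.append("setup_function")

def test_order():
    calls.append("test")

def teardown_function(function):
    calls.append("teardown_function")

def teardown_module(module):
    calls.append("teardown_module")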
-def test_module_setup_failure_no_teardown(testdir):
- reprec = testdir.inline_runsource(
+def test_module_setup_failure_no_teardown(pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
values = []
def setup_module(module):
assert calls[0].item.module.values == [1]
-def test_setup_function_failure_no_teardown(testdir):
- reprec = testdir.inline_runsource(
+def test_setup_function_failure_no_teardown(pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
modlevel = []
def setup_function(function):
assert calls[0].item.module.modlevel == [1]
-def test_class_setup(testdir):
- reprec = testdir.inline_runsource(
+def test_class_setup(pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
class TestSimpleClassSetup(object):
clslevel = []
reprec.assertoutcome(passed=1 + 2 + 1)
-def test_class_setup_failure_no_teardown(testdir):
- reprec = testdir.inline_runsource(
+def test_class_setup_failure_no_teardown(pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
class TestSimpleClassSetup(object):
clslevel = []
reprec.assertoutcome(failed=1, passed=1)
-def test_method_setup(testdir):
- reprec = testdir.inline_runsource(
+def test_method_setup(pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
class TestSetupMethod(object):
def setup_method(self, meth):
reprec.assertoutcome(passed=2)
-def test_method_setup_failure_no_teardown(testdir):
- reprec = testdir.inline_runsource(
+def test_method_setup_failure_no_teardown(pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
class TestMethodSetup(object):
clslevel = []
reprec.assertoutcome(failed=1, passed=1)
-def test_method_setup_uses_fresh_instances(testdir):
- reprec = testdir.inline_runsource(
+def test_method_setup_uses_fresh_instances(pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
class TestSelfState1(object):
memory = []
reprec.assertoutcome(passed=2, failed=0)
-def test_setup_that_skips_calledagain(testdir):
- p = testdir.makepyfile(
+def test_setup_that_skips_calledagain(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
def setup_module(mod):
pass
"""
)
- reprec = testdir.inline_run(p)
+ reprec = pytester.inline_run(p)
reprec.assertoutcome(skipped=2)
-def test_setup_fails_again_on_all_tests(testdir):
- p = testdir.makepyfile(
+def test_setup_fails_again_on_all_tests(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
def setup_module(mod):
pass
"""
)
- reprec = testdir.inline_run(p)
+ reprec = pytester.inline_run(p)
reprec.assertoutcome(failed=2)
-def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
- p = testdir.makepyfile(
+def test_setup_funcarg_setup_when_outer_scope_fails(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
def setup_module(mod):
pass
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
"*function1*",
@pytest.mark.parametrize("arg", ["", "arg"])
def test_setup_teardown_function_level_with_optional_argument(
- testdir, monkeypatch, arg: str,
+ pytester: Pytester, monkeypatch, arg: str,
) -> None:
    """The parameter to setup/teardown xunit-style functions is now optional (#1728)."""
import sys
- trace_setups_teardowns = [] # type: List[str]
+ trace_setups_teardowns: List[str] = []
monkeypatch.setattr(
sys, "trace_setups_teardowns", trace_setups_teardowns, raising=False
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import pytest
import sys
arg=arg
)
)
- result = testdir.inline_run(p)
+ result = pytester.inline_run(p)
result.assertoutcome(passed=4)
expected = [
import pytest
from _pytest.config import ExitCode
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.pytester import Pytester
class SessionTests:
- def test_basic_testitem_events(self, testdir):
- tfile = testdir.makepyfile(
+ def test_basic_testitem_events(self, pytester: Pytester) -> None:
+ tfile = pytester.makepyfile(
"""
def test_one():
pass
pass
"""
)
- reprec = testdir.inline_run(tfile)
+ reprec = pytester.inline_run(tfile)
passed, skipped, failed = reprec.listoutcomes()
assert len(skipped) == 0
assert len(passed) == 1
# assert len(colreports) == 4
# assert colreports[1].report.failed
- def test_nested_import_error(self, testdir):
- tfile = testdir.makepyfile(
+ def test_nested_import_error(self, pytester: Pytester) -> None:
+ tfile = pytester.makepyfile(
"""
import import_fails
def test_this():
a = 1
""",
)
- reprec = testdir.inline_run(tfile)
+ reprec = pytester.inline_run(tfile)
values = reprec.getfailedcollections()
assert len(values) == 1
out = str(values[0].longrepr)
assert out.find("does_not_work") != -1
- def test_raises_output(self, testdir):
- reprec = testdir.inline_runsource(
+ def test_raises_output(self, pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
import pytest
def test_raises_doesnt():
)
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
- out = failed[0].longrepr.reprcrash.message
+ out = failed[0].longrepr.reprcrash.message # type: ignore[union-attr]
assert "DID NOT RAISE" in out
- def test_syntax_error_module(self, testdir):
- reprec = testdir.inline_runsource("this is really not python")
+ def test_syntax_error_module(self, pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource("this is really not python")
values = reprec.getfailedcollections()
assert len(values) == 1
out = str(values[0].longrepr)
assert out.find("not python") != -1
- def test_exit_first_problem(self, testdir):
- reprec = testdir.inline_runsource(
+ def test_exit_first_problem(self, pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
def test_one(): assert 0
def test_two(): assert 0
assert failed == 1
assert passed == skipped == 0
- def test_maxfail(self, testdir):
- reprec = testdir.inline_runsource(
+ def test_maxfail(self, pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
def test_one(): assert 0
def test_two(): assert 0
assert failed == 2
assert passed == skipped == 0
- def test_broken_repr(self, testdir):
- p = testdir.makepyfile(
+ def test_broken_repr(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
"""
)
- reprec = testdir.inline_run(p)
+ reprec = pytester.inline_run(p)
passed, skipped, failed = reprec.listoutcomes()
assert (len(passed), len(skipped), len(failed)) == (1, 0, 1)
- out = failed[0].longrepr.reprcrash.message
+ out = failed[0].longrepr.reprcrash.message # type: ignore[union-attr]
assert out.find("<[reprexc() raised in repr()] BrokenRepr1") != -1
- def test_broken_repr_with_showlocals_verbose(self, testdir):
- p = testdir.makepyfile(
+ def test_broken_repr_with_showlocals_verbose(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
class ObjWithErrorInRepr:
def __repr__(self):
assert x == "value"
"""
)
- reprec = testdir.inline_run("--showlocals", "-vv", p)
+ reprec = pytester.inline_run("--showlocals", "-vv", p)
passed, skipped, failed = reprec.listoutcomes()
assert (len(passed), len(skipped), len(failed)) == (0, 0, 1)
- entries = failed[0].longrepr.reprtraceback.reprentries
+ entries = failed[0].longrepr.reprtraceback.reprentries # type: ignore[union-attr]
assert len(entries) == 1
repr_locals = entries[0].reprlocals
assert repr_locals.lines
"x = <[NotImplementedError() raised in repr()] ObjWithErrorInRepr"
)
- def test_skip_file_by_conftest(self, testdir):
- testdir.makepyfile(
+ def test_skip_file_by_conftest(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
conftest="""
import pytest
def pytest_collect_file():
""",
)
try:
- reprec = testdir.inline_run(testdir.tmpdir)
+ reprec = pytester.inline_run(pytester.path)
except pytest.skip.Exception: # pragma: no cover
pytest.fail("wrong skipped caught")
reports = reprec.getreports("pytest_collectreport")
class TestNewSession(SessionTests):
- def test_order_of_execution(self, testdir):
- reprec = testdir.inline_runsource(
+ def test_order_of_execution(self, pytester: Pytester) -> None:
+ reprec = pytester.inline_runsource(
"""
values = []
def test_1():
assert failed == skipped == 0
assert passed == 7
- def test_collect_only_with_various_situations(self, testdir):
- p = testdir.makepyfile(
+ def test_collect_only_with_various_situations(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
test_one="""
def test_one():
raise ValueError()
test_three="xxxdsadsadsadsa",
__init__="",
)
- reprec = testdir.inline_run("--collect-only", p.dirpath())
+ reprec = pytester.inline_run("--collect-only", p.parent)
itemstarted = reprec.getcalls("pytest_itemcollected")
assert len(itemstarted) == 3
colfail = [x for x in finished if x.failed]
assert len(colfail) == 1
- def test_minus_x_import_error(self, testdir):
- testdir.makepyfile(__init__="")
- testdir.makepyfile(test_one="xxxx", test_two="yyyy")
- reprec = testdir.inline_run("-x", testdir.tmpdir)
+ def test_minus_x_import_error(self, pytester: Pytester) -> None:
+ pytester.makepyfile(__init__="")
+ pytester.makepyfile(test_one="xxxx", test_two="yyyy")
+ reprec = pytester.inline_run("-x", pytester.path)
finished = reprec.getreports("pytest_collectreport")
colfail = [x for x in finished if x.failed]
assert len(colfail) == 1
- def test_minus_x_overridden_by_maxfail(self, testdir):
- testdir.makepyfile(__init__="")
- testdir.makepyfile(test_one="xxxx", test_two="yyyy", test_third="zzz")
- reprec = testdir.inline_run("-x", "--maxfail=2", testdir.tmpdir)
+ def test_minus_x_overridden_by_maxfail(self, pytester: Pytester) -> None:
+ pytester.makepyfile(__init__="")
+ pytester.makepyfile(test_one="xxxx", test_two="yyyy", test_third="zzz")
+ reprec = pytester.inline_run("-x", "--maxfail=2", pytester.path)
finished = reprec.getreports("pytest_collectreport")
colfail = [x for x in finished if x.failed]
assert len(colfail) == 2
-def test_plugin_specify(testdir):
+def test_plugin_specify(pytester: Pytester) -> None:
with pytest.raises(ImportError):
- testdir.parseconfig("-p", "nqweotexistent")
+ pytester.parseconfig("-p", "nqweotexistent")
# pytest.raises(ImportError,
# "config.do_configure(config)"
# )
-def test_plugin_already_exists(testdir):
- config = testdir.parseconfig("-p", "terminal")
+def test_plugin_already_exists(pytester: Pytester) -> None:
+ config = pytester.parseconfig("-p", "terminal")
assert config.option.plugins == ["terminal"]
config._do_configure()
config._ensure_unconfigure()
-def test_exclude(testdir):
- hellodir = testdir.mkdir("hello")
- hellodir.join("test_hello.py").write("x y syntaxerror")
- hello2dir = testdir.mkdir("hello2")
- hello2dir.join("test_hello2.py").write("x y syntaxerror")
- testdir.makepyfile(test_ok="def test_pass(): pass")
- result = testdir.runpytest("--ignore=hello", "--ignore=hello2")
+def test_exclude(pytester: Pytester) -> None:
+ hellodir = pytester.mkdir("hello")
+ hellodir.joinpath("test_hello.py").write_text("x y syntaxerror")
+ hello2dir = pytester.mkdir("hello2")
+ hello2dir.joinpath("test_hello2.py").write_text("x y syntaxerror")
+ pytester.makepyfile(test_ok="def test_pass(): pass")
+ result = pytester.runpytest("--ignore=hello", "--ignore=hello2")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
-def test_exclude_glob(testdir):
- hellodir = testdir.mkdir("hello")
- hellodir.join("test_hello.py").write("x y syntaxerror")
- hello2dir = testdir.mkdir("hello2")
- hello2dir.join("test_hello2.py").write("x y syntaxerror")
- hello3dir = testdir.mkdir("hallo3")
- hello3dir.join("test_hello3.py").write("x y syntaxerror")
- subdir = testdir.mkdir("sub")
- subdir.join("test_hello4.py").write("x y syntaxerror")
- testdir.makepyfile(test_ok="def test_pass(): pass")
- result = testdir.runpytest("--ignore-glob=*h[ea]llo*")
+def test_exclude_glob(pytester: Pytester) -> None:
+ hellodir = pytester.mkdir("hello")
+ hellodir.joinpath("test_hello.py").write_text("x y syntaxerror")
+ hello2dir = pytester.mkdir("hello2")
+ hello2dir.joinpath("test_hello2.py").write_text("x y syntaxerror")
+ hello3dir = pytester.mkdir("hallo3")
+ hello3dir.joinpath("test_hello3.py").write_text("x y syntaxerror")
+ subdir = pytester.mkdir("sub")
+ subdir.joinpath("test_hello4.py").write_text("x y syntaxerror")
+ pytester.makepyfile(test_ok="def test_pass(): pass")
+ result = pytester.runpytest("--ignore-glob=*h[ea]llo*")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
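# A hedged sketch of the matching done by --ignore-glob: fnmatch-style patterns
# applied to paths (simplified here to bare directory names).
from fnmatch import fnmatch

names = ["hello", "hello2", "hallo3", "sub"]
ignored = [name for name in names if fnmatch(name, "*h[ea]llo*")]
assert ignored == ["hello", "hello2", "hallo3"]  # "sub" survives collection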
-def test_deselect(testdir):
- testdir.makepyfile(
+def test_deselect(pytester: Pytester) -> None:
+ pytester.makepyfile(
test_a="""
import pytest
def test_c2(self): pass
"""
)
- result = testdir.runpytest(
+ result = pytester.runpytest(
"-v",
"--deselect=test_a.py::test_a2[1]",
"--deselect=test_a.py::test_a2[2]",
assert not line.startswith(("test_a.py::test_a2[1]", "test_a.py::test_a2[2]"))
-def test_sessionfinish_with_start(testdir):
- testdir.makeconftest(
+def test_sessionfinish_with_start(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import os
values = []
"""
)
- res = testdir.runpytest("--collect-only")
+ res = pytester.runpytest("--collect-only")
assert res.ret == ExitCode.NO_TESTS_COLLECTED
@pytest.mark.parametrize("path", ["root", "{relative}/root", "{environment}/root"])
-def test_rootdir_option_arg(testdir, monkeypatch, path):
- monkeypatch.setenv("PY_ROOTDIR_PATH", str(testdir.tmpdir))
- path = path.format(relative=str(testdir.tmpdir), environment="$PY_ROOTDIR_PATH")
-
- rootdir = testdir.mkdir("root")
- rootdir.mkdir("tests")
- testdir.makepyfile(
+def test_rootdir_option_arg(
+ pytester: Pytester, monkeypatch: MonkeyPatch, path: str
+) -> None:
+ monkeypatch.setenv("PY_ROOTDIR_PATH", str(pytester.path))
+ path = path.format(relative=str(pytester.path), environment="$PY_ROOTDIR_PATH")
+
+ rootdir = pytester.path / "root" / "tests"
+ rootdir.mkdir(parents=True)
+ pytester.makepyfile(
"""
import os
def test_one():
"""
)
- result = testdir.runpytest("--rootdir={}".format(path))
+ result = pytester.runpytest(f"--rootdir={path}")
result.stdout.fnmatch_lines(
[
- "*rootdir: {}/root".format(testdir.tmpdir),
+ f"*rootdir: {pytester.path}/root",
"root/test_rootdir_option_arg.py *",
"*1 passed*",
]
)
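# A minimal sketch, with an illustrative path, of the environment expansion the
# "{environment}/root" parametrization relies on for --rootdir values.
import os

os.environ["PY_ROOTDIR_PATH"] = "/tmp/proj"
assert os.path.expandvars("$PY_ROOTDIR_PATH/root") == "/tmp/proj/root"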
-def test_rootdir_wrong_option_arg(testdir):
- result = testdir.runpytest("--rootdir=wrong_dir")
+def test_rootdir_wrong_option_arg(pytester: Pytester) -> None:
+ result = pytester.runpytest("--rootdir=wrong_dir")
result.stderr.fnmatch_lines(
["*Directory *wrong_dir* not found. Check your '--rootdir' option.*"]
)
import pytest
from _pytest.config import ExitCode
+from _pytest.pytester import Pytester
@pytest.fixture(params=["--setup-only", "--setup-plan", "--setup-show"], scope="module")
return request.param
-def test_show_only_active_fixtures(testdir, mode, dummy_yaml_custom_test):
- testdir.makepyfile(
+def test_show_only_active_fixtures(
+ pytester: Pytester, mode, dummy_yaml_custom_test
+) -> None:
+ pytester.makepyfile(
'''
import pytest
@pytest.fixture
'''
)
- result = testdir.runpytest(mode)
+ result = pytester.runpytest(mode)
assert result.ret == 0
result.stdout.fnmatch_lines(
result.stdout.no_fnmatch_line("*_arg0*")
-def test_show_different_scopes(testdir, mode):
- p = testdir.makepyfile(
+def test_show_different_scopes(pytester: Pytester, mode) -> None:
+ p = pytester.makepyfile(
'''
import pytest
@pytest.fixture
'''
)
- result = testdir.runpytest(mode, p)
+ result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
)
-def test_show_nested_fixtures(testdir, mode):
- testdir.makeconftest(
+def test_show_nested_fixtures(pytester: Pytester, mode) -> None:
+ pytester.makeconftest(
'''
import pytest
@pytest.fixture(scope='session')
"""session scoped fixture"""
'''
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
'''
import pytest
@pytest.fixture(scope='function')
'''
)
- result = testdir.runpytest(mode, p)
+ result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
)
-def test_show_fixtures_with_autouse(testdir, mode):
- p = testdir.makepyfile(
+def test_show_fixtures_with_autouse(pytester: Pytester, mode) -> None:
+ p = pytester.makepyfile(
'''
import pytest
@pytest.fixture
'''
)
- result = testdir.runpytest(mode, p)
+ result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
)
-def test_show_fixtures_with_parameters(testdir, mode):
- testdir.makeconftest(
+def test_show_fixtures_with_parameters(pytester: Pytester, mode) -> None:
+ pytester.makeconftest(
'''
import pytest
@pytest.fixture(scope='session', params=['foo', 'bar'])
"""session scoped fixture"""
'''
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
'''
import pytest
@pytest.fixture(scope='function')
'''
)
- result = testdir.runpytest(mode, p)
+ result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
)
-def test_show_fixtures_with_parameter_ids(testdir, mode):
- testdir.makeconftest(
+def test_show_fixtures_with_parameter_ids(pytester: Pytester, mode) -> None:
+ pytester.makeconftest(
'''
import pytest
@pytest.fixture(
"""session scoped fixture"""
'''
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
'''
import pytest
@pytest.fixture(scope='function')
'''
)
- result = testdir.runpytest(mode, p)
+ result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
)
-def test_show_fixtures_with_parameter_ids_function(testdir, mode):
- p = testdir.makepyfile(
+def test_show_fixtures_with_parameter_ids_function(pytester: Pytester, mode) -> None:
+ p = pytester.makepyfile(
"""
import pytest
@pytest.fixture(params=['foo', 'bar'], ids=lambda p: p.upper())
"""
)
- result = testdir.runpytest(mode, p)
+ result = pytester.runpytest(mode, p)
assert result.ret == 0
result.stdout.fnmatch_lines(
)
-def test_dynamic_fixture_request(testdir):
- p = testdir.makepyfile(
+def test_dynamic_fixture_request(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
@pytest.fixture()
"""
)
- result = testdir.runpytest("--setup-only", p)
+ result = pytester.runpytest("--setup-only", p)
assert result.ret == 0
result.stdout.fnmatch_lines(
)
-def test_capturing(testdir):
- p = testdir.makepyfile(
+def test_capturing(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest, sys
@pytest.fixture()
"""
)
- result = testdir.runpytest("--setup-only", p)
+ result = pytester.runpytest("--setup-only", p)
result.stdout.fnmatch_lines(
["this should be captured", "this should also be captured"]
)
-def test_show_fixtures_and_execute_test(testdir):
+def test_show_fixtures_and_execute_test(pytester: Pytester) -> None:
"""Verify that setups are shown and tests are executed."""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import pytest
@pytest.fixture
"""
)
- result = testdir.runpytest("--setup-show", p)
+ result = pytester.runpytest("--setup-show", p)
assert result.ret == 1
result.stdout.fnmatch_lines(
)
-def test_setup_show_with_KeyboardInterrupt_in_test(testdir):
- p = testdir.makepyfile(
+def test_setup_show_with_KeyboardInterrupt_in_test(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
@pytest.fixture
raise KeyboardInterrupt()
"""
)
- result = testdir.runpytest("--setup-show", p, no_reraise_ctrlc=True)
+ result = pytester.runpytest("--setup-show", p, no_reraise_ctrlc=True)
result.stdout.fnmatch_lines(
[
"*SETUP F arg*",
assert result.ret == ExitCode.INTERRUPTED
-def test_show_fixture_action_with_bytes(testdir):
+def test_show_fixture_action_with_bytes(pytester: Pytester) -> None:
    # Issue 7126: BytesWarning when using --setup-show with a bytes parameter
- test_file = testdir.makepyfile(
+ test_file = pytester.makepyfile(
"""
import pytest
pass
"""
)
- result = testdir.run(
+ result = pytester.run(
sys.executable, "-bb", "-m", "pytest", "--setup-show", str(test_file)
)
assert result.ret == 0
-def test_show_fixtures_and_test(testdir, dummy_yaml_custom_test):
+from _pytest.pytester import Pytester
+
+
+def test_show_fixtures_and_test(
+ pytester: Pytester, dummy_yaml_custom_test: None
+) -> None:
"""Verify that fixtures are not executed."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
@pytest.fixture
"""
)
- result = testdir.runpytest("--setup-plan")
+ result = pytester.runpytest("--setup-plan")
assert result.ret == 0
result.stdout.fnmatch_lines(
)
-def test_show_multi_test_fixture_setup_and_teardown_correctly_simple(testdir):
+def test_show_multi_test_fixture_setup_and_teardown_correctly_simple(
+ pytester: Pytester,
+) -> None:
"""Verify that when a fixture lives for longer than a single test, --setup-plan
correctly displays the SETUP/TEARDOWN indicators the right number of times.
correct fixture lifetimes. It was purely a display bug for --setup-plan, and
did not affect the related --setup-show or --setup-only.)
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope = 'class')
"""
)
- result = testdir.runpytest("--setup-plan")
+ result = pytester.runpytest("--setup-plan")
assert result.ret == 0
setup_fragment = "SETUP C fix"
assert teardown_count == 1
-def test_show_multi_test_fixture_setup_and_teardown_same_as_setup_show(testdir):
+def test_show_multi_test_fixture_setup_and_teardown_same_as_setup_show(
+ pytester: Pytester,
+) -> None:
"""Verify that SETUP/TEARDOWN messages match what comes out of --setup-show."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope = 'session')
"""
)
- plan_result = testdir.runpytest("--setup-plan")
- show_result = testdir.runpytest("--setup-show")
+ plan_result = pytester.runpytest("--setup-plan")
+ show_result = pytester.runpytest("--setup-show")
# the number and text of these lines should be identical
plan_lines = [
import sys
+import textwrap
import pytest
-from _pytest.pytester import Testdir
+from _pytest.pytester import Pytester
from _pytest.runner import runtestprotocol
from _pytest.skipping import evaluate_skip_marks
from _pytest.skipping import evaluate_xfail_marks
class TestEvaluation:
- def test_no_marker(self, testdir):
- item = testdir.getitem("def test_func(): pass")
+ def test_no_marker(self, pytester: Pytester) -> None:
+ item = pytester.getitem("def test_func(): pass")
skipped = evaluate_skip_marks(item)
assert not skipped
- def test_marked_xfail_no_args(self, testdir):
- item = testdir.getitem(
+ def test_marked_xfail_no_args(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.xfail
assert xfailed.reason == ""
assert xfailed.run
- def test_marked_skipif_no_args(self, testdir):
- item = testdir.getitem(
+ def test_marked_skipif_no_args(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.skipif
assert skipped
assert skipped.reason == ""
- def test_marked_one_arg(self, testdir):
- item = testdir.getitem(
+ def test_marked_one_arg(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.skipif("hasattr(os, 'sep')")
assert skipped
assert skipped.reason == "condition: hasattr(os, 'sep')"
- def test_marked_one_arg_with_reason(self, testdir):
- item = testdir.getitem(
+ def test_marked_one_arg_with_reason(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.skipif("hasattr(os, 'sep')", attr=2, reason="hello world")
assert skipped
assert skipped.reason == "hello world"
- def test_marked_one_arg_twice(self, testdir):
+ def test_marked_one_arg_twice(self, pytester: Pytester) -> None:
lines = [
"""@pytest.mark.skipif("not hasattr(os, 'murks')")""",
"""@pytest.mark.skipif(condition="hasattr(os, 'murks')")""",
]
for i in range(0, 2):
- item = testdir.getitem(
+ item = pytester.getitem(
"""
import pytest
%s
assert skipped
assert skipped.reason == "condition: not hasattr(os, 'murks')"
- def test_marked_one_arg_twice2(self, testdir):
- item = testdir.getitem(
+ def test_marked_one_arg_twice2(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.skipif("hasattr(os, 'murks')")
assert skipped
assert skipped.reason == "condition: not hasattr(os, 'murks')"
- def test_marked_skipif_with_boolean_without_reason(self, testdir) -> None:
- item = testdir.getitem(
+ def test_marked_skipif_with_boolean_without_reason(
+ self, pytester: Pytester
+ ) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.skipif(False)
in excinfo.value.msg
)
- def test_marked_skipif_with_invalid_boolean(self, testdir) -> None:
- item = testdir.getitem(
+ def test_marked_skipif_with_invalid_boolean(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
assert "Error evaluating 'skipif' condition as a boolean" in excinfo.value.msg
assert "INVALID" in excinfo.value.msg
- def test_skipif_class(self, testdir):
- (item,) = testdir.getitems(
+ def test_skipif_class(self, pytester: Pytester) -> None:
+ (item,) = pytester.getitems(
"""
import pytest
class TestClass(object):
pass
"""
)
- item.config._hackxyz = 3
+ item.config._hackxyz = 3 # type: ignore[attr-defined]
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "condition: config._hackxyz"
+ def test_skipif_markeval_namespace(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
+ """
+ import pytest
+
+ def pytest_markeval_namespace():
+ return {"color": "green"}
+ """
+ )
+ p = pytester.makepyfile(
+ """
+ import pytest
+
+ @pytest.mark.skipif("color == 'green'")
+ def test_1():
+ assert True
+
+ @pytest.mark.skipif("color == 'red'")
+ def test_2():
+ assert True
+ """
+ )
+ res = pytester.runpytest(p)
+ assert res.ret == 0
+ res.stdout.fnmatch_lines(["*1 skipped*"])
+ res.stdout.fnmatch_lines(["*1 passed*"])
+
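# A hedged sketch of why test_1 above is skipped: the dict returned by the
# pytest_markeval_namespace hook is made available when the string condition is
# eval()-uated (simplified; pytest also injects names like os, sys, platform).
namespace = {"color": "green"}
assert eval("color == 'green'", dict(namespace)) is True
assert eval("color == 'red'", dict(namespace)) is False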
+ def test_skipif_markeval_namespace_multiple(self, pytester: Pytester) -> None:
+ """Keys defined by ``pytest_markeval_namespace()`` in nested plugins override top-level ones."""
+ root = pytester.mkdir("root")
+ root.joinpath("__init__.py").touch()
+ root.joinpath("conftest.py").write_text(
+ textwrap.dedent(
+ """\
+ import pytest
+
+ def pytest_markeval_namespace():
+ return {"arg": "root"}
+ """
+ )
+ )
+ root.joinpath("test_root.py").write_text(
+ textwrap.dedent(
+ """\
+ import pytest
+
+ @pytest.mark.skipif("arg == 'root'")
+ def test_root():
+ assert False
+ """
+ )
+ )
+ foo = root.joinpath("foo")
+ foo.mkdir()
+ foo.joinpath("__init__.py").touch()
+ foo.joinpath("conftest.py").write_text(
+ textwrap.dedent(
+ """\
+ import pytest
+
+ def pytest_markeval_namespace():
+ return {"arg": "foo"}
+ """
+ )
+ )
+ foo.joinpath("test_foo.py").write_text(
+ textwrap.dedent(
+ """\
+ import pytest
+
+ @pytest.mark.skipif("arg == 'foo'")
+ def test_foo():
+ assert False
+ """
+ )
+ )
+ bar = root.joinpath("bar")
+ bar.mkdir()
+ bar.joinpath("__init__.py").touch()
+ bar.joinpath("conftest.py").write_text(
+ textwrap.dedent(
+ """\
+ import pytest
+
+ def pytest_markeval_namespace():
+ return {"arg": "bar"}
+ """
+ )
+ )
+ bar.joinpath("test_bar.py").write_text(
+ textwrap.dedent(
+ """\
+ import pytest
+
+ @pytest.mark.skipif("arg == 'bar'")
+ def test_bar():
+ assert False
+ """
+ )
+ )
+
+ reprec = pytester.inline_run("-vs", "--capture=no")
+ reprec.assertoutcome(skipped=3)
+
+ def test_skipif_markeval_namespace_ValueError(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
+ """
+ import pytest
+
+ def pytest_markeval_namespace():
+ return True
+ """
+ )
+ p = pytester.makepyfile(
+ """
+ import pytest
+
+ @pytest.mark.skipif("color == 'green'")
+ def test_1():
+ assert True
+ """
+ )
+ res = pytester.runpytest(p)
+ assert res.ret == 1
+ res.stdout.fnmatch_lines(
+ [
+ "*ValueError: pytest_markeval_namespace() needs to return a dict, got True*"
+ ]
+ )
+
class TestXFail:
@pytest.mark.parametrize("strict", [True, False])
- def test_xfail_simple(self, testdir, strict):
- item = testdir.getitem(
+ def test_xfail_simple(self, pytester: Pytester, strict: bool) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.xfail(strict=%s)
assert callreport.skipped
assert callreport.wasxfail == ""
- def test_xfail_xpassed(self, testdir):
- item = testdir.getitem(
+ def test_xfail_xpassed(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.xfail(reason="this is an xfail")
assert callreport.passed
assert callreport.wasxfail == "this is an xfail"
- def test_xfail_using_platform(self, testdir):
+ def test_xfail_using_platform(self, pytester: Pytester) -> None:
"""Verify that platform can be used with xfail statements."""
- item = testdir.getitem(
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.xfail("platform.platform() == platform.platform()")
callreport = reports[1]
assert callreport.wasxfail
- def test_xfail_xpassed_strict(self, testdir):
- item = testdir.getitem(
+ def test_xfail_xpassed_strict(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.xfail(strict=True, reason="nope")
assert str(callreport.longrepr) == "[XPASS(strict)] nope"
assert not hasattr(callreport, "wasxfail")
- def test_xfail_run_anyway(self, testdir):
- testdir.makepyfile(
+ def test_xfail_run_anyway(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail
pytest.xfail("hello")
"""
)
- result = testdir.runpytest("--runxfail")
+ result = pytester.runpytest("--runxfail")
result.stdout.fnmatch_lines(
["*def test_func():*", "*assert 0*", "*1 failed*1 pass*"]
)
),
],
)
- def test_xfail_run_with_skip_mark(self, testdir, test_input, expected):
- testdir.makepyfile(
+ def test_xfail_run_with_skip_mark(
+ self, pytester: Pytester, test_input, expected
+ ) -> None:
+ pytester.makepyfile(
test_sample="""
import pytest
@pytest.mark.skip
assert 0
"""
)
- result = testdir.runpytest(*test_input)
+ result = pytester.runpytest(*test_input)
result.stdout.fnmatch_lines(expected)
- def test_xfail_evalfalse_but_fails(self, testdir):
- item = testdir.getitem(
+ def test_xfail_evalfalse_but_fails(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.xfail('False')
assert not hasattr(callreport, "wasxfail")
assert "xfail" in callreport.keywords
- def test_xfail_not_report_default(self, testdir):
- p = testdir.makepyfile(
+ def test_xfail_not_report_default(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
test_one="""
import pytest
@pytest.mark.xfail
assert 0
"""
)
- testdir.runpytest(p, "-v")
+ pytester.runpytest(p, "-v")
# result.stdout.fnmatch_lines([
# "*HINT*use*-r*"
# ])
- def test_xfail_not_run_xfail_reporting(self, testdir):
- p = testdir.makepyfile(
+ def test_xfail_not_run_xfail_reporting(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
test_one="""
import pytest
@pytest.mark.xfail(run=False, reason="noway")
assert 1
"""
)
- result = testdir.runpytest(p, "-rx")
+ result = pytester.runpytest(p, "-rx")
result.stdout.fnmatch_lines(
[
"*test_one*test_this*",
]
)
- def test_xfail_not_run_no_setup_run(self, testdir):
- p = testdir.makepyfile(
+ def test_xfail_not_run_no_setup_run(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
test_one="""
import pytest
@pytest.mark.xfail(run=False, reason="hello")
raise ValueError(42)
"""
)
- result = testdir.runpytest(p, "-rx")
+ result = pytester.runpytest(p, "-rx")
result.stdout.fnmatch_lines(
["*test_one*test_this*", "*NOTRUN*hello", "*1 xfailed*"]
)
- def test_xfail_xpass(self, testdir):
- p = testdir.makepyfile(
+ def test_xfail_xpass(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
test_one="""
import pytest
@pytest.mark.xfail
assert 1
"""
)
- result = testdir.runpytest(p, "-rX")
+ result = pytester.runpytest(p, "-rX")
result.stdout.fnmatch_lines(["*XPASS*test_that*", "*1 xpassed*"])
assert result.ret == 0
- def test_xfail_imperative(self, testdir):
- p = testdir.makepyfile(
+ def test_xfail_imperative(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
def test_this():
pytest.xfail("hello")
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["*1 xfailed*"])
- result = testdir.runpytest(p, "-rx")
+ result = pytester.runpytest(p, "-rx")
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"])
- result = testdir.runpytest(p, "--runxfail")
+ result = pytester.runpytest(p, "--runxfail")
result.stdout.fnmatch_lines(["*1 pass*"])
- def test_xfail_imperative_in_setup_function(self, testdir):
- p = testdir.makepyfile(
+ def test_xfail_imperative_in_setup_function(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
def setup_function(function):
assert 0
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["*1 xfailed*"])
- result = testdir.runpytest(p, "-rx")
+ result = pytester.runpytest(p, "-rx")
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"])
- result = testdir.runpytest(p, "--runxfail")
+ result = pytester.runpytest(p, "--runxfail")
result.stdout.fnmatch_lines(
"""
*def test_this*
"""
)
- def xtest_dynamic_xfail_set_during_setup(self, testdir):
- p = testdir.makepyfile(
+ def xtest_dynamic_xfail_set_during_setup(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
def setup_function(function):
assert 1
"""
)
- result = testdir.runpytest(p, "-rxX")
+ result = pytester.runpytest(p, "-rxX")
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*XPASS*test_that*"])
- def test_dynamic_xfail_no_run(self, testdir):
- p = testdir.makepyfile(
+ def test_dynamic_xfail_no_run(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
@pytest.fixture
assert 0
"""
)
- result = testdir.runpytest(p, "-rxX")
+ result = pytester.runpytest(p, "-rxX")
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*NOTRUN*"])
- def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
- p = testdir.makepyfile(
+ def test_dynamic_xfail_set_during_funcarg_setup(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
@pytest.fixture
assert 0
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["*1 xfailed*"])
- def test_dynamic_xfail_set_during_runtest_failed(self, testdir: Testdir) -> None:
+ def test_dynamic_xfail_set_during_runtest_failed(self, pytester: Pytester) -> None:
# Issue #7486.
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import pytest
def test_this(request):
assert 0
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.assert_outcomes(xfailed=1)
def test_dynamic_xfail_set_during_runtest_passed_strict(
- self, testdir: Testdir
+ self, pytester: Pytester
) -> None:
# Issue #7486.
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import pytest
def test_this(request):
request.node.add_marker(pytest.mark.xfail(reason="xfail", strict=True))
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.assert_outcomes(failed=1)
@pytest.mark.parametrize(
("(AttributeError, TypeError)", "IndexError", "*1 failed*"),
],
)
- def test_xfail_raises(self, expected, actual, matchline, testdir):
- p = testdir.makepyfile(
+ def test_xfail_raises(
+ self, expected, actual, matchline, pytester: Pytester
+ ) -> None:
+ p = pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail(raises=%s)
"""
% (expected, actual)
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines([matchline])
- def test_strict_sanity(self, testdir):
+ def test_strict_sanity(self, pytester: Pytester) -> None:
"""Sanity check for xfail(strict=True): a failing test should behave
exactly like a normal xfail."""
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail(reason='unsupported feature', strict=True)
assert 0
"""
)
- result = testdir.runpytest(p, "-rxX")
+ result = pytester.runpytest(p, "-rxX")
result.stdout.fnmatch_lines(["*XFAIL*", "*unsupported feature*"])
assert result.ret == 0
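# A minimal sketch of xfail strictness; the test names are illustrative.
# Strictness only changes the outcome when the test unexpectedly passes.
import pytest

@pytest.mark.xfail(reason="unsupported feature", strict=True)
def test_fails_as_expected():
    assert 0  # reported as XFAIL; exit code stays 0

@pytest.mark.xfail(reason="unsupported feature", strict=True)
def test_passes_unexpectedly():
    assert 1  # [XPASS(strict)]: reported as a failure, exit code 1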
@pytest.mark.parametrize("strict", [True, False])
- def test_strict_xfail(self, testdir, strict):
- p = testdir.makepyfile(
+ def test_strict_xfail(self, pytester: Pytester, strict: bool) -> None:
+ p = pytester.makepyfile(
"""
import pytest
"""
% strict
)
- result = testdir.runpytest(p, "-rxX")
+ result = pytester.runpytest(p, "-rxX")
if strict:
result.stdout.fnmatch_lines(
["*test_foo*", "*XPASS(strict)*unsupported feature*"]
]
)
assert result.ret == (1 if strict else 0)
- assert testdir.tmpdir.join("foo_executed").isfile()
+ assert pytester.path.joinpath("foo_executed").exists()
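
The `tmpdir.join(...).isfile()` to `path.joinpath(...).exists()` change above is the recurring translation in this migration: `Testdir` wraps `py.path.local` objects while `Pytester` exposes `pathlib.Path`. A rough sketch of the idiom mapping, with an illustrative helper:

    from pathlib import Path

    # py.path.local (Testdir)         ->  pathlib.Path (Pytester)
    # tmpdir.join("f").isfile()       ->  path.joinpath("f").is_file()
    # tmpdir.join("d").ensure_dir()   ->  path.joinpath("d").mkdir()
    # p.basename                      ->  p.name
    # p.dirpath()                     ->  p.parent

    def created_marker_file(tmp: Path, name: str) -> bool:
        # pathlib form of the old "tmpdir.join(name).isfile()" check
        return tmp.joinpath(name).is_file()
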
@pytest.mark.parametrize("strict", [True, False])
- def test_strict_xfail_condition(self, testdir, strict):
- p = testdir.makepyfile(
+ def test_strict_xfail_condition(self, pytester: Pytester, strict: bool) -> None:
+ p = pytester.makepyfile(
"""
import pytest
"""
% strict
)
- result = testdir.runpytest(p, "-rxX")
+ result = pytester.runpytest(p, "-rxX")
result.stdout.fnmatch_lines(["*1 passed*"])
assert result.ret == 0
@pytest.mark.parametrize("strict", [True, False])
- def test_xfail_condition_keyword(self, testdir, strict):
- p = testdir.makepyfile(
+ def test_xfail_condition_keyword(self, pytester: Pytester, strict: bool) -> None:
+ p = pytester.makepyfile(
"""
import pytest
"""
% strict
)
- result = testdir.runpytest(p, "-rxX")
+ result = pytester.runpytest(p, "-rxX")
result.stdout.fnmatch_lines(["*1 passed*"])
assert result.ret == 0
@pytest.mark.parametrize("strict_val", ["true", "false"])
- def test_strict_xfail_default_from_file(self, testdir, strict_val):
- testdir.makeini(
+ def test_strict_xfail_default_from_file(
+ self, pytester: Pytester, strict_val
+ ) -> None:
+ pytester.makeini(
"""
[pytest]
xfail_strict = %s
"""
% strict_val
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail(reason='unsupported feature')
pass
"""
)
- result = testdir.runpytest(p, "-rxX")
+ result = pytester.runpytest(p, "-rxX")
strict = strict_val == "true"
result.stdout.fnmatch_lines(["*1 failed*" if strict else "*1 xpassed*"])
assert result.ret == (1 if strict else 0)
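
A compact sketch of the ini default this test drives: with xfail_strict = true in effect, an unexpected pass is reported as a failure unless a marker overrides it with strict=False.

    # pytest.ini (sketch)
    #   [pytest]
    #   xfail_strict = true

    import pytest

    @pytest.mark.xfail(reason="unsupported feature")
    def test_unexpectedly_passing():
        pass  # XPASS -> reported as failed under xfail_strict = true
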
+ def test_xfail_markeval_namespace(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
+ """
+ import pytest
+
+ def pytest_markeval_namespace():
+ return {"color": "green"}
+ """
+ )
+ p = pytester.makepyfile(
+ """
+ import pytest
+
+ @pytest.mark.xfail("color == 'green'")
+ def test_1():
+ assert False
+
+ @pytest.mark.xfail("color == 'red'")
+ def test_2():
+ assert False
+ """
+ )
+ res = pytester.runpytest(p)
+ assert res.ret == 1
+ res.stdout.fnmatch_lines(["*1 failed*"])
+ res.stdout.fnmatch_lines(["*1 xfailed*"])
+
class TestXFailwithSetupTeardown:
- def test_failing_setup_issue9(self, testdir):
- testdir.makepyfile(
+ def test_failing_setup_issue9(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
def setup_function(func):
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 xfail*"])
- def test_failing_teardown_issue9(self, testdir):
- testdir.makepyfile(
+ def test_failing_teardown_issue9(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
def teardown_function(func):
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 xfail*"])
class TestSkip:
- def test_skip_class(self, testdir):
- testdir.makepyfile(
+ def test_skip_class(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skip
pass
"""
)
- rec = testdir.inline_run()
+ rec = pytester.inline_run()
rec.assertoutcome(skipped=2, passed=1)
- def test_skips_on_false_string(self, testdir):
- testdir.makepyfile(
+ def test_skips_on_false_string(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skip('False')
pass
"""
)
- rec = testdir.inline_run()
+ rec = pytester.inline_run()
rec.assertoutcome(skipped=1)
- def test_arg_as_reason(self, testdir):
- testdir.makepyfile(
+ def test_arg_as_reason(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skip('testing stuff')
pass
"""
)
- result = testdir.runpytest("-rs")
+ result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*testing stuff*", "*1 skipped*"])
- def test_skip_no_reason(self, testdir):
- testdir.makepyfile(
+ def test_skip_no_reason(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skip
pass
"""
)
- result = testdir.runpytest("-rs")
+ result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"])
- def test_skip_with_reason(self, testdir):
- testdir.makepyfile(
+ def test_skip_with_reason(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skip(reason="for lolz")
pass
"""
)
- result = testdir.runpytest("-rs")
+ result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*for lolz*", "*1 skipped*"])
- def test_only_skips_marked_test(self, testdir):
- testdir.makepyfile(
+ def test_only_skips_marked_test(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skip
assert True
"""
)
- result = testdir.runpytest("-rs")
+ result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*nothing in particular*", "*1 passed*2 skipped*"])
- def test_strict_and_skip(self, testdir):
- testdir.makepyfile(
+ def test_strict_and_skip(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skip
pass
"""
)
- result = testdir.runpytest("-rs")
+ result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"])
class TestSkipif:
- def test_skipif_conditional(self, testdir):
- item = testdir.getitem(
+ def test_skipif_conditional(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.skipif("hasattr(os, 'sep')")
@pytest.mark.parametrize(
"params", ["\"hasattr(sys, 'platform')\"", 'True, reason="invalid platform"']
)
- def test_skipif_reporting(self, testdir, params):
- p = testdir.makepyfile(
+ def test_skipif_reporting(self, pytester: Pytester, params) -> None:
+ p = pytester.makepyfile(
test_foo="""
import pytest
@pytest.mark.skipif(%(params)s)
"""
% dict(params=params)
)
- result = testdir.runpytest(p, "-s", "-rs")
+ result = pytester.runpytest(p, "-s", "-rs")
result.stdout.fnmatch_lines(["*SKIP*1*test_foo.py*platform*", "*1 skipped*"])
assert result.ret == 0
- def test_skipif_using_platform(self, testdir):
- item = testdir.getitem(
+ def test_skipif_using_platform(self, pytester: Pytester) -> None:
+ item = pytester.getitem(
"""
import pytest
@pytest.mark.skipif("platform.platform() == platform.platform()")
"marker, msg1, msg2",
[("skipif", "SKIP", "skipped"), ("xfail", "XPASS", "xpassed")],
)
- def test_skipif_reporting_multiple(self, testdir, marker, msg1, msg2):
- testdir.makepyfile(
+ def test_skipif_reporting_multiple(
+ self, pytester: Pytester, marker, msg1, msg2
+ ) -> None:
+ pytester.makepyfile(
test_foo="""
import pytest
@pytest.mark.{marker}(False, reason='first_condition')
marker=marker
)
)
- result = testdir.runpytest("-s", "-rsxX")
+ result = pytester.runpytest("-s", "-rsxX")
result.stdout.fnmatch_lines(
- [
- "*{msg1}*test_foo.py*second_condition*".format(msg1=msg1),
- "*1 {msg2}*".format(msg2=msg2),
- ]
+ [f"*{msg1}*test_foo.py*second_condition*", f"*1 {msg2}*"]
)
assert result.ret == 0
-def test_skip_not_report_default(testdir):
- p = testdir.makepyfile(
+def test_skip_not_report_default(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
test_one="""
import pytest
def test_this():
pytest.skip("hello")
"""
)
- result = testdir.runpytest(p, "-v")
+ result = pytester.runpytest(p, "-v")
result.stdout.fnmatch_lines(
[
# "*HINT*use*-r*",
)
-def test_skipif_class(testdir):
- p = testdir.makepyfile(
+def test_skipif_class(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
assert 0
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["*2 skipped*"])
-def test_skipped_reasons_functional(testdir):
- testdir.makepyfile(
+def test_skipped_reasons_functional(pytester: Pytester) -> None:
+ pytester.makepyfile(
test_one="""
import pytest
from conftest import doskip
pytest.skip('test')
""",
)
- result = testdir.runpytest("-rs")
+ result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines_random(
[
"SKIPPED [[]2[]] conftest.py:4: test",
assert result.ret == 0
-def test_skipped_folding(testdir):
- testdir.makepyfile(
+def test_skipped_folding(pytester: Pytester) -> None:
+ pytester.makepyfile(
test_one="""
import pytest
pytestmark = pytest.mark.skip("Folding")
pass
"""
)
- result = testdir.runpytest("-rs")
+ result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(["*SKIP*2*test_one.py: Folding"])
assert result.ret == 0
-def test_reportchars(testdir):
- testdir.makepyfile(
+def test_reportchars(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
def test_1():
pytest.skip("four")
"""
)
- result = testdir.runpytest("-rfxXs")
+ result = pytester.runpytest("-rfxXs")
result.stdout.fnmatch_lines(
["FAIL*test_1*", "XFAIL*test_2*", "XPASS*test_3*", "SKIP*four*"]
)
-def test_reportchars_error(testdir):
- testdir.makepyfile(
+def test_reportchars_error(pytester: Pytester) -> None:
+ pytester.makepyfile(
conftest="""
def pytest_runtest_teardown():
assert 0
pass
""",
)
- result = testdir.runpytest("-rE")
+ result = pytester.runpytest("-rE")
result.stdout.fnmatch_lines(["ERROR*test_foo*"])
-def test_reportchars_all(testdir):
- testdir.makepyfile(
+def test_reportchars_all(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
def test_1():
pass
"""
)
- result = testdir.runpytest("-ra")
+ result = pytester.runpytest("-ra")
result.stdout.fnmatch_lines(
[
"SKIP*four*",
)
-def test_reportchars_all_error(testdir):
- testdir.makepyfile(
+def test_reportchars_all_error(pytester: Pytester) -> None:
+ pytester.makepyfile(
conftest="""
def pytest_runtest_teardown():
assert 0
pass
""",
)
- result = testdir.runpytest("-ra")
+ result = pytester.runpytest("-ra")
result.stdout.fnmatch_lines(["ERROR*test_foo*"])
-def test_errors_in_xfail_skip_expressions(testdir) -> None:
- testdir.makepyfile(
+def test_errors_in_xfail_skip_expressions(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skipif("asd")
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
markline = " ^"
pypy_version_info = getattr(sys, "pypy_version_info", None)
if pypy_version_info is not None and pypy_version_info < (6,):
)
-def test_xfail_skipif_with_globals(testdir):
- testdir.makepyfile(
+def test_xfail_skipif_with_globals(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
x = 3
assert 0
"""
)
- result = testdir.runpytest("-rsx")
+ result = pytester.runpytest("-rsx")
result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"])
-def test_default_markers(testdir):
- result = testdir.runpytest("--markers")
+def test_default_markers(pytester: Pytester) -> None:
+ result = pytester.runpytest("--markers")
result.stdout.fnmatch_lines(
[
"*skipif(condition, ..., [*], reason=...)*skip*",
)
-def test_xfail_test_setup_exception(testdir):
- testdir.makeconftest(
+def test_xfail_test_setup_exception(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_runtest_setup():
0 / 0
"""
)
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail
assert 0
"""
)
- result = testdir.runpytest(p)
+ result = pytester.runpytest(p)
assert result.ret == 0
assert "xfailed" in result.stdout.str()
result.stdout.no_fnmatch_line("*xpassed*")
-def test_imperativeskip_on_xfail_test(testdir):
- testdir.makepyfile(
+def test_imperativeskip_on_xfail_test(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail
pass
"""
)
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
def pytest_runtest_setup(item):
pytest.skip("abc")
"""
)
- result = testdir.runpytest("-rsxX")
+ result = pytester.runpytest("-rsxX")
result.stdout.fnmatch_lines_random(
"""
*SKIP*abc*
class TestBooleanCondition:
- def test_skipif(self, testdir):
- testdir.makepyfile(
+ def test_skipif(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skipif(True, reason="True123")
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
"""
*1 passed*1 skipped*
"""
)
- def test_skipif_noreason(self, testdir):
- testdir.makepyfile(
+ def test_skipif_noreason(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.skipif(True)
pass
"""
)
- result = testdir.runpytest("-rs")
+ result = pytester.runpytest("-rs")
result.stdout.fnmatch_lines(
"""
*1 error*
"""
)
- def test_xfail(self, testdir):
- testdir.makepyfile(
+ def test_xfail(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail(True, reason="True123")
assert 0
"""
)
- result = testdir.runpytest("-rxs")
+ result = pytester.runpytest("-rxs")
result.stdout.fnmatch_lines(
"""
*XFAIL*
)
-def test_xfail_item(testdir):
+def test_xfail_item(pytester: Pytester) -> None:
# Ensure pytest.xfail works with non-Python Item
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
return MyItem.from_parent(name="foo", parent=parent)
"""
)
- result = testdir.inline_run()
+ result = pytester.inline_run()
passed, skipped, failed = result.listoutcomes()
assert not failed
xfailed = [r for r in skipped if hasattr(r, "wasxfail")]
assert xfailed
-def test_module_level_skip_error(testdir):
+def test_module_level_skip_error(pytester: Pytester) -> None:
"""Verify that using pytest.skip at module level causes a collection error."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
pytest.skip("skip_module_level")
assert True
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
["*Using pytest.skip outside of a test is not allowed*"]
)
-def test_module_level_skip_with_allow_module_level(testdir):
+def test_module_level_skip_with_allow_module_level(pytester: Pytester) -> None:
"""Verify that using pytest.skip(allow_module_level=True) is allowed."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
pytest.skip("skip_module_level", allow_module_level=True)
assert 0
"""
)
- result = testdir.runpytest("-rxs")
+ result = pytester.runpytest("-rxs")
result.stdout.fnmatch_lines(["*SKIP*skip_module_level"])
-def test_invalid_skip_keyword_parameter(testdir):
+def test_invalid_skip_keyword_parameter(pytester: Pytester) -> None:
"""Verify that using pytest.skip() with unknown parameter raises an error."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
pytest.skip("skip_module_level", unknown=1)
assert 0
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*TypeError:*['unknown']*"])
-def test_mark_xfail_item(testdir):
+def test_mark_xfail_item(pytester: Pytester) -> None:
# Ensure pytest.mark.xfail works with non-Python Item
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import pytest
return MyItem.from_parent(name="foo", parent=parent)
"""
)
- result = testdir.inline_run()
+ result = pytester.inline_run()
passed, skipped, failed = result.listoutcomes()
assert not failed
xfailed = [r for r in skipped if hasattr(r, "wasxfail")]
assert xfailed
-def test_summary_list_after_errors(testdir):
+def test_summary_list_after_errors(pytester: Pytester) -> None:
"""Ensure the list of errors/fails/xfails/skips appears after tracebacks in terminal reporting."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
def test_fail():
assert 0
"""
)
- result = testdir.runpytest("-ra")
+ result = pytester.runpytest("-ra")
result.stdout.fnmatch_lines(
[
"=* FAILURES *=",
)
-def test_importorskip():
+def test_importorskip() -> None:
with pytest.raises(
pytest.skip.Exception,
match="^could not import 'doesnotexist': No module named .*",
pytest.importorskip("doesnotexist")
-def test_relpath_rootdir(testdir):
- testdir.makepyfile(
+def test_relpath_rootdir(pytester: Pytester) -> None:
+ pytester.makepyfile(
**{
"tests/test_1.py": """
import pytest
""",
}
)
- result = testdir.runpytest("-rs", "tests/test_1.py", "--rootdir=tests")
+ result = pytester.runpytest("-rs", "tests/test_1.py", "--rootdir=tests")
result.stdout.fnmatch_lines(
["SKIPPED [[]1[]] tests/test_1.py:2: unconditional skip"]
)
import pytest
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.pytester import Pytester
@pytest.fixture
-def stepwise_testdir(testdir):
+def stepwise_pytester(pytester: Pytester) -> Pytester:
# Rather than having to modify our testfile between tests, we introduce
# a flag for whether or not the second test should fail.
- testdir.makeconftest(
+ pytester.makeconftest(
"""
def pytest_addoption(parser):
group = parser.getgroup('general')
)
# Create a simple test suite.
- testdir.makepyfile(
+ pytester.makepyfile(
test_a="""
def test_success_before_fail():
assert 1
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
test_b="""
def test_success():
assert 1
)
    # Customize the cache directory so we don't use tox's cache directory, which makes tests in this module flaky.
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
cache_dir = .cache
"""
)
- return testdir
+ return pytester
@pytest.fixture
-def error_testdir(testdir):
- testdir.makepyfile(
+def error_pytester(pytester: Pytester) -> Pytester:
+ pytester.makepyfile(
test_a="""
def test_error(nonexisting_fixture):
assert 1
"""
)
- return testdir
+ return pytester
@pytest.fixture
-def broken_testdir(testdir):
- testdir.makepyfile(
+def broken_pytester(pytester: Pytester) -> Pytester:
+ pytester.makepyfile(
working_testfile="def test_proper(): assert 1", broken_testfile="foobar"
)
- return testdir
+ return pytester
def _strip_resource_warnings(lines):
]
-def test_run_without_stepwise(stepwise_testdir):
- result = stepwise_testdir.runpytest("-v", "--strict-markers", "--fail")
-
+def test_run_without_stepwise(stepwise_pytester: Pytester) -> None:
+ result = stepwise_pytester.runpytest("-v", "--strict-markers", "--fail")
result.stdout.fnmatch_lines(["*test_success_before_fail PASSED*"])
result.stdout.fnmatch_lines(["*test_fail_on_flag FAILED*"])
result.stdout.fnmatch_lines(["*test_success_after_fail PASSED*"])
-def test_fail_and_continue_with_stepwise(stepwise_testdir):
+def test_stepwise_output_summary(pytester: Pytester) -> None:
+ pytester.makepyfile(
+ """
+ import pytest
+ @pytest.mark.parametrize("expected", [True, True, True, True, False])
+ def test_data(expected):
+ assert expected
+ """
+ )
+ result = pytester.runpytest("-v", "--stepwise")
+ result.stdout.fnmatch_lines(["stepwise: no previously failed tests, not skipping."])
+ result = pytester.runpytest("-v", "--stepwise")
+ result.stdout.fnmatch_lines(
+ ["stepwise: skipping 4 already passed items.", "*1 failed, 4 deselected*"]
+ )
+
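
The two summary lines asserted above come from stepwise's two-phase workflow; sketched as a shell session over a minimal file:

    # $ pytest --stepwise        # run 1: nothing recorded yet, stops at first failure
    # $ pytest --stepwise        # run 2: "skipping 4 already passed items."
    # $ pytest --stepwise-skip   # (alias --sw-skip) ignore one failure and continue
    import pytest

    @pytest.mark.parametrize("expected", [True, True, True, True, False])
    def test_data(expected):
        assert expected
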
+
+def test_fail_and_continue_with_stepwise(stepwise_pytester: Pytester) -> None:
# Run the tests with a failing second test.
- result = stepwise_testdir.runpytest(
+ result = stepwise_pytester.runpytest(
"-v", "--strict-markers", "--stepwise", "--fail"
)
assert _strip_resource_warnings(result.stderr.lines) == []
assert "test_success_after_fail" not in stdout
# "Fix" the test that failed in the last run and run it again.
- result = stepwise_testdir.runpytest("-v", "--strict-markers", "--stepwise")
+ result = stepwise_pytester.runpytest("-v", "--strict-markers", "--stepwise")
assert _strip_resource_warnings(result.stderr.lines) == []
stdout = result.stdout.str()
assert "test_success_after_fail PASSED" in stdout
-def test_run_with_skip_option(stepwise_testdir):
- result = stepwise_testdir.runpytest(
- "-v",
- "--strict-markers",
- "--stepwise",
- "--stepwise-skip",
- "--fail",
- "--fail-last",
+@pytest.mark.parametrize("stepwise_skip", ["--stepwise-skip", "--sw-skip"])
+def test_run_with_skip_option(stepwise_pytester: Pytester, stepwise_skip: str) -> None:
+ result = stepwise_pytester.runpytest(
+ "-v", "--strict-markers", "--stepwise", stepwise_skip, "--fail", "--fail-last",
)
assert _strip_resource_warnings(result.stderr.lines) == []
assert "test_success_after_last_fail" not in stdout
-def test_fail_on_errors(error_testdir):
- result = error_testdir.runpytest("-v", "--strict-markers", "--stepwise")
+def test_fail_on_errors(error_pytester: Pytester) -> None:
+ result = error_pytester.runpytest("-v", "--strict-markers", "--stepwise")
assert _strip_resource_warnings(result.stderr.lines) == []
stdout = result.stdout.str()
assert "test_success_after_fail" not in stdout
-def test_change_testfile(stepwise_testdir):
- result = stepwise_testdir.runpytest(
+def test_change_testfile(stepwise_pytester: Pytester) -> None:
+ result = stepwise_pytester.runpytest(
"-v", "--strict-markers", "--stepwise", "--fail", "test_a.py"
)
assert _strip_resource_warnings(result.stderr.lines) == []
# Make sure the second test run starts from the beginning, since the
# test to continue from does not exist in testfile_b.
- result = stepwise_testdir.runpytest(
+ result = stepwise_pytester.runpytest(
"-v", "--strict-markers", "--stepwise", "test_b.py"
)
assert _strip_resource_warnings(result.stderr.lines) == []
@pytest.mark.parametrize("broken_first", [True, False])
-def test_stop_on_collection_errors(broken_testdir, broken_first):
+def test_stop_on_collection_errors(
+ broken_pytester: Pytester, broken_first: bool
+) -> None:
"""Stop during collection errors. Broken test first or broken test last
actually surfaced a bug (#5444), so we test both situations."""
files = ["working_testfile.py", "broken_testfile.py"]
if broken_first:
files.reverse()
- result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *files)
+ result = broken_pytester.runpytest("-v", "--strict-markers", "--stepwise", *files)
result.stdout.fnmatch_lines("*error during collection*")
-def test_xfail_handling(testdir, monkeypatch):
+def test_xfail_handling(pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
"""Ensure normal xfail is ignored, and strict xfail interrupts the session in sw mode
(#5547)
def test_c(): pass
def test_d(): pass
"""
- testdir.makepyfile(contents.format(assert_value="0", strict="False"))
- result = testdir.runpytest("--sw", "-v")
+ pytester.makepyfile(contents.format(assert_value="0", strict="False"))
+ result = pytester.runpytest("--sw", "-v")
result.stdout.fnmatch_lines(
[
"*::test_a PASSED *",
]
)
- testdir.makepyfile(contents.format(assert_value="1", strict="True"))
- result = testdir.runpytest("--sw", "-v")
+ pytester.makepyfile(contents.format(assert_value="1", strict="True"))
+ result = pytester.runpytest("--sw", "-v")
result.stdout.fnmatch_lines(
[
"*::test_a PASSED *",
]
)
- testdir.makepyfile(contents.format(assert_value="0", strict="True"))
- result = testdir.runpytest("--sw", "-v")
+ pytester.makepyfile(contents.format(assert_value="0", strict="True"))
+ result = pytester.runpytest("--sw", "-v")
result.stdout.fnmatch_lines(
[
"*::test_b XFAIL *",
import sys
import textwrap
from io import StringIO
+from pathlib import Path
+from types import SimpleNamespace
from typing import cast
from typing import Dict
from typing import List
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.monkeypatch import MonkeyPatch
-from _pytest.pathlib import Path
-from _pytest.pytester import Testdir
+from _pytest.pytester import Pytester
from _pytest.reports import BaseReport
from _pytest.reports import CollectReport
+from _pytest.reports import TestReport
from _pytest.terminal import _folded_skips
+from _pytest.terminal import _format_trimmed
from _pytest.terminal import _get_line_with_reprcrash_message
+from _pytest.terminal import _get_raw_skip_reason
from _pytest.terminal import _plugin_nameversions
from _pytest.terminal import getreportopt
from _pytest.terminal import TerminalReporter
class TestTerminal:
- def test_pass_skip_fail(self, testdir, option):
- testdir.makepyfile(
+ def test_pass_skip_fail(self, pytester: Pytester, option) -> None:
+ pytester.makepyfile(
"""
import pytest
def test_ok():
assert 0
"""
)
- result = testdir.runpytest(*option.args)
+ result = pytester.runpytest(*option.args)
if option.verbosity > 0:
result.stdout.fnmatch_lines(
[
[" def test_func():", "> assert 0", "E assert 0"]
)
- def test_internalerror(self, testdir, linecomp):
- modcol = testdir.getmodulecol("def test_one(): pass")
+ def test_internalerror(self, pytester: Pytester, linecomp) -> None:
+ modcol = pytester.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
with pytest.raises(ValueError) as excinfo:
raise ValueError("hello")
rep.pytest_internalerror(excinfo.getrepr())
linecomp.assert_contains_lines(["INTERNALERROR> *ValueError*hello*"])
- def test_writeline(self, testdir, linecomp):
- modcol = testdir.getmodulecol("def test_one(): pass")
+ def test_writeline(self, pytester: Pytester, linecomp) -> None:
+ modcol = pytester.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
rep.write_fspath_result(modcol.nodeid, ".")
rep.write_line("hello world")
assert lines[1].endswith(modcol.name + " .")
assert lines[2] == "hello world"
- def test_show_runtest_logstart(self, testdir, linecomp):
- item = testdir.getitem("def test_func(): pass")
+ def test_show_runtest_logstart(self, pytester: Pytester, linecomp) -> None:
+ item = pytester.getitem("def test_func(): pass")
tr = TerminalReporter(item.config, file=linecomp.stringio)
item.config.pluginmanager.register(tr)
location = item.reportinfo()
)
linecomp.assert_contains_lines(["*test_show_runtest_logstart.py*"])
- def test_runtest_location_shown_before_test_starts(self, testdir):
- testdir.makepyfile(
+ def test_runtest_location_shown_before_test_starts(
+ self, pytester: Pytester
+ ) -> None:
+ pytester.makepyfile(
"""
def test_1():
import time
time.sleep(20)
"""
)
- child = testdir.spawn_pytest("")
+ child = pytester.spawn_pytest("")
child.expect(".*test_runtest_location.*py")
child.sendeof()
child.kill(15)
- def test_report_collect_after_half_a_second(self, testdir):
+ def test_report_collect_after_half_a_second(
+ self, pytester: Pytester, monkeypatch: MonkeyPatch
+ ) -> None:
"""Test for "collecting" being updated after 0.5s"""
- testdir.makepyfile(
+ pytester.makepyfile(
**{
"test1.py": """
import _pytest.terminal
}
)
# Explicitly test colored output.
- testdir.monkeypatch.setenv("PY_COLORS", "1")
+ monkeypatch.setenv("PY_COLORS", "1")
- child = testdir.spawn_pytest("-v test1.py test2.py")
+ child = pytester.spawn_pytest("-v test1.py test2.py")
child.expect(r"collecting \.\.\.")
child.expect(r"collecting 1 item")
child.expect(r"collecting 2 items")
rest = child.read().decode("utf8")
assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
- def test_itemreport_subclasses_show_subclassed_file(self, testdir):
- testdir.makepyfile(
+ def test_itemreport_subclasses_show_subclassed_file(
+ self, pytester: Pytester
+ ) -> None:
+ pytester.makepyfile(
**{
"tests/test_p1": """
class BaseTests(object):
""",
}
)
- result = testdir.runpytest("tests/test_p2.py", "--rootdir=tests")
+ result = pytester.runpytest("tests/test_p2.py", "--rootdir=tests")
result.stdout.fnmatch_lines(["tests/test_p2.py .*", "=* 1 passed in *"])
- result = testdir.runpytest("-vv", "-rA", "tests/test_p2.py", "--rootdir=tests")
+ result = pytester.runpytest("-vv", "-rA", "tests/test_p2.py", "--rootdir=tests")
result.stdout.fnmatch_lines(
[
"tests/test_p2.py::TestMore::test_p1 <- test_p1.py PASSED *",
"PASSED tests/test_p2.py::TestMore::test_p1",
]
)
- result = testdir.runpytest("-vv", "-rA", "tests/test_p3.py", "--rootdir=tests")
+ result = pytester.runpytest("-vv", "-rA", "tests/test_p3.py", "--rootdir=tests")
result.stdout.fnmatch_lines(
[
"tests/test_p3.py::TestMore::test_p1 <- test_p1.py FAILED *",
]
)
- def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):
- a = testdir.mkpydir("a123")
- a.join("test_hello123.py").write(
+ def test_itemreport_directclasses_not_shown_as_subclasses(
+ self, pytester: Pytester
+ ) -> None:
+ a = pytester.mkpydir("a123")
+ a.joinpath("test_hello123.py").write_text(
textwrap.dedent(
"""\
class TestClass(object):
"""
)
)
- result = testdir.runpytest("-vv")
+ result = pytester.runpytest("-vv")
assert result.ret == 0
result.stdout.fnmatch_lines(["*a123/test_hello123.py*PASS*"])
result.stdout.no_fnmatch_line("* <- *")
@pytest.mark.parametrize("fulltrace", ("", "--fulltrace"))
- def test_keyboard_interrupt(self, testdir, fulltrace):
- testdir.makepyfile(
+ def test_keyboard_interrupt(self, pytester: Pytester, fulltrace) -> None:
+ pytester.makepyfile(
"""
def test_foobar():
assert 0
"""
)
- result = testdir.runpytest(fulltrace, no_reraise_ctrlc=True)
+ result = pytester.runpytest(fulltrace, no_reraise_ctrlc=True)
result.stdout.fnmatch_lines(
[
" def test_foobar():",
)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
- def test_keyboard_in_sessionstart(self, testdir):
- testdir.makeconftest(
+ def test_keyboard_in_sessionstart(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_sessionstart():
raise KeyboardInterrupt
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_foobar():
pass
"""
)
- result = testdir.runpytest(no_reraise_ctrlc=True)
+ result = pytester.runpytest(no_reraise_ctrlc=True)
assert result.ret == 2
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
- def test_collect_single_item(self, testdir):
+ def test_collect_single_item(self, pytester: Pytester) -> None:
"""Use singular 'item' when reporting a single test item"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_foobar():
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 1 item"])
- def test_rewrite(self, testdir, monkeypatch):
- config = testdir.parseconfig()
+ def test_rewrite(self, pytester: Pytester, monkeypatch) -> None:
+ config = pytester.parseconfig()
f = StringIO()
monkeypatch.setattr(f, "isatty", lambda *args: True)
tr = TerminalReporter(config, f)
assert f.getvalue() == "hello" + "\r" + "hey" + (6 * " ")
def test_report_teststatus_explicit_markup(
- self, testdir: Testdir, color_mapping
+ self, monkeypatch: MonkeyPatch, pytester: Pytester, color_mapping
) -> None:
"""Test that TerminalReporter handles markup explicitly provided by
a pytest_report_teststatus hook."""
- testdir.monkeypatch.setenv("PY_COLORS", "1")
- testdir.makeconftest(
+ monkeypatch.setenv("PY_COLORS", "1")
+ pytester.makeconftest(
"""
def pytest_report_teststatus(report):
return 'foo', 'F', ('FOO', {'red': True})
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_foobar():
pass
"""
)
- result = testdir.runpytest("-v")
+ result = pytester.runpytest("-v")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(["*{red}FOO{reset}*"])
)
+ def test_verbose_skip_reason(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
+ """
+ import pytest
+
+ @pytest.mark.skip(reason="123")
+ def test_1():
+ pass
+
+ @pytest.mark.xfail(reason="456")
+ def test_2():
+ pass
+
+ @pytest.mark.xfail(reason="789")
+ def test_3():
+ assert False
+
+ @pytest.mark.xfail(reason="")
+ def test_4():
+ assert False
+ """
+ )
+ result = pytester.runpytest("-v")
+ result.stdout.fnmatch_lines(
+ [
+ "test_verbose_skip_reason.py::test_1 SKIPPED (123) *",
+ "test_verbose_skip_reason.py::test_2 XPASS (456) *",
+ "test_verbose_skip_reason.py::test_3 XFAIL (789) *",
+ "test_verbose_skip_reason.py::test_4 XFAIL *",
+ ]
+ )
+
class TestCollectonly:
- def test_collectonly_basic(self, testdir):
- testdir.makepyfile(
+ def test_collectonly_basic(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_func():
pass
"""
)
- result = testdir.runpytest("--collect-only")
+ result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(
["<Module test_collectonly_basic.py>", " <Function test_func>"]
)
- def test_collectonly_skipped_module(self, testdir):
- testdir.makepyfile(
+ def test_collectonly_skipped_module(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
pytest.skip("hello")
"""
)
- result = testdir.runpytest("--collect-only", "-rs")
+ result = pytester.runpytest("--collect-only", "-rs")
result.stdout.fnmatch_lines(["*ERROR collecting*"])
def test_collectonly_displays_test_description(
- self, testdir: Testdir, dummy_yaml_custom_test
+ self, pytester: Pytester, dummy_yaml_custom_test
) -> None:
"""Used dummy_yaml_custom_test for an Item without ``obj``."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_with_description():
''' This test has a description.
more2.'''
"""
)
- result = testdir.runpytest("--collect-only", "--verbose")
+ result = pytester.runpytest("--collect-only", "--verbose")
result.stdout.fnmatch_lines(
[
"<YamlFile test1.yaml>",
consecutive=True,
)
- def test_collectonly_failed_module(self, testdir):
- testdir.makepyfile("""raise ValueError(0)""")
- result = testdir.runpytest("--collect-only")
+ def test_collectonly_failed_module(self, pytester: Pytester) -> None:
+ pytester.makepyfile("""raise ValueError(0)""")
+ result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*raise ValueError*", "*1 error*"])
- def test_collectonly_fatal(self, testdir):
- testdir.makeconftest(
+ def test_collectonly_fatal(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_collectstart(collector):
assert 0, "urgs"
"""
)
- result = testdir.runpytest("--collect-only")
+ result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*INTERNAL*args*"])
assert result.ret == 3
- def test_collectonly_simple(self, testdir):
- p = testdir.makepyfile(
+ def test_collectonly_simple(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
def test_func1():
pass
pass
"""
)
- result = testdir.runpytest("--collect-only", p)
+ result = pytester.runpytest("--collect-only", p)
# assert stderr.startswith("inserting into sys.path")
assert result.ret == 0
result.stdout.fnmatch_lines(
]
)
- def test_collectonly_error(self, testdir):
- p = testdir.makepyfile("import Errlkjqweqwe")
- result = testdir.runpytest("--collect-only", p)
+ def test_collectonly_error(self, pytester: Pytester) -> None:
+ p = pytester.makepyfile("import Errlkjqweqwe")
+ result = pytester.runpytest("--collect-only", p)
assert result.ret == 2
result.stdout.fnmatch_lines(
textwrap.dedent(
).strip()
)
- def test_collectonly_missing_path(self, testdir):
+ def test_collectonly_missing_path(self, pytester: Pytester) -> None:
"""Issue 115: failure in parseargs will cause session not to
have the items attribute."""
- result = testdir.runpytest("--collect-only", "uhm_missing_path")
+ result = pytester.runpytest("--collect-only", "uhm_missing_path")
assert result.ret == 4
result.stderr.fnmatch_lines(
["*ERROR: file or directory not found: uhm_missing_path"]
)
- def test_collectonly_quiet(self, testdir):
- testdir.makepyfile("def test_foo(): pass")
- result = testdir.runpytest("--collect-only", "-q")
+ def test_collectonly_quiet(self, pytester: Pytester) -> None:
+ pytester.makepyfile("def test_foo(): pass")
+ result = pytester.runpytest("--collect-only", "-q")
result.stdout.fnmatch_lines(["*test_foo*"])
- def test_collectonly_more_quiet(self, testdir):
- testdir.makepyfile(test_fun="def test_foo(): pass")
- result = testdir.runpytest("--collect-only", "-qq")
+ def test_collectonly_more_quiet(self, pytester: Pytester) -> None:
+ pytester.makepyfile(test_fun="def test_foo(): pass")
+ result = pytester.runpytest("--collect-only", "-qq")
result.stdout.fnmatch_lines(["*test_fun.py: 1*"])
+ def test_collect_only_summary_status(self, pytester: Pytester) -> None:
+ """Custom status depending on test selection using -k or -m. #7701."""
+ pytester.makepyfile(
+ test_collect_foo="""
+ def test_foo(): pass
+ """,
+ test_collect_bar="""
+ def test_foobar(): pass
+ def test_bar(): pass
+ """,
+ )
+ result = pytester.runpytest("--collect-only")
+ result.stdout.fnmatch_lines("*== 3 tests collected in * ==*")
+
+ result = pytester.runpytest("--collect-only", "test_collect_foo.py")
+ result.stdout.fnmatch_lines("*== 1 test collected in * ==*")
+
+ result = pytester.runpytest("--collect-only", "-k", "foo")
+ result.stdout.fnmatch_lines("*== 2/3 tests collected (1 deselected) in * ==*")
+
+ result = pytester.runpytest("--collect-only", "-k", "test_bar")
+ result.stdout.fnmatch_lines("*== 1/3 tests collected (2 deselected) in * ==*")
+
+ result = pytester.runpytest("--collect-only", "-k", "invalid")
+ result.stdout.fnmatch_lines("*== no tests collected (3 deselected) in * ==*")
+
+ pytester.mkdir("no_tests_here")
+ result = pytester.runpytest("--collect-only", "no_tests_here")
+ result.stdout.fnmatch_lines("*== no tests collected in * ==*")
+
+ pytester.makepyfile(
+ test_contains_error="""
+ raise RuntimeError
+ """,
+ )
+ result = pytester.runpytest("--collect-only")
+ result.stdout.fnmatch_lines("*== 3 tests collected, 1 error in * ==*")
+ result = pytester.runpytest("--collect-only", "-k", "foo")
+ result.stdout.fnmatch_lines(
+ "*== 2/3 tests collected (1 deselected), 1 error in * ==*"
+ )
+
class TestFixtureReporting:
- def test_setup_fixture_error(self, testdir):
- testdir.makepyfile(
+ def test_setup_fixture_error(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def setup_function(function):
print("setup func")
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at setup of test_nada*",
)
assert result.ret != 0
- def test_teardown_fixture_error(self, testdir):
- testdir.makepyfile(
+ def test_teardown_fixture_error(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_nada():
pass
assert 0
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at teardown*",
]
)
- def test_teardown_fixture_error_and_test_failure(self, testdir):
- testdir.makepyfile(
+ def test_teardown_fixture_error_and_test_failure(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_fail():
assert 0, "failingfunc"
assert False
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at teardown of test_fail*",
]
)
- def test_setup_teardown_output_and_test_failure(self, testdir):
+ def test_setup_teardown_output_and_test_failure(self, pytester: Pytester) -> None:
"""Test for issue #442."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def setup_function(function):
print("setup func")
print("teardown func")
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*test_fail*",
class TestTerminalFunctional:
- def test_deselected(self, testdir):
- testpath = testdir.makepyfile(
+ def test_deselected(self, pytester: Pytester) -> None:
+ testpath = pytester.makepyfile(
"""
def test_one():
pass
pass
"""
)
- result = testdir.runpytest("-k", "test_two:", testpath)
+ result = pytester.runpytest("-k", "test_two:", testpath)
result.stdout.fnmatch_lines(
["collected 3 items / 1 deselected / 2 selected", "*test_deselected.py ..*"]
)
assert result.ret == 0
- def test_deselected_with_hookwrapper(self, testdir):
- testpath = testdir.makeconftest(
+ def test_deselected_with_hookwrapper(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
config.hook.pytest_deselected(items=[deselected])
"""
)
- testpath = testdir.makepyfile(
+ testpath = pytester.makepyfile(
"""
def test_one():
pass
pass
"""
)
- result = testdir.runpytest(testpath)
+ result = pytester.runpytest(testpath)
result.stdout.fnmatch_lines(
[
"collected 3 items / 1 deselected / 2 selected",
)
assert result.ret == 0
- def test_show_deselected_items_using_markexpr_before_test_execution(self, testdir):
- testdir.makepyfile(
+ def test_show_deselected_items_using_markexpr_before_test_execution(
+ self, pytester: Pytester
+ ) -> None:
+ pytester.makepyfile(
test_show_deselected="""
import pytest
pass
"""
)
- result = testdir.runpytest("-m", "not foo")
+ result = pytester.runpytest("-m", "not foo")
result.stdout.fnmatch_lines(
[
"collected 3 items / 1 deselected / 2 selected",
result.stdout.no_fnmatch_line("*= 1 deselected =*")
assert result.ret == 0
- def test_no_skip_summary_if_failure(self, testdir):
- testdir.makepyfile(
+ def test_no_skip_summary_if_failure(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
def test_ok():
pytest.skip("dontshow")
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
assert result.stdout.str().find("skip test summary") == -1
assert result.ret == 1
- def test_passes(self, testdir):
- p1 = testdir.makepyfile(
+ def test_passes(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test_passes():
pass
pass
"""
)
- old = p1.dirpath().chdir()
+ old = p1.parent
+ pytester.chdir()
try:
- result = testdir.runpytest()
+ result = pytester.runpytest()
finally:
- old.chdir()
+ os.chdir(old)
result.stdout.fnmatch_lines(["test_passes.py ..*", "* 2 pass*"])
assert result.ret == 0
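
py.path's chained `dirpath().chdir()` returned the previous cwd implicitly; the pathlib rewrite above has to save and restore it by hand. The pattern in isolation (helper name is illustrative):

    import os
    from pathlib import Path
    from typing import Callable

    def run_in(target: Path, work: Callable[[], None]) -> None:
        old = Path.cwd()      # what py.path's .chdir() used to return
        os.chdir(target)
        try:
            work()
        finally:
            os.chdir(old)     # always restore, mirroring the try/finally above
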
- def test_header_trailer_info(self, testdir, request):
- testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
- testdir.makepyfile(
+ def test_header_trailer_info(
+ self, monkeypatch: MonkeyPatch, pytester: Pytester, request
+ ) -> None:
+ monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
+ pytester.makepyfile(
"""
def test_passes():
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
verinfo = ".".join(map(str, sys.version_info[:3]))
result.stdout.fnmatch_lines(
[
if request.config.pluginmanager.list_plugin_distinfo():
result.stdout.fnmatch_lines(["plugins: *"])
- def test_no_header_trailer_info(self, testdir, request):
- testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
- testdir.makepyfile(
+ def test_no_header_trailer_info(
+ self, monkeypatch: MonkeyPatch, pytester: Pytester, request
+ ) -> None:
+ monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
+ pytester.makepyfile(
"""
def test_passes():
pass
"""
)
- result = testdir.runpytest("--no-header")
+ result = pytester.runpytest("--no-header")
verinfo = ".".join(map(str, sys.version_info[:3]))
result.stdout.no_fnmatch_line(
"platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s"
if request.config.pluginmanager.list_plugin_distinfo():
result.stdout.no_fnmatch_line("plugins: *")
- def test_header(self, testdir):
- testdir.tmpdir.join("tests").ensure_dir()
- testdir.tmpdir.join("gui").ensure_dir()
+ def test_header(self, pytester: Pytester) -> None:
+ pytester.path.joinpath("tests").mkdir()
+ pytester.path.joinpath("gui").mkdir()
# no ini file
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["rootdir: *test_header0"])
# with configfile
- testdir.makeini("""[pytest]""")
- result = testdir.runpytest()
+ pytester.makeini("""[pytest]""")
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["rootdir: *test_header0, configfile: tox.ini"])
# with testpaths option, and not passing anything in the command-line
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
testpaths = tests gui
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
["rootdir: *test_header0, configfile: tox.ini, testpaths: tests, gui"]
)
# with testpaths option, passing directory in command-line: do not show testpaths then
- result = testdir.runpytest("tests")
+ result = pytester.runpytest("tests")
result.stdout.fnmatch_lines(["rootdir: *test_header0, configfile: tox.ini"])
def test_header_absolute_testpath(
- self, testdir: Testdir, monkeypatch: MonkeyPatch
+ self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Regresstion test for #7814."""
- tests = testdir.tmpdir.join("tests")
- tests.ensure_dir()
- testdir.makepyprojecttoml(
+ tests = pytester.path.joinpath("tests")
+ tests.mkdir()
+ pytester.makepyprojecttoml(
"""
[tool.pytest.ini_options]
testpaths = ['{}']
tests
)
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"rootdir: *absolute_testpath0, configfile: pyproject.toml, testpaths: {}".format(
]
)
- def test_no_header(self, testdir):
- testdir.tmpdir.join("tests").ensure_dir()
- testdir.tmpdir.join("gui").ensure_dir()
+ def test_no_header(self, pytester: Pytester) -> None:
+ pytester.path.joinpath("tests").mkdir()
+ pytester.path.joinpath("gui").mkdir()
# with testpaths option, and not passing anything in the command-line
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
testpaths = tests gui
"""
)
- result = testdir.runpytest("--no-header")
+ result = pytester.runpytest("--no-header")
result.stdout.no_fnmatch_line(
"rootdir: *test_header0, inifile: tox.ini, testpaths: tests, gui"
)
# with testpaths option, passing directory in command-line: do not show testpaths then
- result = testdir.runpytest("tests", "--no-header")
+ result = pytester.runpytest("tests", "--no-header")
result.stdout.no_fnmatch_line("rootdir: *test_header0, inifile: tox.ini")
- def test_no_summary(self, testdir):
- p1 = testdir.makepyfile(
+ def test_no_summary(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test_no_summary():
assert false
"""
)
- result = testdir.runpytest(p1, "--no-summary")
+ result = pytester.runpytest(p1, "--no-summary")
result.stdout.no_fnmatch_line("*= FAILURES =*")
- def test_showlocals(self, testdir):
- p1 = testdir.makepyfile(
+ def test_showlocals(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test_showlocals():
x = 3
assert 0
"""
)
- result = testdir.runpytest(p1, "-l")
+ result = pytester.runpytest(p1, "-l")
result.stdout.fnmatch_lines(
[
# "_ _ * Locals *",
]
)
- def test_showlocals_short(self, testdir):
- p1 = testdir.makepyfile(
+ def test_showlocals_short(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile(
"""
def test_showlocals_short():
x = 3
assert 0
"""
)
- result = testdir.runpytest(p1, "-l", "--tb=short")
+ result = pytester.runpytest(p1, "-l", "--tb=short")
result.stdout.fnmatch_lines(
[
"test_showlocals_short.py:*",
)
@pytest.fixture
- def verbose_testfile(self, testdir):
- return testdir.makepyfile(
+ def verbose_testfile(self, pytester: Pytester) -> Path:
+ return pytester.makepyfile(
"""
import pytest
def test_fail():
"""
)
- def test_verbose_reporting(self, verbose_testfile, testdir):
- result = testdir.runpytest(
+ def test_verbose_reporting(self, verbose_testfile, pytester: Pytester) -> None:
+ result = pytester.runpytest(
verbose_testfile, "-v", "-Walways::pytest.PytestWarning"
)
result.stdout.fnmatch_lines(
)
assert result.ret == 1
- def test_verbose_reporting_xdist(self, verbose_testfile, testdir, pytestconfig):
+ def test_verbose_reporting_xdist(
+ self,
+ verbose_testfile,
+ monkeypatch: MonkeyPatch,
+ pytester: Pytester,
+ pytestconfig,
+ ) -> None:
if not pytestconfig.pluginmanager.get_plugin("xdist"):
pytest.skip("xdist plugin not installed")
- testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
- result = testdir.runpytest(
+ monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
+ result = pytester.runpytest(
verbose_testfile, "-v", "-n 1", "-Walways::pytest.PytestWarning"
)
result.stdout.fnmatch_lines(
)
assert result.ret == 1
- def test_quiet_reporting(self, testdir):
- p1 = testdir.makepyfile("def test_pass(): pass")
- result = testdir.runpytest(p1, "-q")
+ def test_quiet_reporting(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("def test_pass(): pass")
+ result = pytester.runpytest(p1, "-q")
s = result.stdout.str()
assert "test session starts" not in s
- assert p1.basename not in s
+ assert p1.name not in s
assert "===" not in s
assert "passed" in s
- def test_more_quiet_reporting(self, testdir):
- p1 = testdir.makepyfile("def test_pass(): pass")
- result = testdir.runpytest(p1, "-qq")
+ def test_more_quiet_reporting(self, pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("def test_pass(): pass")
+ result = pytester.runpytest(p1, "-qq")
s = result.stdout.str()
assert "test session starts" not in s
- assert p1.basename not in s
+ assert p1.name not in s
assert "===" not in s
assert "passed" not in s
@pytest.mark.parametrize(
"params", [(), ("--collect-only",)], ids=["no-params", "collect-only"]
)
- def test_report_collectionfinish_hook(self, testdir, params):
- testdir.makeconftest(
+ def test_report_collectionfinish_hook(self, pytester: Pytester, params) -> None:
+ pytester.makeconftest(
"""
def pytest_report_collectionfinish(config, startdir, items):
return ['hello from hook: {0} items'.format(len(items))]
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('i', range(3))
pass
"""
)
- result = testdir.runpytest(*params)
+ result = pytester.runpytest(*params)
result.stdout.fnmatch_lines(["collected 3 items", "hello from hook: 3 items"])
- def test_summary_f_alias(self, testdir):
+ def test_summary_f_alias(self, pytester: Pytester) -> None:
"""Test that 'f' and 'F' report chars are aliases and don't show up twice in the summary (#6334)"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test():
assert False
"""
)
- result = testdir.runpytest("-rfF")
+ result = pytester.runpytest("-rfF")
expected = "FAILED test_summary_f_alias.py::test - assert False"
result.stdout.fnmatch_lines([expected])
assert result.stdout.lines.count(expected) == 1
- def test_summary_s_alias(self, testdir):
+ def test_summary_s_alias(self, pytester: Pytester) -> None:
"""Test that 's' and 'S' report chars are aliases and don't show up twice in the summary"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
pass
"""
)
- result = testdir.runpytest("-rsS")
+ result = pytester.runpytest("-rsS")
expected = "SKIPPED [1] test_summary_s_alias.py:3: unconditional skip"
result.stdout.fnmatch_lines([expected])
assert result.stdout.lines.count(expected) == 1
-def test_fail_extra_reporting(testdir, monkeypatch):
+def test_fail_extra_reporting(pytester: Pytester, monkeypatch) -> None:
monkeypatch.setenv("COLUMNS", "80")
- testdir.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
- result = testdir.runpytest("-rN")
+ pytester.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
+ result = pytester.runpytest("-rN")
result.stdout.no_fnmatch_line("*short test summary*")
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*test summary*",
)
-def test_fail_reporting_on_pass(testdir):
- testdir.makepyfile("def test_this(): assert 1")
- result = testdir.runpytest("-rf")
+def test_fail_reporting_on_pass(pytester: Pytester) -> None:
+ pytester.makepyfile("def test_this(): assert 1")
+ result = pytester.runpytest("-rf")
result.stdout.no_fnmatch_line("*short test summary*")
-def test_pass_extra_reporting(testdir):
- testdir.makepyfile("def test_this(): assert 1")
- result = testdir.runpytest()
+def test_pass_extra_reporting(pytester: Pytester) -> None:
+ pytester.makepyfile("def test_this(): assert 1")
+ result = pytester.runpytest()
result.stdout.no_fnmatch_line("*short test summary*")
- result = testdir.runpytest("-rp")
+ result = pytester.runpytest("-rp")
result.stdout.fnmatch_lines(["*test summary*", "PASS*test_pass_extra_reporting*"])
-def test_pass_reporting_on_fail(testdir):
- testdir.makepyfile("def test_this(): assert 0")
- result = testdir.runpytest("-rp")
+def test_pass_reporting_on_fail(pytester: Pytester) -> None:
+ pytester.makepyfile("def test_this(): assert 0")
+ result = pytester.runpytest("-rp")
result.stdout.no_fnmatch_line("*short test summary*")
-def test_pass_output_reporting(testdir):
- testdir.makepyfile(
+def test_pass_output_reporting(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def setup_module():
print("setup_module")
pass
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
s = result.stdout.str()
assert "test_pass_has_output" not in s
assert "Four score and seven years ago..." not in s
assert "test_pass_no_output" not in s
- result = testdir.runpytest("-rPp")
+ result = pytester.runpytest("-rPp")
result.stdout.fnmatch_lines(
[
"*= PASSES =*",
)
-def test_color_yes(testdir, color_mapping):
- p1 = testdir.makepyfile(
+def test_color_yes(pytester: Pytester, color_mapping) -> None:
+ p1 = pytester.makepyfile(
"""
def fail():
assert 0
fail()
"""
)
- result = testdir.runpytest("--color=yes", str(p1))
- color_mapping.requires_ordered_markup(result)
+ result = pytester.runpytest("--color=yes", str(p1))
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
]
)
)
- result = testdir.runpytest("--color=yes", "--tb=short", str(p1))
+ result = pytester.runpytest("--color=yes", "--tb=short", str(p1))
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
)
-def test_color_no(testdir):
- testdir.makepyfile("def test_this(): assert 1")
- result = testdir.runpytest("--color=no")
+def test_color_no(pytester: Pytester) -> None:
+ pytester.makepyfile("def test_this(): assert 1")
+ result = pytester.runpytest("--color=no")
assert "test session starts" in result.stdout.str()
result.stdout.no_fnmatch_line("*\x1b[1m*")
@pytest.mark.parametrize("verbose", [True, False])
-def test_color_yes_collection_on_non_atty(testdir, verbose):
+def test_color_yes_collection_on_non_atty(pytester: Pytester, verbose) -> None:
"""#1397: Skip collect progress report when working on non-terminals."""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('i', range(10))
args = ["--color=yes"]
if verbose:
args.append("-vv")
- result = testdir.runpytest(*args)
+ result = pytester.runpytest(*args)
assert "test session starts" in result.stdout.str()
assert "\x1b[1m" in result.stdout.str()
result.stdout.no_fnmatch_line("*collecting 10 items*")
assert getreportopt(config) == "fE"
-def test_terminalreporter_reportopt_addopts(testdir):
- testdir.makeini("[pytest]\naddopts=-rs")
- testdir.makepyfile(
+def test_terminalreporter_reportopt_addopts(pytester: Pytester) -> None:
+ pytester.makeini("[pytest]\naddopts=-rs")
+ pytester.makepyfile(
"""
import pytest
assert not tr.hasopt('qwe')
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
-def test_tbstyle_short(testdir):
- p = testdir.makepyfile(
+def test_tbstyle_short(pytester: Pytester) -> None:
+ p = pytester.makepyfile(
"""
import pytest
assert x
"""
)
- result = testdir.runpytest("--tb=short")
+ result = pytester.runpytest("--tb=short")
s = result.stdout.str()
assert "arg = 42" not in s
assert "x = 0" not in s
- result.stdout.fnmatch_lines(["*%s:8*" % p.basename, " assert x", "E assert*"])
- result = testdir.runpytest()
+ result.stdout.fnmatch_lines(["*%s:8*" % p.name, " assert x", "E assert*"])
+ result = pytester.runpytest()
s = result.stdout.str()
assert "x = 0" in s
assert "assert x" in s
-def test_traceconfig(testdir):
- result = testdir.runpytest("--traceconfig")
+def test_traceconfig(pytester: Pytester) -> None:
+ result = pytester.runpytest("--traceconfig")
result.stdout.fnmatch_lines(["*active plugins*"])
assert result.ret == ExitCode.NO_TESTS_COLLECTED
"""Test class which can be subclassed with a different option provider to
run e.g. distributed tests."""
- def test_collect_fail(self, testdir, option):
- testdir.makepyfile("import xyz\n")
- result = testdir.runpytest(*option.args)
+ def test_collect_fail(self, pytester: Pytester, option) -> None:
+ pytester.makepyfile("import xyz\n")
+ result = pytester.runpytest(*option.args)
result.stdout.fnmatch_lines(
["ImportError while importing*", "*No module named *xyz*", "*1 error*"]
)
- def test_maxfailures(self, testdir, option):
- testdir.makepyfile(
+ def test_maxfailures(self, pytester: Pytester, option) -> None:
+ pytester.makepyfile(
"""
def test_1():
assert 0
assert 0
"""
)
- result = testdir.runpytest("--maxfail=2", *option.args)
+ result = pytester.runpytest("--maxfail=2", *option.args)
result.stdout.fnmatch_lines(
[
"*def test_1():*",
]
)
- def test_maxfailures_with_interrupted(self, testdir):
- testdir.makepyfile(
+ def test_maxfailures_with_interrupted(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test(request):
request.session.shouldstop = "session_interrupted"
assert 0
"""
)
- result = testdir.runpytest("--maxfail=1", "-ra")
+ result = pytester.runpytest("--maxfail=1", "-ra")
result.stdout.fnmatch_lines(
[
"*= short test summary info =*",
]
)
- def test_tb_option(self, testdir, option):
- testdir.makepyfile(
+ def test_tb_option(self, pytester: Pytester, option) -> None:
+ pytester.makepyfile(
"""
import pytest
def g():
)
for tbopt in ["long", "short", "no"]:
print("testing --tb=%s..." % tbopt)
- result = testdir.runpytest("-rN", "--tb=%s" % tbopt)
+ result = pytester.runpytest("-rN", "--tb=%s" % tbopt)
s = result.stdout.str()
if tbopt == "long":
assert "print(6*7)" in s
assert "--calling--" not in s
assert "IndexError" not in s
- def test_tb_crashline(self, testdir, option):
- p = testdir.makepyfile(
+ def test_tb_crashline(self, pytester: Pytester, option) -> None:
+ p = pytester.makepyfile(
"""
import pytest
def g():
assert 0, "hello"
"""
)
- result = testdir.runpytest("--tb=line")
- bn = p.basename
+ result = pytester.runpytest("--tb=line")
+ bn = p.name
result.stdout.fnmatch_lines(
["*%s:3: IndexError*" % bn, "*%s:8: AssertionError: hello*" % bn]
)
s = result.stdout.str()
assert "def test_func2" not in s
- def test_pytest_report_header(self, testdir, option):
- testdir.makeconftest(
+ def test_pytest_report_header(self, pytester: Pytester, option) -> None:
+ pytester.makeconftest(
"""
def pytest_sessionstart(session):
session.config._somevalue = 42
return "hello: %s" % config._somevalue
"""
)
- testdir.mkdir("a").join("conftest.py").write(
+ pytester.mkdir("a").joinpath("conftest.py").write_text(
"""
def pytest_report_header(config, startdir):
return ["line1", str(startdir)]
"""
)
- result = testdir.runpytest("a")
- result.stdout.fnmatch_lines(["*hello: 42*", "line1", str(testdir.tmpdir)])
+ result = pytester.runpytest("a")
+ result.stdout.fnmatch_lines(["*hello: 42*", "line1", str(pytester.path)])
- def test_show_capture(self, testdir):
- testdir.makepyfile(
+ def test_show_capture(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import sys
import logging
"""
)
- result = testdir.runpytest("--tb=short")
+ result = pytester.runpytest("--tb=short")
result.stdout.fnmatch_lines(
[
"!This is stdout!",
]
)
- result = testdir.runpytest("--show-capture=all", "--tb=short")
+ result = pytester.runpytest("--show-capture=all", "--tb=short")
result.stdout.fnmatch_lines(
[
"!This is stdout!",
]
)
- stdout = testdir.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
+ stdout = pytester.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
assert "!This is stderr!" not in stdout
assert "!This is stdout!" in stdout
assert "!This is a warning log msg!" not in stdout
- stdout = testdir.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
+ stdout = pytester.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
assert "!This is stdout!" not in stdout
assert "!This is stderr!" in stdout
assert "!This is a warning log msg!" not in stdout
- stdout = testdir.runpytest("--show-capture=log", "--tb=short").stdout.str()
+ stdout = pytester.runpytest("--show-capture=log", "--tb=short").stdout.str()
assert "!This is stdout!" not in stdout
assert "!This is stderr!" not in stdout
assert "!This is a warning log msg!" in stdout
- stdout = testdir.runpytest("--show-capture=no", "--tb=short").stdout.str()
+ stdout = pytester.runpytest("--show-capture=no", "--tb=short").stdout.str()
assert "!This is stdout!" not in stdout
assert "!This is stderr!" not in stdout
assert "!This is a warning log msg!" not in stdout
- def test_show_capture_with_teardown_logs(self, testdir):
+ def test_show_capture_with_teardown_logs(self, pytester: Pytester) -> None:
"""Ensure that the capturing of teardown logs honor --show-capture setting"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import logging
import sys
"""
)
- result = testdir.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
+ result = pytester.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
assert "!stdout!" in result
assert "!stderr!" not in result
assert "!log!" not in result
- result = testdir.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
+ result = pytester.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
assert "!stdout!" not in result
assert "!stderr!" in result
assert "!log!" not in result
- result = testdir.runpytest("--show-capture=log", "--tb=short").stdout.str()
+ result = pytester.runpytest("--show-capture=log", "--tb=short").stdout.str()
assert "!stdout!" not in result
assert "!stderr!" not in result
assert "!log!" in result
- result = testdir.runpytest("--show-capture=no", "--tb=short").stdout.str()
+ result = pytester.runpytest("--show-capture=no", "--tb=short").stdout.str()
assert "!stdout!" not in result
assert "!stderr!" not in result
assert "!log!" not in result
@pytest.mark.xfail("not hasattr(os, 'dup')")
-def test_fdopen_kept_alive_issue124(testdir):
- testdir.makepyfile(
+def test_fdopen_kept_alive_issue124(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import os, sys
k = []
stdout.close()
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["*2 passed*"])
-def test_tbstyle_native_setup_error(testdir):
- testdir.makepyfile(
+def test_tbstyle_native_setup_error(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.fixture
pass
"""
)
- result = testdir.runpytest("--tb=native")
+ result = pytester.runpytest("--tb=native")
result.stdout.fnmatch_lines(
['*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*']
)
-def test_terminal_summary(testdir):
- testdir.makeconftest(
+def test_terminal_summary(pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
def pytest_terminal_summary(terminalreporter, exitstatus):
w = terminalreporter
w.line("exitstatus: {0}".format(exitstatus))
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(
"""
*==== hello ====*
@pytest.mark.filterwarnings("default")
-def test_terminal_summary_warnings_are_displayed(testdir):
+def test_terminal_summary_warnings_are_displayed(pytester: Pytester) -> None:
"""Test that warnings emitted during pytest_terminal_summary are displayed.
(#1305).
"""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
import warnings
def pytest_terminal_summary(terminalreporter):
warnings.warn(UserWarning('internal warning'))
"""
)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_failure():
import warnings
assert 0
"""
)
- result = testdir.runpytest("-ra")
+ result = pytester.runpytest("-ra")
result.stdout.fnmatch_lines(
[
"*= warnings summary =*",
@pytest.mark.filterwarnings("default")
-def test_terminal_summary_warnings_header_once(testdir):
- testdir.makepyfile(
+def test_terminal_summary_warnings_header_once(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_failure():
import warnings
assert 0
"""
)
- result = testdir.runpytest("-ra")
+ result = pytester.runpytest("-ra")
result.stdout.fnmatch_lines(
[
"*= warnings summary =*",
@pytest.mark.filterwarnings("default")
-def test_terminal_no_summary_warnings_header_once(testdir):
- testdir.makepyfile(
+def test_terminal_no_summary_warnings_header_once(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
def test_failure():
import warnings
assert 0
"""
)
- result = testdir.runpytest("--no-summary")
+ result = pytester.runpytest("--no-summary")
result.stdout.no_fnmatch_line("*= warnings summary =*")
result.stdout.no_fnmatch_line("*= short test summary info =*")
tr._main_color = None
print("Based on stats: %s" % stats_arg)
- print('Expect summary: "{}"; with color "{}"'.format(exp_line, exp_color))
+ print(f'Expect summary: "{exp_line}"; with color "{exp_color}"')
(line, color) = tr.build_summary_stats_line()
- print('Actually got: "{}"; with color "{}"'.format(line, color))
+ print(f'Actually got: "{line}"; with color "{color}"')
assert line == exp_line
assert color == exp_color
"""Ensure classic output style works as expected (#3883)"""
@pytest.fixture
- def test_files(self, testdir):
- testdir.makepyfile(
+ def test_files(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
**{
"test_one.py": "def test_one(): pass",
"test_two.py": "def test_two(): assert 0",
}
)
- def test_normal_verbosity(self, testdir, test_files):
- result = testdir.runpytest("-o", "console_output_style=classic")
+ def test_normal_verbosity(self, pytester: Pytester, test_files) -> None:
+ result = pytester.runpytest("-o", "console_output_style=classic")
result.stdout.fnmatch_lines(
[
"test_one.py .",
"test_two.py F",
- "sub{}test_three.py .F.".format(os.sep),
+ f"sub{os.sep}test_three.py .F.",
"*2 failed, 3 passed in*",
]
)
- def test_verbose(self, testdir, test_files):
- result = testdir.runpytest("-o", "console_output_style=classic", "-v")
+ def test_verbose(self, pytester: Pytester, test_files) -> None:
+ result = pytester.runpytest("-o", "console_output_style=classic", "-v")
result.stdout.fnmatch_lines(
[
"test_one.py::test_one PASSED",
"test_two.py::test_two FAILED",
- "sub{}test_three.py::test_three_1 PASSED".format(os.sep),
- "sub{}test_three.py::test_three_2 FAILED".format(os.sep),
- "sub{}test_three.py::test_three_3 PASSED".format(os.sep),
+ f"sub{os.sep}test_three.py::test_three_1 PASSED",
+ f"sub{os.sep}test_three.py::test_three_2 FAILED",
+ f"sub{os.sep}test_three.py::test_three_3 PASSED",
"*2 failed, 3 passed in*",
]
)
- def test_quiet(self, testdir, test_files):
- result = testdir.runpytest("-o", "console_output_style=classic", "-q")
+ def test_quiet(self, pytester: Pytester, test_files) -> None:
+ result = pytester.runpytest("-o", "console_output_style=classic", "-q")
result.stdout.fnmatch_lines([".F.F.", "*2 failed, 3 passed in*"])
class TestProgressOutputStyle:
@pytest.fixture
- def many_tests_files(self, testdir):
- testdir.makepyfile(
+ def many_tests_files(self, pytester: Pytester) -> None:
+ pytester.makepyfile(
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(10))
""",
)
- def test_zero_tests_collected(self, testdir):
+ def test_zero_tests_collected(self, pytester: Pytester) -> None:
"""Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being
actually collected (#2971)."""
- testdir.makeconftest(
+ pytester.makeconftest(
"""
def pytest_collection_modifyitems(items, config):
from _pytest.runner import CollectReport
config.hook.pytest_runtest_logreport(report=rep)
"""
)
- output = testdir.runpytest()
+ output = pytester.runpytest()
output.stdout.no_fnmatch_line("*ZeroDivisionError*")
output.stdout.fnmatch_lines(["=* 2 passed in *="])
- def test_normal(self, many_tests_files, testdir):
- output = testdir.runpytest()
+ def test_normal(self, many_tests_files, pytester: Pytester) -> None:
+ output = pytester.runpytest()
output.stdout.re_match_lines(
[
r"test_bar.py \.{10} \s+ \[ 50%\]",
]
)
- def test_colored_progress(self, testdir, monkeypatch, color_mapping):
+ def test_colored_progress(
+ self, pytester: Pytester, monkeypatch, color_mapping
+ ) -> None:
monkeypatch.setenv("PY_COLORS", "1")
- testdir.makepyfile(
+ pytester.makepyfile(
test_axfail="""
import pytest
@pytest.mark.xfail
def test_foobar(i): raise ValueError()
""",
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.re_match_lines(
color_mapping.format_for_rematch(
[
)
# Only xfail should have yellow progress indicator.
- result = testdir.runpytest("test_axfail.py")
+ result = pytester.runpytest("test_axfail.py")
result.stdout.re_match_lines(
color_mapping.format_for_rematch(
[
)
)
- def test_count(self, many_tests_files, testdir):
- testdir.makeini(
+ def test_count(self, many_tests_files, pytester: Pytester) -> None:
+ pytester.makeini(
"""
[pytest]
console_output_style = count
"""
)
- output = testdir.runpytest()
+ output = pytester.runpytest()
output.stdout.re_match_lines(
[
r"test_bar.py \.{10} \s+ \[10/20\]",
]
)
- def test_verbose(self, many_tests_files, testdir):
- output = testdir.runpytest("-v")
+ def test_verbose(self, many_tests_files, pytester: Pytester) -> None:
+ output = pytester.runpytest("-v")
output.stdout.re_match_lines(
[
r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 5%\]",
]
)
- def test_verbose_count(self, many_tests_files, testdir):
- testdir.makeini(
+ def test_verbose_count(self, many_tests_files, pytester: Pytester) -> None:
+ pytester.makeini(
"""
[pytest]
console_output_style = count
"""
)
- output = testdir.runpytest("-v")
+ output = pytester.runpytest("-v")
output.stdout.re_match_lines(
[
r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 1/20\]",
]
)
- def test_xdist_normal(self, many_tests_files, testdir, monkeypatch):
+ def test_xdist_normal(
+ self, many_tests_files, pytester: Pytester, monkeypatch
+ ) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
- output = testdir.runpytest("-n2")
+ output = pytester.runpytest("-n2")
output.stdout.re_match_lines([r"\.{20} \s+ \[100%\]"])
- def test_xdist_normal_count(self, many_tests_files, testdir, monkeypatch):
+ def test_xdist_normal_count(
+ self, many_tests_files, pytester: Pytester, monkeypatch
+ ) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
- testdir.makeini(
+ pytester.makeini(
"""
[pytest]
console_output_style = count
"""
)
- output = testdir.runpytest("-n2")
+ output = pytester.runpytest("-n2")
output.stdout.re_match_lines([r"\.{20} \s+ \[20/20\]"])
- def test_xdist_verbose(self, many_tests_files, testdir, monkeypatch):
+ def test_xdist_verbose(
+ self, many_tests_files, pytester: Pytester, monkeypatch
+ ) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
- output = testdir.runpytest("-n2", "-v")
+ output = pytester.runpytest("-n2", "-v")
output.stdout.re_match_lines_random(
[
r"\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]",
]
)
- def test_capture_no(self, many_tests_files, testdir):
- output = testdir.runpytest("-s")
+ def test_capture_no(self, many_tests_files, pytester: Pytester) -> None:
+ output = pytester.runpytest("-s")
output.stdout.re_match_lines(
[r"test_bar.py \.{10}", r"test_foo.py \.{5}", r"test_foobar.py \.{5}"]
)
- output = testdir.runpytest("--capture=no")
+ output = pytester.runpytest("--capture=no")
output.stdout.no_fnmatch_line("*%]*")
"""Ensure we show the correct percentages for tests that fail during teardown (#3088)"""
@pytest.fixture
- def contest_with_teardown_fixture(self, testdir):
- testdir.makeconftest(
+ def conftest_with_teardown_fixture(self, pytester: Pytester) -> None:
+ pytester.makeconftest(
"""
import pytest
)
@pytest.fixture
- def many_files(self, testdir, contest_with_teardown_fixture):
- testdir.makepyfile(
+ def many_files(self, pytester: Pytester, conftest_with_teardown_fixture) -> None:
+ pytester.makepyfile(
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(5))
""",
)
- def test_teardown_simple(self, testdir, contest_with_teardown_fixture):
- testdir.makepyfile(
+ def test_teardown_simple(
+ self, pytester: Pytester, conftest_with_teardown_fixture
+ ) -> None:
+ pytester.makepyfile(
"""
def test_foo(fail_teardown):
pass
"""
)
- output = testdir.runpytest()
+ output = pytester.runpytest()
output.stdout.re_match_lines([r"test_teardown_simple.py \.E\s+\[100%\]"])
def test_teardown_with_test_also_failing(
- self, testdir, contest_with_teardown_fixture
- ):
- testdir.makepyfile(
+ self, pytester: Pytester, conftest_with_teardown_fixture
+ ) -> None:
+ pytester.makepyfile(
"""
def test_foo(fail_teardown):
assert 0
"""
)
- output = testdir.runpytest("-rfE")
+ output = pytester.runpytest("-rfE")
output.stdout.re_match_lines(
[
r"test_teardown_with_test_also_failing.py FE\s+\[100%\]",
]
)
- def test_teardown_many(self, testdir, many_files):
- output = testdir.runpytest()
+ def test_teardown_many(self, pytester: Pytester, many_files) -> None:
+ output = pytester.runpytest()
output.stdout.re_match_lines(
[r"test_bar.py (\.E){5}\s+\[ 25%\]", r"test_foo.py (\.E){15}\s+\[100%\]"]
)
def test_teardown_many_verbose(
- self, testdir: Testdir, many_files, color_mapping
+ self, pytester: Pytester, many_files, color_mapping
) -> None:
- result = testdir.runpytest("-v")
+ result = pytester.runpytest("-v")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
)
)
- def test_xdist_normal(self, many_files, testdir, monkeypatch):
+ def test_xdist_normal(self, many_files, pytester: Pytester, monkeypatch) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
- output = testdir.runpytest("-n2")
+ output = pytester.runpytest("-n2")
output.stdout.re_match_lines([r"[\.E]{40} \s+ \[100%\]"])
assert reason == message
-def test_line_with_reprcrash(monkeypatch):
+def test_line_with_reprcrash(monkeypatch: MonkeyPatch) -> None:
mocked_verbose_word = "FAILED"
mocked_pos = "some::nodeid"
actual = _get_line_with_reprcrash_message(config, rep(), width) # type: ignore
assert actual == expected
- if actual != "{} {}".format(mocked_verbose_word, mocked_pos):
+ if actual != f"{mocked_verbose_word} {mocked_pos}":
assert len(actual) <= width
assert wcswidth(actual) <= width
assert format_session_duration(seconds) == expected
-def test_collecterror(testdir):
- p1 = testdir.makepyfile("raise SyntaxError()")
- result = testdir.runpytest("-ra", str(p1))
+def test_collecterror(pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("raise SyntaxError()")
+ result = pytester.runpytest("-ra", str(p1))
result.stdout.fnmatch_lines(
[
"collected 0 items / 1 error",
)
-def test_no_summary_collecterror(testdir):
- p1 = testdir.makepyfile("raise SyntaxError()")
- result = testdir.runpytest("-ra", "--no-summary", str(p1))
+def test_no_summary_collecterror(pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("raise SyntaxError()")
+ result = pytester.runpytest("-ra", "--no-summary", str(p1))
result.stdout.no_fnmatch_line("*= ERRORS =*")
-def test_via_exec(testdir: Testdir) -> None:
- p1 = testdir.makepyfile("exec('def test_via_exec(): pass')")
- result = testdir.runpytest(str(p1), "-vv")
+def test_via_exec(pytester: Pytester) -> None:
+ p1 = pytester.makepyfile("exec('def test_via_exec(): pass')")
+ result = pytester.runpytest(str(p1), "-vv")
result.stdout.fnmatch_lines(
["test_via_exec.py::test_via_exec <- <string> PASSED*", "*= 1 passed in *"]
)
class TestCodeHighlight:
- def test_code_highlight_simple(self, testdir: Testdir, color_mapping) -> None:
- testdir.makepyfile(
+ def test_code_highlight_simple(self, pytester: Pytester, color_mapping) -> None:
+ pytester.makepyfile(
"""
def test_foo():
assert 1 == 10
"""
)
- result = testdir.runpytest("--color=yes")
- color_mapping.requires_ordered_markup(result)
+ result = pytester.runpytest("--color=yes")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
)
)
- def test_code_highlight_continuation(self, testdir: Testdir, color_mapping) -> None:
- testdir.makepyfile(
+ def test_code_highlight_continuation(
+ self, pytester: Pytester, color_mapping
+ ) -> None:
+ pytester.makepyfile(
"""
def test_foo():
print('''
'''); assert 0
"""
)
- result = testdir.runpytest("--color=yes")
- color_mapping.requires_ordered_markup(result)
+ result = pytester.runpytest("--color=yes")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
]
)
)
+
+
+def test_raw_skip_reason_skipped() -> None:
+ report = SimpleNamespace()
+ report.skipped = True
+ report.longrepr = ("xyz", 3, "Skipped: Just so")
+
+ reason = _get_raw_skip_reason(cast(TestReport, report))
+ assert reason == "Just so"
+
+
+def test_raw_skip_reason_xfail() -> None:
+ report = SimpleNamespace()
+ report.wasxfail = "reason: To everything there is a season"
+
+ reason = _get_raw_skip_reason(cast(TestReport, report))
+ assert reason == "To everything there is a season"
+
+
+def test_format_trimmed() -> None:
+ msg = "unconditional skip"
+
+ assert _format_trimmed(" ({}) ", msg, len(msg) + 4) == " (unconditional skip) "
+ assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) "
--- /dev/null
+import sys
+
+import pytest
+from _pytest.pytester import Pytester
+
+
+if sys.version_info < (3, 8):
+ pytest.skip("threadexception plugin needs Python>=3.8", allow_module_level=True)
+
+
+@pytest.mark.filterwarnings("default")
+def test_unhandled_thread_exception(pytester: Pytester) -> None:
+ pytester.makepyfile(
+ test_it="""
+ import threading
+
+ def test_it():
+ def oops():
+ raise ValueError("Oops")
+
+ t = threading.Thread(target=oops, name="MyThread")
+ t.start()
+ t.join()
+
+ def test_2(): pass
+ """
+ )
+ result = pytester.runpytest()
+ assert result.ret == 0
+ assert result.parseoutcomes() == {"passed": 2, "warnings": 1}
+ result.stdout.fnmatch_lines(
+ [
+ "*= warnings summary =*",
+ "test_it.py::test_it",
+ " * PytestUnhandledThreadExceptionWarning: Exception in thread MyThread",
+ " ",
+ " Traceback (most recent call last):",
+ " ValueError: Oops",
+ " ",
+ " warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg))",
+ ]
+ )
+
+
+@pytest.mark.filterwarnings("default")
+def test_unhandled_thread_exception_in_setup(pytester: Pytester) -> None:
+ pytester.makepyfile(
+ test_it="""
+ import threading
+ import pytest
+
+ @pytest.fixture
+ def threadexc():
+ def oops():
+ raise ValueError("Oops")
+ t = threading.Thread(target=oops, name="MyThread")
+ t.start()
+ t.join()
+
+ def test_it(threadexc): pass
+ def test_2(): pass
+ """
+ )
+ result = pytester.runpytest()
+ assert result.ret == 0
+ assert result.parseoutcomes() == {"passed": 2, "warnings": 1}
+ result.stdout.fnmatch_lines(
+ [
+ "*= warnings summary =*",
+ "test_it.py::test_it",
+ " * PytestUnhandledThreadExceptionWarning: Exception in thread MyThread",
+ " ",
+ " Traceback (most recent call last):",
+ " ValueError: Oops",
+ " ",
+ " warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg))",
+ ]
+ )
+
+
+@pytest.mark.filterwarnings("default")
+def test_unhandled_thread_exception_in_teardown(pytester: Pytester) -> None:
+ pytester.makepyfile(
+ test_it="""
+ import threading
+ import pytest
+
+ @pytest.fixture
+ def threadexc():
+ def oops():
+ raise ValueError("Oops")
+ yield
+ t = threading.Thread(target=oops, name="MyThread")
+ t.start()
+ t.join()
+
+ def test_it(threadexc): pass
+ def test_2(): pass
+ """
+ )
+ result = pytester.runpytest()
+ assert result.ret == 0
+ assert result.parseoutcomes() == {"passed": 2, "warnings": 1}
+ result.stdout.fnmatch_lines(
+ [
+ "*= warnings summary =*",
+ "test_it.py::test_it",
+ " * PytestUnhandledThreadExceptionWarning: Exception in thread MyThread",
+ " ",
+ " Traceback (most recent call last):",
+ " ValueError: Oops",
+ " ",
+ " warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg))",
+ ]
+ )
+
+
+@pytest.mark.filterwarnings("error::pytest.PytestUnhandledThreadExceptionWarning")
+def test_unhandled_thread_exception_warning_error(pytester: Pytester) -> None:
+ pytester.makepyfile(
+ test_it="""
+ import threading
+ import pytest
+
+ def test_it():
+ def oops():
+ raise ValueError("Oops")
+ t = threading.Thread(target=oops, name="MyThread")
+ t.start()
+ t.join()
+
+ def test_2(): pass
+ """
+ )
+ result = pytester.runpytest()
+ assert result.ret == pytest.ExitCode.TESTS_FAILED
+ assert result.parseoutcomes() == {"passed": 1, "failed": 1}
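
The new test file above exercises pytest's threadexception plugin. A minimal standalone sketch of the mechanism it builds on, threading.excepthook (Python 3.8+); the hook body and warning text are illustrative assumptions, not the plugin's actual code:

    import threading
    import warnings

    def note_thread_exception(args: threading.ExceptHookArgs) -> None:
        # args carries exc_type, exc_value, exc_traceback and the Thread object
        name = args.thread.name if args.thread is not None else "<unknown>"
        warnings.warn(f"Exception in thread {name}: {args.exc_value!r}")

    threading.excepthook = note_thread_exception  # replace the default stderr dump

    t = threading.Thread(target=lambda: 1 / 0, name="MyThread")
    t.start()
    t.join()  # the ZeroDivisionError reaches the hook instead of being printed
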
import os
import stat
import sys
+from pathlib import Path
from typing import Callable
from typing import cast
from typing import List
from _pytest.pathlib import make_numbered_dir
from _pytest.pathlib import maybe_delete_a_numbered_dir
from _pytest.pathlib import on_rm_rf_error
-from _pytest.pathlib import Path
from _pytest.pathlib import register_cleanup_lock_removal
from _pytest.pathlib import rm_rf
+from _pytest.pytester import Pytester
from _pytest.tmpdir import get_user
from _pytest.tmpdir import TempdirFactory
from _pytest.tmpdir import TempPathFactory
-def test_tmpdir_fixture(testdir):
- p = testdir.copy_example("tmpdir/tmpdir_fixture.py")
- results = testdir.runpytest(p)
+def test_tmpdir_fixture(pytester: Pytester) -> None:
+ p = pytester.copy_example("tmpdir/tmpdir_fixture.py")
+ results = pytester.runpytest(p)
results.stdout.fnmatch_lines(["*1 passed*"])
class TestTempdirHandler:
def test_mktemp(self, tmp_path):
config = cast(Config, FakeConfig(tmp_path))
- t = TempdirFactory(TempPathFactory.from_config(config))
+ t = TempdirFactory(
+ TempPathFactory.from_config(config, _ispytest=True), _ispytest=True
+ )
tmp = t.mktemp("world")
assert tmp.relto(t.getbasetemp()) == "world0"
tmp = t.mktemp("this")
"""#4425"""
monkeypatch.chdir(tmp_path)
config = cast(Config, FakeConfig("hello"))
- t = TempPathFactory.from_config(config)
+ t = TempPathFactory.from_config(config, _ispytest=True)
assert t.getbasetemp().resolve() == (tmp_path / "hello").resolve()
class TestConfigTmpdir:
- def test_getbasetemp_custom_removes_old(self, testdir):
- mytemp = testdir.tmpdir.join("xyz")
- p = testdir.makepyfile(
+ def test_getbasetemp_custom_removes_old(self, pytester: Pytester) -> None:
+ mytemp = pytester.path.joinpath("xyz")
+ p = pytester.makepyfile(
"""
def test_1(tmpdir):
pass
"""
)
- testdir.runpytest(p, "--basetemp=%s" % mytemp)
- mytemp.check()
- mytemp.ensure("hello")
+ pytester.runpytest(p, "--basetemp=%s" % mytemp)
+ assert mytemp.exists()
+ mytemp.joinpath("hello").touch()
- testdir.runpytest(p, "--basetemp=%s" % mytemp)
- mytemp.check()
- assert not mytemp.join("hello").check()
+ pytester.runpytest(p, "--basetemp=%s" % mytemp)
+ assert mytemp.exists()
+ assert not mytemp.joinpath("hello").exists()
testdata = [
@pytest.mark.parametrize("basename, is_ok", testdata)
-def test_mktemp(testdir, basename, is_ok):
- mytemp = testdir.tmpdir.mkdir("mytemp")
- p = testdir.makepyfile(
+def test_mktemp(pytester: Pytester, basename: str, is_ok: bool) -> None:
+ mytemp = pytester.mkdir("mytemp")
+ p = pytester.makepyfile(
"""
def test_abs_path(tmpdir_factory):
tmpdir_factory.mktemp('{}', numbered=False)
)
)
- result = testdir.runpytest(p, "--basetemp=%s" % mytemp)
+ result = pytester.runpytest(p, "--basetemp=%s" % mytemp)
if is_ok:
assert result.ret == 0
- assert mytemp.join(basename).check()
+ assert mytemp.joinpath(basename).exists()
else:
assert result.ret == 1
result.stdout.fnmatch_lines("*ValueError*")
-def test_tmpdir_always_is_realpath(testdir):
+def test_tmpdir_always_is_realpath(pytester: Pytester) -> None:
# tmpdir should be a realpath because after cd'ing into it,
# os.getcwd() returns the realpath anyway; handing out the
# symlinked path can thus easily result in path inequality.
# XXX if that proves to be a problem, consider using
# os.environ["PWD"]
- realtemp = testdir.tmpdir.mkdir("myrealtemp")
- linktemp = testdir.tmpdir.join("symlinktemp")
+ realtemp = pytester.mkdir("myrealtemp")
+ linktemp = pytester.path.joinpath("symlinktemp")
attempt_symlink_to(linktemp, str(realtemp))
- p = testdir.makepyfile(
+ p = pytester.makepyfile(
"""
def test_1(tmpdir):
import os
assert os.path.realpath(str(tmpdir)) == str(tmpdir)
"""
)
- result = testdir.runpytest("-s", p, "--basetemp=%s/bt" % linktemp)
+ result = pytester.runpytest("-s", p, "--basetemp=%s/bt" % linktemp)
assert not result.ret
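
An illustrative, POSIX-only sketch of the reasoning in the comment inside this test (os.symlink may require extra privileges on Windows):

    import os
    import shutil
    import tempfile

    base = tempfile.mkdtemp()
    real = os.path.join(base, "real")
    link = os.path.join(base, "link")
    os.mkdir(real)
    os.symlink(real, link)

    prev = os.getcwd()
    os.chdir(link)
    # getcwd() comes back fully resolved, so comparing it against the
    # symlinked path we just chdir'ed into can easily disagree:
    assert os.getcwd() == os.path.realpath(link)
    os.chdir(prev)
    shutil.rmtree(base)
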
-def test_tmp_path_always_is_realpath(testdir, monkeypatch):
+def test_tmp_path_always_is_realpath(pytester: Pytester, monkeypatch) -> None:
# for the reasoning, see the test_tmpdir_always_is_realpath test case
- realtemp = testdir.tmpdir.mkdir("myrealtemp")
- linktemp = testdir.tmpdir.join("symlinktemp")
+ realtemp = pytester.mkdir("myrealtemp")
+ linktemp = pytester.path.joinpath("symlinktemp")
attempt_symlink_to(linktemp, str(realtemp))
monkeypatch.setenv("PYTEST_DEBUG_TEMPROOT", str(linktemp))
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_1(tmp_path):
assert tmp_path.resolve() == tmp_path
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
-def test_tmpdir_too_long_on_parametrization(testdir):
- testdir.makepyfile(
+def test_tmpdir_too_long_on_parametrization(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", ["1"*1000])
tmpdir.ensure("hello")
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
-def test_tmpdir_factory(testdir):
- testdir.makepyfile(
+def test_tmpdir_factory(pytester: Pytester) -> None:
+ pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope='session')
assert session_dir.isdir()
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
-def test_tmpdir_fallback_tox_env(testdir, monkeypatch):
+def test_tmpdir_fallback_tox_env(pytester: Pytester, monkeypatch) -> None:
"""Test that tmpdir works even if environment variables required by getpass
module are missing (#1010).
"""
monkeypatch.delenv("USER", raising=False)
monkeypatch.delenv("USERNAME", raising=False)
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_some(tmpdir):
assert tmpdir.isdir()
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no os.getuid on windows")
-def test_tmpdir_fallback_uid_not_found(testdir):
+def test_tmpdir_fallback_uid_not_found(pytester: Pytester) -> None:
"""Test that tmpdir works even if the current process's user id does not
correspond to a valid user.
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
def test_some(tmpdir):
assert tmpdir.isdir()
"""
)
- reprec = testdir.inline_run()
+ reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
def test_lock_register_cleanup_removal(self, tmp_path: Path) -> None:
lock = create_cleanup_lock(tmp_path)
- registry = [] # type: List[Callable[..., None]]
+ registry: List[Callable[..., None]] = []
register_cleanup_lock_removal(lock, register=registry.append)
(cleanup_func,) = registry
assert Path(tmpdir) == tmp_path
-def test_basetemp_with_read_only_files(testdir):
+def test_basetemp_with_read_only_files(pytester: Pytester) -> None:
"""Integration test for #5524"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import os
import stat
os.chmod(str(fn), mode & ~stat.S_IREAD)
"""
)
- result = testdir.runpytest("--basetemp=tmp")
+ result = pytester.runpytest("--basetemp=tmp")
assert result.ret == 0
# run a second time to ensure we don't crash
- result = testdir.runpytest("--basetemp=tmp")
+ result = pytester.runpytest("--basetemp=tmp")
assert result.ret == 0
import pytest
from _pytest.config import ExitCode
+from _pytest.pytester import Testdir
def test_simple_unittest(testdir):
# will crash both at test time and at teardown
"""
)
- # Ignore DeprecationWarning (for `cmp`) from attrs through twisted,
- # for stable test results.
- result = testdir.runpytest(
- "-vv", "-oconsole_output_style=classic", "-W", "ignore::DeprecationWarning"
- )
+ result = testdir.runpytest("-vv", "-oconsole_output_style=classic")
result.stdout.fnmatch_lines(
[
"test_trial_error.py::TC::test_four FAILED",
assert result.ret == 1
-@pytest.mark.parametrize(
- "fix_type, stmt", [("fixture", "return"), ("yield_fixture", "yield")]
-)
-def test_unittest_setup_interaction(testdir, fix_type, stmt):
+@pytest.mark.parametrize("stmt", ["return", "yield"])
+def test_unittest_setup_interaction(testdir: Testdir, stmt: str) -> None:
testdir.makepyfile(
"""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
- @pytest.{fix_type}(scope="class", autouse=True)
+ @pytest.fixture(scope="class", autouse=True)
def perclass(self, request):
request.cls.hello = "world"
{stmt}
- @pytest.{fix_type}(scope="function", autouse=True)
+ @pytest.fixture(scope="function", autouse=True)
def perfunction(self, request):
request.instance.funcname = request.function.__name__
{stmt}
def test_classattr(self):
assert self.__class__.hello == "world"
""".format(
- fix_type=fix_type, stmt=stmt
+ stmt=stmt
)
)
result = testdir.runpytest()
)
def test_setup_inheritance_skipping(testdir, test_name, expected_outcome):
"""Issue #4700"""
- testdir.copy_example("unittest/{}".format(test_name))
+ testdir.copy_example(f"unittest/{test_name}")
result = testdir.runpytest()
- result.stdout.fnmatch_lines(["* {} in *".format(expected_outcome)])
+ result.stdout.fnmatch_lines([f"* {expected_outcome} in *"])
def test_BdbQuit(testdir):
We delay the normal tearDown() calls when --pdb is given, so this test ensures
tearDown() is eventually called, avoiding memory leaks when using --pdb.
"""
- teardowns = [] # type: List[str]
+ teardowns: List[str] = []
monkeypatch.setattr(
pytest, "test_pdb_teardown_called_teardowns", teardowns, raising=False
)
@pytest.mark.parametrize("mark", ["@unittest.skip", "@pytest.mark.skip"])
def test_pdb_teardown_skipped(testdir, monkeypatch, mark: str) -> None:
"""With --pdb, setUp and tearDown should not be called for skipped tests."""
- tracked = [] # type: List[str]
+ tracked: List[str] = []
monkeypatch.setattr(pytest, "test_pdb_teardown_skipped", tracked, raising=False)
testdir.makepyfile(
"*1 passed*",
]
result.stdout.fnmatch_lines(expected_lines)
+
+
+@pytest.mark.skipif(
+ sys.version_info < (3, 8), reason="Feature introduced in Python 3.8"
+)
+def test_do_class_cleanups_on_success(testdir):
+ testpath = testdir.makepyfile(
+ """
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ values = []
+ @classmethod
+ def setUpClass(cls):
+ def cleanup():
+ cls.values.append(1)
+ cls.addClassCleanup(cleanup)
+ def test_one(self):
+ pass
+ def test_two(self):
+ pass
+ def test_cleanup_called_exactly_once():
+ assert MyTestCase.values == [1]
+ """
+ )
+ reprec = testdir.inline_run(testpath)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 0
+ assert passed == 3
+
+
+@pytest.mark.skipif(
+ sys.version_info < (3, 8), reason="Feature introduced in Python 3.8"
+)
+def test_do_class_cleanups_on_setupclass_failure(testdir):
+ testpath = testdir.makepyfile(
+ """
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ values = []
+ @classmethod
+ def setUpClass(cls):
+ def cleanup():
+ cls.values.append(1)
+ cls.addClassCleanup(cleanup)
+ assert False
+ def test_one(self):
+ pass
+ def test_cleanup_called_exactly_once():
+ assert MyTestCase.values == [1]
+ """
+ )
+ reprec = testdir.inline_run(testpath)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 1
+ assert passed == 1
+
+
+@pytest.mark.skipif(
+ sys.version_info < (3, 8), reason="Feature introduced in Python 3.8"
+)
+def test_do_class_cleanups_on_teardownclass_failure(testdir):
+ testpath = testdir.makepyfile(
+ """
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ values = []
+ @classmethod
+ def setUpClass(cls):
+ def cleanup():
+ cls.values.append(1)
+ cls.addClassCleanup(cleanup)
+ @classmethod
+ def tearDownClass(cls):
+ assert False
+ def test_one(self):
+ pass
+ def test_two(self):
+ pass
+ def test_cleanup_called_exactly_once():
+ assert MyTestCase.values == [1]
+ """
+ )
+ reprec = testdir.inline_run(testpath)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert passed == 3
+
+
+def test_do_cleanups_on_success(testdir):
+ testpath = testdir.makepyfile(
+ """
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ values = []
+ def setUp(self):
+ def cleanup():
+ self.values.append(1)
+ self.addCleanup(cleanup)
+ def test_one(self):
+ pass
+ def test_two(self):
+ pass
+ def test_cleanup_called_the_right_number_of_times():
+ assert MyTestCase.values == [1, 1]
+ """
+ )
+ reprec = testdir.inline_run(testpath)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 0
+ assert passed == 3
+
+
+def test_do_cleanups_on_setup_failure(testdir):
+ testpath = testdir.makepyfile(
+ """
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ values = []
+ def setUp(self):
+ def cleanup():
+ self.values.append(1)
+ self.addCleanup(cleanup)
+ assert False
+ def test_one(self):
+ pass
+ def test_two(self):
+ pass
+ def test_cleanup_called_the_right_number_of_times():
+ assert MyTestCase.values == [1, 1]
+ """
+ )
+ reprec = testdir.inline_run(testpath)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 2
+ assert passed == 1
+
+
+def test_do_cleanups_on_teardown_failure(testdir):
+ testpath = testdir.makepyfile(
+ """
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ values = []
+ def setUp(self):
+ def cleanup():
+ self.values.append(1)
+ self.addCleanup(cleanup)
+ def tearDown(self):
+ assert False
+ def test_one(self):
+ pass
+ def test_two(self):
+ pass
+ def test_cleanup_called_the_right_number_of_times():
+ assert MyTestCase.values == [1, 1]
+ """
+ )
+ reprec = testdir.inline_run(testpath)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 2
+ assert passed == 1
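
For reference, a minimal pure-unittest sketch (Python 3.8+ for addClassCleanup) of the semantics the new tests above assert pytest preserves: a registered class cleanup runs exactly once even when setUpClass fails.

    import unittest

    class Demo(unittest.TestCase):
        values = []

        @classmethod
        def setUpClass(cls):
            cls.addClassCleanup(cls.values.append, 1)  # registered before the failure
            raise RuntimeError("boom")  # setUpClass fails; no test in Demo runs

        def test_never_runs(self):
            pass

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(Demo)
    unittest.TextTestRunner(verbosity=0).run(suite)
    assert Demo.values == [1]  # the cleanup still ran, exactly once
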
--- /dev/null
+import sys
+
+import pytest
+from _pytest.pytester import Pytester
+
+
+if sys.version_info < (3, 8):
+ pytest.skip("unraisableexception plugin needs Python>=3.8", allow_module_level=True)
+
+
+@pytest.mark.filterwarnings("default")
+def test_unraisable(pytester: Pytester) -> None:
+ pytester.makepyfile(
+ test_it="""
+ class BrokenDel:
+ def __del__(self):
+ raise ValueError("del is broken")
+
+ def test_it():
+ obj = BrokenDel()
+ del obj
+
+ def test_2(): pass
+ """
+ )
+ result = pytester.runpytest()
+ assert result.ret == 0
+ assert result.parseoutcomes() == {"passed": 2, "warnings": 1}
+ result.stdout.fnmatch_lines(
+ [
+ "*= warnings summary =*",
+ "test_it.py::test_it",
+ " * PytestUnraisableExceptionWarning: Exception ignored in: <function BrokenDel.__del__ at *>",
+ " ",
+ " Traceback (most recent call last):",
+ " ValueError: del is broken",
+ " ",
+ " warnings.warn(pytest.PytestUnraisableExceptionWarning(msg))",
+ ]
+ )
+
+
+@pytest.mark.filterwarnings("default")
+def test_unraisable_in_setup(pytester: Pytester) -> None:
+ pytester.makepyfile(
+ test_it="""
+ import pytest
+
+ class BrokenDel:
+ def __del__(self):
+ raise ValueError("del is broken")
+
+ @pytest.fixture
+ def broken_del():
+ obj = BrokenDel()
+ del obj
+
+ def test_it(broken_del): pass
+ def test_2(): pass
+ """
+ )
+ result = pytester.runpytest()
+ assert result.ret == 0
+ assert result.parseoutcomes() == {"passed": 2, "warnings": 1}
+ result.stdout.fnmatch_lines(
+ [
+ "*= warnings summary =*",
+ "test_it.py::test_it",
+ " * PytestUnraisableExceptionWarning: Exception ignored in: <function BrokenDel.__del__ at *>",
+ " ",
+ " Traceback (most recent call last):",
+ " ValueError: del is broken",
+ " ",
+ " warnings.warn(pytest.PytestUnraisableExceptionWarning(msg))",
+ ]
+ )
+
+
+@pytest.mark.filterwarnings("default")
+def test_unraisable_in_teardown(pytester: Pytester) -> None:
+ pytester.makepyfile(
+ test_it="""
+ import pytest
+
+ class BrokenDel:
+ def __del__(self):
+ raise ValueError("del is broken")
+
+ @pytest.fixture
+ def broken_del():
+ yield
+ obj = BrokenDel()
+ del obj
+
+ def test_it(broken_del): pass
+ def test_2(): pass
+ """
+ )
+ result = pytester.runpytest()
+ assert result.ret == 0
+ assert result.parseoutcomes() == {"passed": 2, "warnings": 1}
+ result.stdout.fnmatch_lines(
+ [
+ "*= warnings summary =*",
+ "test_it.py::test_it",
+ " * PytestUnraisableExceptionWarning: Exception ignored in: <function BrokenDel.__del__ at *>",
+ " ",
+ " Traceback (most recent call last):",
+ " ValueError: del is broken",
+ " ",
+ " warnings.warn(pytest.PytestUnraisableExceptionWarning(msg))",
+ ]
+ )
+
+
+@pytest.mark.filterwarnings("error::pytest.PytestUnraisableExceptionWarning")
+def test_unraisable_warning_error(pytester: Pytester) -> None:
+ pytester.makepyfile(
+ test_it="""
+ class BrokenDel:
+ def __del__(self) -> None:
+ raise ValueError("del is broken")
+
+ def test_it() -> None:
+ obj = BrokenDel()
+ del obj
+
+ def test_2(): pass
+ """
+ )
+ result = pytester.runpytest()
+ assert result.ret == pytest.ExitCode.TESTS_FAILED
+ assert result.parseoutcomes() == {"passed": 1, "failed": 1}
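
As with the threadexception sketch earlier, a minimal standalone illustration of the hook this plugin builds on, sys.unraisablehook (Python 3.8+); the hook body and warning text are assumptions, not the plugin's actual code:

    import sys
    import warnings

    def note_unraisable(unraisable) -> None:
        # unraisable carries exc_type, exc_value, exc_traceback, err_msg, object
        context = unraisable.err_msg or "Exception ignored in"
        warnings.warn(f"{context}: {unraisable.object!r} ({unraisable.exc_value!r})")

    sys.unraisablehook = note_unraisable  # replace the default stderr dump

    class BrokenDel:
        def __del__(self):
            raise ValueError("del is broken")

    obj = BrokenDel()
    del obj  # CPython: refcount hits zero, __del__ raises, the hook is invoked
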
import inspect
+from typing import Type
-import _pytest.warning_types
import pytest
+from _pytest import warning_types
+from _pytest.pytester import Pytester
@pytest.mark.parametrize(
"warning_class",
[
w
- for n, w in vars(_pytest.warning_types).items()
+ for n, w in vars(warning_types).items()
if inspect.isclass(w) and issubclass(w, Warning)
],
)
-def test_warning_types(warning_class):
+def test_warning_types(warning_class: Type[Warning]) -> None:
"""Make sure all warnings declared in _pytest.warning_types are displayed as coming
from 'pytest' instead of the internal module (#5452).
"""
@pytest.mark.filterwarnings("error::pytest.PytestWarning")
-def test_pytest_warnings_repr_integration_test(testdir):
+def test_pytest_warnings_repr_integration_test(pytester: Pytester) -> None:
"""Small integration test to ensure our small hack of setting the __module__ attribute
of our warnings actually works (#5452).
"""
- testdir.makepyfile(
+ pytester.makepyfile(
"""
import pytest
import warnings
warnings.warn(pytest.PytestWarning("some warning"))
"""
)
- result = testdir.runpytest()
+ result = pytester.runpytest()
result.stdout.fnmatch_lines(["E pytest.PytestWarning: some warning"])
@pytest.fixture
def capwarn(self, testdir):
class CapturedWarnings:
- captured = (
- []
- ) # type: List[Tuple[warnings.WarningMessage, Optional[Tuple[str, int, str]]]]
+ captured: List[
+ Tuple[warnings.WarningMessage, Optional[Tuple[str, int, str]]]
+ ] = []
@classmethod
def pytest_warning_recorded(cls, warning_message, when, nodeid, location):
file, _, func = location
assert "could not load initial conftests" in str(warning.message)
- assert "config{sep}__init__.py".format(sep=os.sep) in file
+ assert f"config{os.sep}__init__.py" in file
assert func == "_preparse"
@pytest.mark.filterwarnings("default")
file, _, func = location
assert "skipped plugin 'some_plugin': thing" in str(warning.message)
- assert "config{sep}__init__.py".format(sep=os.sep) in file
+ assert f"config{os.sep}__init__.py" in file
assert func == "_warn_about_skipped_plugins"
def test_issue4445_issue5928_mark_generator(self, testdir):
[tox]
isolated_build = True
-minversion = 3.5.3
+minversion = 3.20.0
distshare = {homedir}/.tox/distshare
# make sure to update environment list in travis.yml and appveyor.yml
envlist =
linting
- py35
py36
py37
py38
extras = testing
deps =
doctesting: PyYAML
- oldattrs: attrs==17.4.0
- oldattrs: hypothesis<=4.38.1
- numpy: numpy
- pexpect: pexpect
+ numpy: numpy>=1.19.4
+ pexpect: pexpect>=4.8.0
pluggymaster: git+https://github.com/pytest-dev/pluggy.git@master
- pygments
+ pygments>=2.7.2
unittestextras: twisted
unittestextras: asynctest
- xdist: pytest-xdist>=1.13
+ xdist: pytest-xdist>=2.1.0
+ xdist: -e .
{env:_PYTEST_TOX_EXTRA_DEP:}
[testenv:linting]
skip_install = True
basepython = python3
-deps = pre-commit>=1.11.0
+deps = pre-commit>=2.9.3
commands = pre-commit run --all-files --show-diff-on-failure {posargs:}
-[testenv:mypy]
-extras = checkqa-mypy, testing
-commands = mypy {posargs:src testing}
-
-[testenv:mypy-diff]
-extras = checkqa-mypy, testing
-deps =
- lxml
- diff-cover
-commands =
- -mypy --cobertura-xml-report {envtmpdir} {posargs:src testing}
- diff-cover --fail-under=100 --compare-branch={env:DIFF_BRANCH:origin/{env:GITHUB_BASE_REF:master}} {envtmpdir}/cobertura.xml
-
[testenv:docs]
basepython = python3
usedevelop = True
changedir = doc/en
basepython = python3
passenv = SETUPTOOLS_SCM_PRETEND_VERSION
+# TODO: When setuptools-scm 5.0.0 is released, use SETUPTOOLS_SCM_PRETEND_VERSION_FOR_PYTEST
+# and remove the next line.
+install_command=python -m pip --use-deprecated=legacy-resolver install {opts} {packages}
deps =
dataclasses
PyYAML
download=true
install_command=python -m pip --use-feature=2020-resolver install {opts} {packages}
changedir = testing/plugins_integration
-deps =
- anyio[curio,trio]
- django
- pytest-asyncio
- pytest-bdd
- pytest-cov
- pytest-django
- pytest-flakes
- pytest-html
- pytest-mock
- pytest-sugar
- pytest-trio
- pytest-twisted
- twisted
- pytest-xvfb
+deps = -rtesting/plugins_integration/requirements.txt
setenv =
PYTHONPATH=.
+ # needed because pytest-rerunfailures requires pytest 6.2+; can be removed after 6.2.0 is released
+ SETUPTOOLS_SCM_PRETEND_VERSION=6.2.0a1
commands =
pip check
pytest bdd_wallet.py
pytest --cov=. simple_integration.py
pytest --ds=django_settings simple_integration.py
pytest --html=simple.html simple_integration.py
+ pytest --reruns 5 simple_integration.py
pytest pytest_anyio_integration.py
pytest pytest_asyncio_integration.py
pytest pytest_mock_integration.py
deps =
colorama
github3.py
- pre-commit>=1.11.0
+ pre-commit>=2.9.3
wheel
towncrier
commands = python scripts/release.py {posargs}