cache:
- '%LOCALAPPDATA%\pip\Cache'
-matrix:
- allow_failures:
- - USE_PYTEST: true
-
environment:
global:
MINGW_32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin
- PYTHON: C:\Python34-x64
PYTHON_VERSION: 3.4
PYTHON_ARCH: 64
- USE_PYTEST: true
+ TEST_MODE: fast
- PYTHON: C:\Python36
PYTHON_VERSION: 3.6
PYTHON_ARCH: 32
TEST_MODE: fast
+ - PYTHON: C:\Python37
+ PYTHON_VERSION: 3.7
+ PYTHON_ARCH: 32
+ TEST_MODE: fast
+
- PYTHON: C:\Python27-x64
PYTHON_VERSION: 2.7
PYTHON_ARCH: 64
TEST_MODE: full
+ - PYTHON: C:\Python37-x64
+ PYTHON_VERSION: 3.7
+ PYTHON_ARCH: 64
+ TEST_MODE: full
+
init:
- "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%"
- "ECHO \"%APPVEYOR_SCHEDULED_BUILD%\""
# Here, we add MinGW to the path to be able to link an OpenBLAS.dll
# We then use the import library from the DLL to compile with MSVC
- ps: |
- If ($env:USE_PYTEST -eq "true") {
- pip install -e .
- } Else {
- pip wheel -v -v -v --wheel-dir=dist .
-
- # For each wheel that pip has placed in the "dist" directory
- # First, upload the wheel to the "artifacts" tab and then
- # install the wheel. If we have only built numpy (as is the case here),
- # then there will be one wheel to install.
-
- # This method is more representative of what will be distributed,
- # because it actually tests what the built wheels will be rather than
- # what 'setup.py install' will do and at it uploads the wheels so that
- # they can be inspected.
-
- ls dist -r | Foreach-Object {
- appveyor PushArtifact $_.FullName
- pip install $_.FullName
- }
+ pip wheel -v -v -v --wheel-dir=dist .
+
+ # For each wheel that pip has placed in the "dist" directory
+ # First, upload the wheel to the "artifacts" tab and then
+ # install the wheel. If we have only built numpy (as is the case here),
+ # then there will be one wheel to install.
+
+ # This method is more representative of what will be distributed,
+ # because it actually tests what the built wheels will be rather than
+ # what 'setup.py install' will do, and it uploads the wheels so that
+ # they can be inspected.
+
+ ls dist -r | Foreach-Object {
+ Push-AppveyorArtifact $_.FullName
+ pip install $_.FullName
}
test_script:
- - if [%USE_PYTEST%]==[true] pytest -n3 --junitxml=junit-results.xml
- - if [%USE_PYTEST%]==[] python runtests.py -v -n -m %TEST_MODE%
+ - python runtests.py -v -n -m %TEST_MODE% -- --junitxml=%cd%\junit-results.xml
after_build:
# Remove old or huge cache files to hopefully not exceed the 1GB cache limit.
docker:
# CircleCI maintains a library of pre-built images
# documented at https://circleci.com/docs/2.0/circleci-images/
- - image: circleci/python:3.6.1
+ - image: circleci/python:3.6.6
working_directory: ~/repo
python3 -m venv venv
. venv/bin/activate
pip install cython sphinx matplotlib
+ sudo apt-get update
+ sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex
- run:
name: build numpy
pip install --upgrade pip setuptools
pip install cython
pip install .
+ pip install scipy
- run:
name: build devdocs
if [ "${CIRCLE_BRANCH}" == "master" ]; then
touch doc/neps/_build/html/.nojekyll
- ./tools/push_to_repo.py doc/neps/_build/html \
+ ./tools/ci/push_docs_to_repo.py doc/neps/_build/html \
git@github.com:numpy/neps.git \
--committer "numpy-circleci-bot" \
--email "numpy-circleci-bot@nomail" \
--- /dev/null
+[run]
+branch = True
+include = */numpy/*
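+
+# A minimal sketch of how this configuration might be exercised locally,
+# assuming the coverage.py package is installed (the commands below are
+# illustrative and not part of this change):
+#
+#   coverage run -m pytest numpy
+#   coverage report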
ehthumbs.db
Thumbs.db
+# pytest generated files #
+##########################
+/.pytest_cache
+
# Things specific to this project #
###################################
numpy/core/__svn_version__.py
Abdul Muneer <abdulmuneer@gmail.com> abdulmuneer <abdulmuneer@gmail.com>
Adam Ginsburg <adam.g.ginsburg@gmail.com> Adam Ginsburg <keflavich@gmail.com>
Albert Jornet Puig <albert.jornet@ic3.cat> jurnix <albert.jornet@ic3.cat>
-Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
-Alexander Shadchin <alexandr.shadchin@gmail.com> Alexandr Shadchin <alexandr.shadchin@gmail.com>
Alex Griffing <argriffi@ncsu.edu> alex <argriffi@ncsu.edu>
Alex Griffing <argriffi@ncsu.edu> argriffing <argriffi@ncsu.edu>
Alex Griffing <argriffi@ncsu.edu> argriffing <argriffing@gmail.com>
Alex Griffing <argriffi@ncsu.edu> argriffing <argriffing@users.noreply.github.com>
+Alex Thomas <alexthomas93@users.noreply.github.com> alexthomas93 <alexthomas93@users.noreply.github.com>
+Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
+Alexander Shadchin <alexandr.shadchin@gmail.com> Alexandr Shadchin <alexandr.shadchin@gmail.com>
Alexander Shadchin <alexandr.shadchin@gmail.com> shadchin <alexandr.shadchin@gmail.com>
Allan Haldane <allan.haldane@gmail.com> ahaldane <ealloc@gmail.com>
Alok Singhal <gandalf013@gmail.com> Alok Singhal <alok@merfinllc.com>
Anne Archibald <peridot.faceted@gmail.com> Anne Archibald <archibald@astron.nl>
Anže Starič <anze.staric@gmail.com> astaric <anze.staric@gmail.com>
Aron Ahmadia <aron@ahmadia.net> ahmadia <aron@ahmadia.net>
+Aarthi Agurusa <agurusa@gmail.com> agurusa <agurusa@gmail.com>
Arun Persaud <apersaud@lbl.gov> Arun Persaud <arun@nubati.net>
Åsmund Hjulstad <ahju@statoil.com> Åsmund Hjulstad <asmund@hjulstad.com>
Auke Wiggers <wiggers.auke@gmail.com> auke <wiggers.auke@gmail.com>
Benjamin Root <ben.v.root@gmail.com> weathergod <?@?>
Bertrand Lefebvre <bertrand.l3f@gmail.com> bertrand <bertrand.l3f@gmail.com>
Bertrand Lefebvre <bertrand.l3f@gmail.com> Bertrand <bertrand.l3f@gmail.com>
+Bob Eldering <eldering@jive.eu> bobeldering <eldering@jive.eu>
Brett R Murphy <bmurphy@enthought.com> brettrmurphy <bmurphy@enthought.com>
Bryan Van de Ven <bryanv@continuum.io> Bryan Van de Ven <bryan@Laptop-3.local>
Bryan Van de Ven <bryanv@continuum.io> Bryan Van de Ven <bryan@laptop.local>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homeier <dhomeie@gwdg.de>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homeir <derek@astro.phsik.uni-goettingen.de>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homier <derek@astro.physik.uni-goettingen.de>
+Derrick Williams <myutat@gmail.com> derrick <myutat@gmail.com>
+Dmitriy Shalyga <zuko3d@gmail.com> zuko3d <zuko3d@gmail.com>
Egor Zindy <ezindy@gmail.com> zindy <ezindy@gmail.com>
Endolith <endolith@gmail.com>
Eric Fode <ericfode@gmail.com> Eric Fode <ericfode@linuxlaptop.(none)>
Greg Yang <sorcererofdm@gmail.com> eulerreich <sorcererofdm@gmail.com>
Greg Young <gfyoung17@gmail.com> gfyoung <gfyoung17@gmail.com>
Greg Young <gfyoung17@gmail.com> gfyoung <gfyoung@mit.edu>
+Guo Ci <zguoci@gmail.com> guoci <zguoci@gmail.com>
Han Genuit <hangenuit@gmail.com> 87 <hangenuit@gmail.com>
Han Genuit <hangenuit@gmail.com> hangenuit@gmail.com <hangenuit@gmail.com>
Han Genuit <hangenuit@gmail.com> Han <hangenuit@gmail.com>
Hanno Klemm <hanno.klemm@maerskoil.com> hklemm <hanno.klemm@maerskoil.com>
Hemil Desai <desai38@purdue.edu> hemildesai <desai38@purdue.edu>
+Hiroyuki V. Yamazaki <hiroyuki.vincent.yamazaki@gmail.com> hvy <hiroyuki.vincent.yamazaki@gmail.com>
+Gerhard Hobler <gerhard.hobler@tuwien.ac.at> hobler <gerhard.hobler@tuwien.ac.at>
Irvin Probst <irvin.probst@ensta-bretagne.fr> I--P <irvin.probst@ensta-bretagne.fr>
Jaime Fernandez <jaime.frio@gmail.com> Jaime Fernandez <jaime.fernandez@hp.com>
Jaime Fernandez <jaime.frio@gmail.com> jaimefrio <jaime.frio@gmail.com>
Naveen Arunachalam <notatroll.troll@gmail.com> naveenarun <notatroll.troll@gmail.com>
Nicolas Scheffer <nicolas.scheffer@sri.com> Nicolas Scheffer <scheffer@speech.sri.com>
Nicholas A. Del Grosso <delgrosso@bio.lmu.de> nickdg <delgrosso@bio.lmu.de>
+Nick Minkyu Lee <mknicklee@protonmail.com> fivemok <9394929+fivemok@users.noreply.github.com>
Ondřej Čertík <ondrej.certik@gmail.com> Ondrej Certik <ondrej.certik@gmail.com>
Óscar Villellas Guillén <oscar.villellas@continuum.io> ovillellas <oscar.villellas@continuum.io>
Pat Miller <patmiller@localhost> patmiller <patmiller@localhost>
Saurabh Mehta <e.samehta@gmail.com>
Sebastian Berg <sebastian@sipsolutions.net> seberg <sebastian@sipsolutions.net>
Shota Kawabuchi <shota.kawabuchi+GitHub@gmail.com> skwbc <shota.kawabuchi+GitHub@gmail.com>
+Siavash Eliasi <siavashserver@gmail.com> siavashserver <siavashserver@gmail.com>
Stefan van der Walt <stefanv@berkeley.edu> Stefan van der Walt <sjvdwalt@gmail.com>
Stefan van der Walt <stefanv@berkeley.edu> Stefan van der Walt <stefan@sun.ac.za>
Stephan Hoyer <shoyer@gmail.com> Stephan Hoyer <shoyer@climate.com>
Wojtek Ruszczewski <git@wr.waw.pl> wrwrwr <git@wr.waw.pl>
Zixu Zhao <zixu.zhao.tireless@gmail.com> ZZhaoTireless <zixu.zhao.tireless@gmail.com>
Ziyan Zhou <ziyan.zhou@mujin.co.jp> Ziyan <ziyan.zhou@mujin.co.jp>
+luzpaz <luzpaz@users.noreply.github.com> luz.paz <luzpaz@users.noreply.github.com>
- 3.4
- 3.5
- 3.6
- - 3.7-dev
matrix:
include:
+ - python: 3.7
+ dist: xenial # Required for Python 3.7
+ sudo: true # travis-ci/travis-ci#9069
- python: 3.6
- env: USE_CHROOT=1 ARCH=i386 DIST=artful PYTHON=3.6
+ env: USE_CHROOT=1 ARCH=i386 DIST=bionic PYTHON=3.6
sudo: true
addons:
apt:
+ update: true
packages:
+ - dpkg
- debootstrap
- python: 3.4
env: USE_DEBUG=1
- cython3-dbg
- python3-dbg
- python3-dev
- - python3-nose
- python3-setuptools
- python: 3.6
env: USE_WHEEL=1 RUN_FULL_TESTS=1
+ If this is your first time contributing to a project on GitHub, please read
through our
-[guide to contributing to numpy](http://docs.scipy.org/doc/numpy-dev/dev/index.html)
+[guide to contributing to numpy](http://docs.scipy.org/doc/numpy/dev/index.html)
+ If you have contributed to other projects on GitHub you can go straight to our
-[development workflow](http://docs.scipy.org/doc/numpy-dev/dev/gitwash/development_workflow.html)
+[development workflow](http://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html)
Either way, please be sure to follow our
-[convention for commit messages](http://docs.scipy.org/doc/numpy-dev/dev/gitwash/development_workflow.html#writing-the-commit-message).
+[convention for commit messages](http://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message).
If you are writing new C code, please follow the style described in
``doc/C_STYLE_GUIDE``.
**IMPORTANT**: the below notes are about building NumPy, which for most users
is *not* the recommended way to install NumPy. Instead, use either a complete
scientific Python distribution (recommended) or a binary installer - see
-http://scipy.org/install.html.
+https://scipy.org/install.html.
.. Contents::
2) Cython >= 0.19 (for development versions of numpy, not for released
versions)
-3) nose__ (optional) 1.0 or later
+3) pytest__ (optional) 1.15 or later
This is required for testing numpy, but not for using it.
Python__ http://www.python.org
-nose__ http://nose.readthedocs.io
+pytest__ https://pytest.readthedocs.io
.. note::
If you want to build NumPy in order to work on NumPy itself, use
``runtests.py``. For more details, see
- http://docs.scipy.org/doc/numpy-dev/dev/development_environment.html
+ https://docs.scipy.org/doc/numpy/dev/development_environment.html
.. note::
More extensive information on building NumPy (and Scipy) is maintained at
- http://scipy.org/scipylib/building/index.html
+ https://scipy.github.io/devdocs/building/
Basic Installation
itself is still available at https://github.com/numpy/numpy-vendor, but not
recommended for use anymore.
-MingwPy__ http://mingwpy.github.io
+MingwPy__ https://mingwpy.github.io
Building with optimized BLAS support
The Intel compilers work with Intel MKL, see the application note linked above.
MingwPy__ works with OpenBLAS.
For an overview of the state of BLAS/LAPACK libraries on Windows, see
-`here <http://mingwpy.github.io/blas_lapack.html>`_.
+`here <https://mingwpy.github.io/blas_lapack.html>`_.
OS X
----
============
If you run into build issues and need help, the NumPy
-`mailing list <http://scipy.org/scipylib/mailing-lists.html>`_ is the best
+`mailing list <https://scipy.org/scipylib/mailing-lists.html>`_ is the best
place to ask. If the issue is clearly a bug in NumPy, please file an issue (or
even better, a pull request) at https://github.com/numpy/numpy.
recursive-include doc/release *
recursive-include doc/source *
recursive-include doc/sphinxext *
+recursive-include tools/allocation_tracking *
recursive-include tools/swig *
recursive-include doc/scipy-sphinx-theme *
-# <img alt="NumPy" src="branding/icons/numpylogo.svg" height="60">
+# <img alt="NumPy" src="https://cdn.rawgit.com/numpy/numpy/master/branding/icons/numpylogo.svg" height="60">
[](https://travis-ci.org/numpy/numpy)
[](https://ci.appveyor.com/project/charris/numpy)
--- /dev/null
+"""Benchmarks for `numpy.lib`."""
+
+
+from __future__ import absolute_import, division, print_function
+
+from .common import Benchmark
+
+import numpy as np
+
+
+class Pad(Benchmark):
+ """Benchmarks for `numpy.pad`."""
+
+ param_names = ["shape", "pad_width", "mode"]
+ params = [
+ [(1000,), (10, 100), (10, 10, 10)],
+ [1, 3, (0, 5)],
+ ["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"],
+ ]
+
+ def setup(self, shape, pad_width, mode):
+ self.array = np.empty(shape)
+
+ def time_pad(self, shape, pad_width, mode):
+ np.pad(self.array, pad_width, mode)
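+
+
+# A hypothetical way to exercise just this benchmark locally with airspeed
+# velocity (assumes ``asv`` is installed; the command is illustrative and
+# not part of this change):
+#
+#     asv run --python=same --bench Pad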
high = self.high[name]
np.random.randint(0, high + 1, size=10**5, dtype=name)
+
+class Permutation(Benchmark):
+ def setup(self):
+ self.n = 10000
+ self.a_1d = np.random.random_sample(self.n)
+ self.a_2d = np.random.random_sample((self.n, 2))
+
+ def time_permutation_1d(self):
+ np.random.permutation(self.a_1d)
+
+ def time_permutation_2d(self):
+ np.random.permutation(self.a_2d)
+
+ def time_permutation_int(self):
+ np.random.permutation(self.n)
'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj', 'conjugate',
'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide', 'divmod',
'equal', 'exp', 'exp2', 'expm1', 'fabs', 'float_power', 'floor',
- 'floor_divide', 'fmax', 'fmin', 'fmod', 'frexp', 'greater',
- 'greater_equal', 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf',
- 'isnan', 'isnat', 'ldexp', 'left_shift', 'less', 'less_equal', 'log',
- 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and',
- 'logical_not', 'logical_or', 'logical_xor', 'maximum', 'minimum',
- 'mod', 'modf', 'multiply', 'negative', 'nextafter', 'not_equal',
- 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder',
- 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing',
- 'sqrt', 'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc']
+ 'floor_divide', 'fmax', 'fmin', 'fmod', 'frexp', 'gcd', 'greater',
+ 'greater_equal', 'heaviside', 'hypot', 'invert', 'isfinite',
+ 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less',
+ 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
+ 'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
+ 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply',
+ 'negative', 'nextafter', 'not_equal', 'positive', 'power',
+ 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift',
+ 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt',
+ 'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc']
+
for name in dir(np):
if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs:
+++ /dev/null
-=========================================
-Building the NumPy API and reference docs
-=========================================
-
-We currently use Sphinx_ for generating the API and reference
-documentation for NumPy. You will need Sphinx 1.0.1 or newer.
-
-If you only want to get the documentation, note that pre-built
-versions can be found at
-
- http://docs.scipy.org/
-
-in several different formats.
-
-.. _Sphinx: http://sphinx.pocoo.org
-
-
-Instructions
-------------
-
-If you obtained NumPy via git, get also the git submodules that contain
-additional parts required for building the documentation::
-
- git submodule init
- git submodule update
-
-In addition, building the documentation requires the Sphinx extension
-`plot_directive`, which is shipped with Matplotlib_. This Sphinx extension can
-be installed with or without completely installing Matplotlib: see the
-Matplotlib documentation for more information.
-
-Since large parts of the main documentation are stored in
-docstrings, you will need to first build NumPy, and install it so
-that the correct version is imported by
-
- >>> import numpy
-
-Note that you can eg. install NumPy to a temporary location and set
-the PYTHONPATH environment variable appropriately.
-
-After NumPy is installed, write::
-
- make html
-
-in the ``doc/`` directory. If all goes well, this will generate a
-``build/html`` subdirectory containing the built documentation. Note
-that building the documentation on Windows is currently not actively
-supported, though it should be possible. (See Sphinx_ documentation
-for more information.)
-
-To build the PDF documentation, do instead::
-
- make latex
- make -C build/latex all-pdf
-
-You will need to have Latex installed for this.
-
-Instead of the above, you can also do::
-
- make dist
-
-which will rebuild NumPy, install it to a temporary location, and
-build the documentation in all formats. This will most likely again
-only work on Unix platforms.
-
-The documentation for NumPy distributed at http://docs.scipy.org in html and
-pdf format is also built with ``make dist``. See `HOWTO RELEASE`_ for details on
-how to update http://docs.scipy.org.
-
-.. _Matplotlib: http://matplotlib.org/
-.. _HOWTO RELEASE: https://github.com/numpy/numpy/blob/master/doc/HOWTO_RELEASE.rst.txt
-
-Sphinx extensions
------------------
-
-NumPy's documentation uses several custom extensions to Sphinx. These
-are shipped in the ``sphinxext/`` directory (as git submodules, as discussed
-above), and are automatically enabled when building NumPy's documentation.
-
-If you want to make use of these extensions in third-party
-projects, they are available on PyPi_ as the numpydoc_ package.
-
-.. _PyPi: http://python.org/pypi
-.. _numpydoc: http://python.org/pypi/numpydoc
-====================================
-A Guide to NumPy/SciPy Documentation
-====================================
-
-.. Contents::
-
-.. Note::
-
- For an accompanying example, see `example.py
- <http://github.com/numpy/numpy/blob/master/doc/example.py>`_.
-
- When using `Sphinx <http://sphinx.pocoo.org/>`__ in combination with the
- numpy conventions, you should use the ``numpydoc`` extension so that your
- docstrings will be handled correctly. For example, Sphinx will extract the
- ``Parameters`` section from your docstring and convert it into a field
- list. Using ``numpydoc`` will also avoid the reStructuredText errors produced
- by plain Sphinx when it encounters numpy docstring conventions like
- section headers (e.g. ``-------------``) that sphinx does not expect to
- find in docstrings.
-
- Some features described in this document require a recent version of
- ``numpydoc``. For example, the **Yields** section was added in
- ``numpydoc`` 0.6.
-
- It is available from:
-
- * `numpydoc on PyPI <http://pypi.python.org/pypi/numpydoc>`_
- * `numpydoc on GitHub <https://github.com/numpy/numpydoc/>`_
-
- Details of how to use it can be found `here
- <https://github.com/numpy/numpydoc/blob/master/README.rst>`__ and
- `here
- <https://github.com/numpy/numpy/blob/master/doc/HOWTO_BUILD_DOCS.rst.txt>`__
-
-Overview
---------
-We mostly follow the standard Python style conventions as described here:
- * `Style Guide for C Code <http://python.org/dev/peps/pep-0007/>`_
- * `Style Guide for Python Code <http://python.org/dev/peps/pep-0008/>`_
- * `Docstring Conventions <http://python.org/dev/peps/pep-0257/>`_
-
-Additional PEPs of interest regarding documentation of code:
- * `Docstring Processing Framework <http://python.org/dev/peps/pep-0256/>`_
- * `Docutils Design Specification <http://python.org/dev/peps/pep-0258/>`_
-
-Use a code checker:
- * `pylint <http://www.logilab.org/857>`_
- * `pyflakes <https://pypi.python.org/pypi/pyflakes>`_
- * `pep8.py <http://svn.browsershots.org/trunk/devtools/pep8/pep8.py>`_
- * `flake8 <https://pypi.python.org/pypi/flake8>`_
- * `vim-flake8 <https://github.com/nvie/vim-flake8>`_ plugin for
- automatically checking syntax and style with flake8
-
-The following import conventions are used throughout the NumPy source
-and documentation::
-
- import numpy as np
- import matplotlib as mpl
- import matplotlib.pyplot as plt
-
-Do not abbreviate ``scipy``. There is no motivating use case to
-abbreviate it in the real world, so we avoid it in the documentation
-to avoid confusion.
-
-It is not necessary to do ``import numpy as np`` at the beginning of
-an example. However, some sub-modules, such as ``fft``, are not
-imported by default, and you have to include them explicitly::
-
- import numpy.fft
-
-after which you may use it::
-
- np.fft.fft2(...)
-
-Docstring Standard
-------------------
-A documentation string (docstring) is a string that describes a module,
-function, class, or method definition. The docstring is a special attribute
-of the object (``object.__doc__``) and, for consistency, is surrounded by
-triple double quotes, i.e.::
-
- """This is the form of a docstring.
-
- It can be spread over several lines.
-
- """
-
-NumPy, SciPy_, and the scikits follow a common convention for
-docstrings that provides for consistency, while also allowing our
-toolchain to produce well-formatted reference guides. This document
-describes the current community consensus for such a standard. If you
-have suggestions for improvements, post them on the `numpy-discussion
-list`_.
-
-Our docstring standard uses `re-structured text (reST)
-<http://docutils.sourceforge.net/rst.html>`_ syntax and is rendered
-using Sphinx_ (a pre-processor that understands the particular
-documentation style we are using). While a rich set of
-markup is available, we limit ourselves to a very basic subset, in
-order to provide docstrings that are easy to read on text-only
-terminals.
-
-A guiding principle is that human readers of the text are given
-precedence over contorting docstrings so our tools produce nice
-output. Rather than sacrificing the readability of the docstrings, we
-have written pre-processors to assist Sphinx_ in its task.
-
-The length of docstring lines should be kept to 75 characters to
-facilitate reading the docstrings in text terminals.
-
-Sections
---------
-The sections of the docstring are:
-
-1. **Short summary**
-
- A one-line summary that does not use variable names or the function
- name, e.g.
-
- ::
-
- def add(a, b):
- """The sum of two numbers.
-
- """
-
- The function signature is normally found by introspection and
- displayed by the help function. For some functions (notably those
- written in C) the signature is not available, so we have to specify
- it as the first line of the docstring::
-
- """
- add(a, b)
-
- The sum of two numbers.
-
- """
-
-2. **Deprecation warning**
-
- A section (use if applicable) to warn users that the object is deprecated.
- Section contents should include:
-
- * In what NumPy version the object was deprecated, and when it will be
- removed.
-
- * Reason for deprecation if this is useful information (e.g., object
- is superseded, duplicates functionality found elsewhere, etc.).
-
- * New recommended way of obtaining the same functionality.
-
- This section should use the note Sphinx directive instead of an
- underlined section header.
-
- ::
-
- .. note:: Deprecated in NumPy 1.6.0
- `ndobj_old` will be removed in NumPy 2.0.0, it is replaced by
- `ndobj_new` because the latter works also with array subclasses.
-
-3. **Extended Summary**
-
- A few sentences giving an extended description. This section
- should be used to clarify *functionality*, not to discuss
- implementation detail or background theory, which should rather be
- explored in the **Notes** section below. You may refer to the
- parameters and the function name, but parameter descriptions still
- belong in the **Parameters** section.
-
-4. **Parameters**
-
- Description of the function arguments, keywords and their
- respective types.
-
- ::
-
- Parameters
- ----------
- x : type
- Description of parameter `x`.
- y
- Description of parameter `y` (with type not specified)
-
- Enclose variables in single backticks. The colon must be preceded
- by a space, or omitted if the type is absent.
-
- For the parameter types, be as precise as possible. Below are a
- few examples of parameters and their types.
-
- ::
-
- Parameters
- ----------
- filename : str
- copy : bool
- dtype : data-type
- iterable : iterable object
- shape : int or tuple of int
- files : list of str
-
- If it is not necessary to specify a keyword argument, use
- ``optional``::
-
- x : int, optional
-
- Optional keyword parameters have default values, which are
- displayed as part of the function signature. They can also be
- detailed in the description::
-
- Description of parameter `x` (the default is -1, which implies summation
- over all axes).
-
- When a parameter can only assume one of a fixed set of values,
- those values can be listed in braces, with the default appearing first::
-
- order : {'C', 'F', 'A'}
- Description of `order`.
-
- When two or more input parameters have exactly the same type, shape and
- description, they can be combined::
-
- x1, x2 : array_like
- Input arrays, description of `x1`, `x2`.
-
-5. **Returns**
-
- Explanation of the returned values and their types. Similar to the
- **Parameters** section, except the name of each return value is optional.
- The type of each return value is always required::
-
- Returns
- -------
- int
- Description of anonymous integer return value.
-
- If both the name and type are specified, the **Returns** section takes the
- same form as the **Parameters** section::
-
- Returns
- -------
- err_code : int
- Non-zero value indicates error code, or zero on success.
- err_msg : str or None
- Human readable error message, or None on success.
-
-6. **Yields**
-
- Explanation of the yielded values and their types. This is relevant to
- generators only. Similar to the **Returns** section in that the name of
- each value is optional, but the type of each value is always required::
-
- Yields
- ------
- int
- Description of the anonymous integer return value.
-
- If both the name and type are specified, the **Yields** section takes the
- same form as the **Returns** section::
-
- Yields
- ------
- err_code : int
- Non-zero value indicates error code, or zero on success.
- err_msg : str or None
- Human readable error message, or None on success.
-
- Support for the **Yields** section was added in `numpydoc
- <https://github.com/numpy/numpydoc>`_ version 0.6.
-
-7. **Other Parameters**
-
- An optional section used to describe infrequently used parameters.
- It should only be used if a function has a large number of keyword
- parameters, to prevent cluttering the **Parameters** section.
-
-8. **Raises**
-
- An optional section detailing which errors get raised and under
- what conditions::
-
- Raises
- ------
- LinAlgException
- If the matrix is not numerically invertible.
-
- This section should be used judiciously, i.e., only for errors
- that are non-obvious or have a large chance of getting raised.
-
-9. **See Also**
-
- An optional section used to refer to related code. This section
- can be very useful, but should be used judiciously. The goal is to
- direct users to other functions they may not be aware of, or have
- easy means of discovering (by looking at the module docstring, for
- example). Routines whose docstrings further explain parameters
- used by this function are good candidates.
-
- As an example, for ``numpy.mean`` we would have::
-
- See Also
- --------
- average : Weighted average
-
- When referring to functions in the same sub-module, no prefix is
- needed, and the tree is searched upwards for a match.
-
- Prefix functions from other sub-modules appropriately. E.g.,
- whilst documenting the ``random`` module, refer to a function in
- ``fft`` by
-
- ::
-
- fft.fft2 : 2-D fast discrete Fourier transform
-
- When referring to an entirely different module::
-
- scipy.random.norm : Random variates, PDFs, etc.
-
- Functions may be listed without descriptions, and this is
- preferable if the functionality is clear from the function name::
-
- See Also
- --------
- func_a : Function a with its description.
- func_b, func_c, func_d
- func_e
-
-10. **Notes**
-
- An optional section that provides additional information about the
- code, possibly including a discussion of the algorithm. This
- section may include mathematical equations, written in
- `LaTeX <http://www.latex-project.org/>`_ format::
-
- The FFT is a fast implementation of the discrete Fourier transform:
-
- .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
-
- Equations can also be typeset underneath the math directive::
-
- The discrete-time Fourier time-convolution property states that
-
- .. math::
-
- x(n) * y(n) \Leftrightarrow X(e^{j\omega } )Y(e^{j\omega } )\\
- another equation here
-
- Math can furthermore be used inline, i.e.
-
- ::
-
- The value of :math:`\omega` is larger than 5.
-
- Variable names are displayed in typewriter font, obtained by using
- ``\mathtt{var}``::
-
- We square the input parameter `alpha` to obtain
- :math:`\mathtt{alpha}^2`.
-
- Note that LaTeX is not particularly easy to read, so use equations
- sparingly.
-
- Images are allowed, but should not be central to the explanation;
- users viewing the docstring as text must be able to comprehend its
- meaning without resorting to an image viewer. These additional
- illustrations are included using::
-
- .. image:: filename
-
- where filename is a path relative to the reference guide source
- directory.
-
-11. **References**
-
- References cited in the **notes** section may be listed here,
- e.g. if you cited the article below using the text ``[1]_``,
- include it as in the list as follows::
-
- .. [1] O. McNoleg, "The integration of GIS, remote sensing,
- expert systems and adaptive co-kriging for environmental habitat
- modelling of the Highland Haggis using object-oriented, fuzzy-logic
- and neural-network techniques," Computers & Geosciences, vol. 22,
- pp. 585-588, 1996.
-
- which renders as
-
- .. [1] O. McNoleg, "The integration of GIS, remote sensing,
- expert systems and adaptive co-kriging for environmental habitat
- modelling of the Highland Haggis using object-oriented, fuzzy-logic
- and neural-network techniques," Computers & Geosciences, vol. 22,
- pp. 585-588, 1996.
-
- Referencing sources of a temporary nature, like web pages, is
- discouraged. References are meant to augment the docstring, but
- should not be required to understand it. References are numbered, starting
- from one, in the order in which they are cited.
-
-12. **Examples**
-
- An optional section for examples, using the `doctest
- <http://docs.python.org/library/doctest.html>`_ format.
- This section is meant to illustrate usage, not to provide a
- testing framework -- for that, use the ``tests/`` directory.
- While optional, this section is very strongly encouraged.
-
- When multiple examples are provided, they should be separated by
- blank lines. Comments explaining the examples should have blank
- lines both above and below them::
-
- >>> np.add(1, 2)
- 3
-
- Comment explaining the second example
-
- >>> np.add([1, 2], [3, 4])
- array([4, 6])
-
- For tests with a result that is random or platform-dependent, mark the
- output as such::
-
- >>> import numpy.random
- >>> np.random.rand(2)
- array([ 0.35773152, 0.38568979]) #random
-
- You can run examples as doctests using::
-
- >>> np.test(doctests=True)
- >>> np.linalg.test(doctests=True) # for a single module
-
- In IPython it is also possible to run individual examples simply by
- copy-pasting them in doctest mode::
-
- In [1]: %doctest_mode
- Exception reporting mode: Plain
- Doctest mode is: ON
- >>> %paste
- import numpy.random
- np.random.rand(2)
- ## -- End pasted text --
- array([ 0.8519522 , 0.15492887])
-
-
- It is not necessary to use the doctest markup ``<BLANKLINE>`` to
- indicate empty lines in the output. Note that the option to run
- the examples through ``numpy.test`` is provided for checking if the
- examples work, not for making the examples part of the testing framework.
-
- The examples may assume that ``import numpy as np`` is executed before
- the example code in *numpy*. Additional examples may make use of
- *matplotlib* for plotting, but should import it explicitly, e.g.,
- ``import matplotlib.pyplot as plt``. All other imports, including the
- demonstrated function, must be explicit.
-
-
-Documenting classes
--------------------
-
-Class docstring
-```````````````
-Use the same sections as outlined above (all except ``Returns`` are
-applicable). The constructor (``__init__``) should also be documented
-here, the **Parameters** section of the docstring details the constructors
-parameters.
-
-An **Attributes** section, located below the **Parameters** section,
-may be used to describe non-method attributes of the class::
-
- Attributes
- ----------
- x : float
- The X coordinate.
- y : float
- The Y coordinate.
-
-Attributes that are properties and have their own docstrings can be
-simply listed by name::
-
- Attributes
- ----------
- real
- imag
- x : float
- The X coordinate
- y : float
- The Y coordinate
-
-In general, it is not necessary to list class methods. Those that are
-not part of the public API have names that start with an underscore.
-In some cases, however, a class may have a great many methods, of
-which only a few are relevant (e.g., subclasses of ndarray). Then, it
-becomes useful to have an additional **Methods** section::
-
- class Photo(ndarray):
- """
- Array with associated photographic information.
-
- ...
-
- Attributes
- ----------
- exposure : float
- Exposure in seconds.
-
- Methods
- -------
- colorspace(c='rgb')
- Represent the photo in the given colorspace.
- gamma(n=1.0)
- Change the photo's gamma exposure.
-
- """
-
-If it is necessary to explain a private method (use with care!), it can
-be referred to in the **Extended Summary** or the **Notes** section.
-Do not list private methods in the **methods** section.
-
-Note that `self` is *not* listed as the first parameter of methods.
-
-Method docstrings
-`````````````````
-Document these as you would any other function. Do not include
-``self`` in the list of parameters. If a method has an equivalent function
-(which is the case for many ndarray methods for example), the function
-docstring should contain the detailed documentation, and the method docstring
-should refer to it. Only put brief summary and **See Also** sections in the
-method docstring. The method should use a **Returns** or **Yields** section,
-as appropriate.
-
-
-Documenting class instances
----------------------------
-Instances of classes that are part of the NumPy API (for example `np.r_`
-`np,c_`, `np.index_exp`, etc.) may require some care. To give these
-instances a useful docstring, we do the following:
-
-* Single instance: If only a single instance of a class is exposed,
- document the class. Examples can use the instance name.
-
-* Multiple instances: If multiple instances are exposed, docstrings
- for each instance are written and assigned to the instances'
- ``__doc__`` attributes at run time. The class is documented as usual, and
- the exposed instances can be mentioned in the **Notes** and **See Also**
- sections.
-
-
-Documenting generators
-----------------------
-Generators should be documented just as functions are documented. The
-only difference is that one should use the **Yields** section instead
-of the **Returns** section. Support for the **Yields** section was added in
-`numpydoc <https://github.com/numpy/numpydoc>`_ version 0.6.
-
-
-Documenting constants
----------------------
-Use the same sections as outlined for functions where applicable::
-
- 1. summary
- 2. extended summary (optional)
- 3. see also (optional)
- 4. references (optional)
- 5. examples (optional)
-
-Docstrings for constants will not be visible in text terminals
-(constants are of immutable type, so docstrings can not be assigned
-to them like for for class instances), but will appear in the
-documentation built with Sphinx.
-
-
-Documenting modules
--------------------
-Each module should have a docstring with at least a summary line. Other
-sections are optional, and should be used in the same order as for documenting
-functions when they are appropriate::
-
- 1. summary
- 2. extended summary
- 3. routine listings
- 4. see also
- 5. notes
- 6. references
- 7. examples
-
-Routine listings are encouraged, especially for large modules, for which it is
-hard to get a good overview of all functionality provided by looking at the
-source file(s) or the ``__all__`` dict.
-
-Note that license and author info, while often included in source files, do not
-belong in docstrings.
-
-
-Other points to keep in mind
-----------------------------
-* Equations : as discussed in the **Notes** section above, LaTeX formatting
- should be kept to a minimum. Often it's possible to show equations as
- Python code or pseudo-code instead, which is much more readable in a
- terminal. For inline display use double backticks (like ``y = np.sin(x)``).
- For display with blank lines above and below, use a double colon and indent
- the code, like::
-
- end of previous sentence::
-
- y = np.sin(x)
-
-* Notes and Warnings : If there are points in the docstring that deserve
- special emphasis, the reST directives for a note or warning can be used
- in the vicinity of the context of the warning (inside a section). Syntax::
-
- .. warning:: Warning text.
-
- .. note:: Note text.
-
- Use these sparingly, as they do not look very good in text terminals
- and are not often necessary. One situation in which a warning can
- be useful is for marking a known bug that is not yet fixed.
-
-* array_like : For functions that take arguments which can have not only
- a type `ndarray`, but also types that can be converted to an ndarray
- (i.e. scalar types, sequence types), those arguments can be documented
- with type `array_like`.
-
-Common reST concepts
---------------------
-For paragraphs, indentation is significant and indicates indentation in the
-output. New paragraphs are marked with a blank line.
-
-Use ``*italics*``, ``**bold**`` and ````monospace```` if needed in any
-explanations
-(but not for variable names and doctest code or multi-line code).
-Variable, module, function, and class names should be written between
-single back-ticks (```numpy```).
-
-A more extensive example of reST markup can be found in `this example
-document <http://docutils.sourceforge.net/docs/user/rst/demo.txt>`_;
-the `quick reference
-<http://docutils.sourceforge.net/docs/user/rst/quickref.html>`_ is
-useful while editing.
-
-Line spacing and indentation are significant and should be carefully
-followed.
-
-Conclusion
-----------
-
-`An example <http://github.com/numpy/numpy/blob/master/doc/example.py>`_ of the
-format shown here is available. Refer to `How to Build API/Reference
-Documentation
-<http://github.com/numpy/numpy/blob/master/doc/HOWTO_BUILD_DOCS.rst.txt>`_
-on how to use Sphinx_ to build the manual.
-
-This document itself was written in ReStructuredText, and may be converted to
-HTML using::
-
- $ rst2html HOWTO_DOCUMENT.txt HOWTO_DOCUMENT.html
-
-.. _SciPy: http://www.scipy.org
-.. _numpy-discussion list: http://scipy.org/scipylib/mailing-lists.html
-.. _Sphinx: http://sphinx.pocoo.org
+This document has been replaced, see https://numpydoc.readthedocs.io/en/latest/
# Makefile for Sphinx documentation
#
-PYVER = 2.7
+PYVER = 3.6
PYTHON = python$(PYVER)
# You can set these variables from the command line.
-This file contains a walkthrough of the NumPy 1.12.0 release on Fedora Linux.
+This file contains a walkthrough of the NumPy 1.14.4 release on Linux.
The commands can be copied into the command line, but be sure to
-replace 1.12.0 by the correct version.
+replace 1.14.4 by the correct version.
Release Walkthrough
====================
-Building the release
---------------------
+Update Release documentation
+----------------------------
+
+The file ``doc/changelog/1.14.4-changelog.rst`` should be updated to reflect
+the final list of changes and contributors. This text can be generated by::
+
+ $ python tools/changelog.py $GITHUB v1.14.3..maintenance/1.14.x > doc/changelog/1.14.4-changelog.rst
+
+where ``GITHUB`` contains your github access token. This text may also be
+appended to ``doc/release/1.14.4-notes.rst`` for release updates, though not
+for new releases like ``1.14.0``, as the changelogs for the latter tend to be
+excessively long. The ``doc/source/release.rst`` file should also be
+updated with a link to the new release notes.
+
+Prepare the release commit
+--------------------------
Checkout the branch for the release, make sure it is up to date, and clean the
repository::
$ git submodule update
$ git clean -xdf
-Look at the git log to get the hash of the last commit in the release, then
-check it out::
-
- $ git log
- $ git checkout 7849751173fb47a5f17761b3515b42b4d8ce1197
-
Edit pavement.py and setup.py as detailed in HOWTO_RELEASE::
$ gvim pavement.py setup.py
- $ git commit -a -m"REL: NumPy 1.14.1 release."
+ $ git commit -a -m"REL: NumPy 1.14.4 release."
Sanity check::
$ python runtests.py -m "full"
$ python3 runtests.py -m "full"
-Tag it,and build the source distribution archives::
+Push this release directly onto the end of the maintenance branch. This
+requires write permission to the numpy repository::
- $ git tag -s v1.14.1
- $ paver sdist # sdist will do a git clean -xdf, so we omit that
+ $ git push upstream maintenance/1.14.x
-Check that the files in ``release/installers`` have the correct versions, then
-push the tag upstream; generation of the wheels for PyPI needs it::
+As an example, see the 1.14.3 REL commit: `<https://github.com/numpy/numpy/commit/73299826729be58cec179b52c656adfcaefada93>`_.
- $ git push upstream v1.14.1
+Build source releases
+---------------------
-Trigger the wheels build. This can take a while. The numpy-wheels repository is
-cloned from `<https://github.com/MacPython/numpy-wheels>`_. Start with a pull
-as the repo may have been accessed and changed by someone else and a push will
-fail.
+Paver is used to build the source releases. It will create the ``release`` and
+``release/installers`` directories and put the ``*.zip`` and ``*.tar.gz``
+source releases in the latter::
+
+ $ paver sdist # sdist will do a git clean -xdf, so we omit that
+
+Build wheels
+------------
+
+Trigger the wheels build by pointing the numpy-wheels repository at this
+commit. This can take a while. The numpy-wheels repository is cloned from
+`<https://github.com/MacPython/numpy-wheels>`_. Start with a pull as the repo
+may have been accessed and changed by someone else and a push will fail::
$ cd ../numpy-wheels
$ git pull origin master
$ git branch <new version> # only when starting new numpy version
- $ git checkout v1.14.x # v1.14.x already existed for the 1.14.1 release
+ $ git checkout v1.14.x # v1.14.x already existed for the 1.14.4 release
-The ``.travis.yml`` and ``appveyor.yml`` files need to be edited to make
-sure they have the correct version, search for ``BUILD_COMMIT``.
+Edit the ``.travis.yml`` and ``.appveyor.yml`` files to make sure they have the
+correct version, and put in the commit hash for the ``REL`` commit created
+above for ``BUILD_COMMIT``, see the `example`_ from v1.14.3::
$ gvim .travis.yml appveyor.yml
$ git commit -a
and appveyor build status. Check if all the needed wheels have been built and
uploaded before proceeding. There should currently be 22 of them at
`<https://wheels.scipy.org>`_, 4 for Mac, 8 for Windows, and 10 for Linux.
+Note that sometimes builds, like tests, fail for unrelated reasons and you will
+need to restart them.
+.. _example: https://github.com/MacPython/numpy-wheels/commit/fed9c04629c155e7804282eb803d81097244598d
Download wheels
---------------
-When the wheels have all been built, download them using the ``wheel-uploader``
+When the wheels have all been successfully built, download them using the ``wheel-uploader``
in the ``terryfy`` repository. The terryfy repository may be cloned from
`<https://github.com/MacPython/terryfy>`_ if you don't already have it. The
wheels can also be uploaded using the ``wheel-uploader``, but we prefer to
download all the wheels to the ``../numpy/release/installers`` directory and
-upload later using ``twine``.
+upload later using ``twine``::
$ cd ../terryfy
$ git pull origin master
$ CDN_URL=https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com
$ NPY_WHLS=../numpy/release/installers
- $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t win numpy 1.14.1
- $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t manylinux1 numpy 1.14.1
- $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t macosx numpy 1.14.1
+ $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t win numpy 1.14.4
+ $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t manylinux1 numpy 1.14.4
+ $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t macosx numpy 1.14.4
If you do this often, consider making CDN_URL and NPY_WHLS part of your default
-environment.
+environment. Note that we need local copies of the files in order to generate
+hashes to include in the README files generated later.
+
+Tag the release
+---------------
+
+Once the wheels have been built and downloaded without errors, go back to your
+numpy repository in the maintenance branch and tag the ``REL`` commit, signing
+it with your gpg key::
+
+ $ git tag -s v1.14.4
+
+You should upload your public gpg key to github, so that the tag will appear
+"verified" there.
+
+Check that the files in ``release/installers`` have the correct versions, then
+push the tag upstream::
+
+ $ git push upstream v1.14.4
+
+We wait until this point to push the tag because it is very difficult to change
+the tag after it has been pushed.
+
+Reset the maintenance branch into a development state
+-----------------------------------------------------
+
+Add another ``REL`` commit to the numpy maintenance branch, which resets the
+``ISRELEASED`` flag to ``False`` and increments the version counter::
+
+ $ gvim pavement.py setup.py
+ $ git commit -a -m"REL: prepare 1.14.x for further development"
+ $ git push upstream maintenance/1.14.x
+
+This strategy is copied from the scipy release procedure and was used in numpy
+for the first time in 1.14.3. It needed to be modified a little since numpy
+has more strict requirements for the version number.
Upload to PyPI
--------------
-Upload to PyPI using ``twine``. The choice here is to sign the files, so will
-need to sign every file separately when they are uploaded, keeping the gpg pass
-phrase in the clipboard and pasting it in will make that easier. We may chose
-to forgo the signing in the future::
+Upload to PyPI using ``twine``. A recent version of ``twine`` is needed
+after recent PyPI changes; version ``1.11.0`` was used here. ::
$ cd ../numpy
- $ twine upload -s release/installers/*.whl
- $ twine upload -s release/installers/numpy-1.14.1.zip # Upload last.
+ $ twine upload release/installers/*.whl
+ $ twine upload release/installers/numpy-1.14.4.zip # Upload last.
If one of the commands breaks in the middle, which is not uncommon, you may
need to selectively upload the remaining files because PyPI does not allow the
process. Note that PyPI only allows a single source distribution, here we have
chosen the zip archive.
-If this is not a final release, log into PyPI and hide the new directory while
-making sure the last stable release is visible.
-
-
Upload files to github
----------------------
-Generate the ``release/README`` files::
+Generate the ``release/README.*`` files::
- $ rm release/installers/*.asc
$ paver write_release_and_log
-Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.14.1
+Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.14.4
tag``, click on it and hit the edit button for that tag. There are two ways to
add files, using an editable text window and as binary uploads.
- Cut and paste the ``release/README.md`` file contents into the text window.
-- Upload ``release/installers/numpy-1.12.0.tar.gz`` as a binary file.
-- Upload ``release/installers/numpy-1.12.0.zip`` as a binary file.
-- Upload ``release/README`` as a binary file.
-- Upload ``doc/changelog/1.14.1-changelog.rst`` as a binary file.
+- Upload ``release/installers/numpy-1.14.4.tar.gz`` as a binary file.
+- Upload ``release/installers/numpy-1.14.4.zip`` as a binary file.
+- Upload ``release/README.rst`` as a binary file.
+- Upload ``doc/changelog/1.14.4-changelog.rst`` as a binary file.
- Check the pre-release button if this is a pre-release.
- Hit the ``{Publish,Update} release`` button at the bottom.
$ pushd doc
$ make dist
- $ make upload USERNAME=<yourname> RELEASE=v1.14.1
+ $ make upload USERNAME=<yourname> RELEASE=v1.14.4
$ popd
If the release series is a new one, you will need to rebuild and upload the
$ cd ../scipy.org
$ git checkout master
$ git pull upstream master
- $ git checkout -b numpy-1.14.1
+ $ git checkout -b numpy-1.14.4
$ gvim www/index.rst # edit the News section
$ git commit -a
$ git push origin HEAD
The release should be announced on the numpy-discussion, scipy-devel,
scipy-user, and python-announce-list mailing lists. Look at previous
-announcements for the basic template. The contributor list can be generated as
-follows::
-
- $ cd ../numpy
- $ ./tools/changelog.py $GITHUB v1.14.0..v1.14.1 > tmp.rst
-
-The contents of ``tmp.rst`` can then be cut and pasted into the announcement
-email.
+announcements for the basic template. The contributor and PR lists
+are the same as generated for the release notes above.
-.. -*- rest -*-
-
NumPy/SciPy Testing Guidelines
==============================
.. contents::
+
Introduction
''''''''''''
-SciPy uses the `Nose testing system
-<http://nose.readthedocs.io>`__, with some
-minor convenience features added. Nose is an extension of the unit
-testing framework offered by `unittest.py
-<http://docs.python.org/lib/module-unittest.html>`__. Our goal is that
-every module and package in SciPy should have a thorough set of unit
+Until the 1.15 release, NumPy used the `nose`_ testing framework; it now uses
+the `pytest`_ framework. The older framework is still maintained in order to
+support downstream projects that use the old numpy framework, but all tests
+for NumPy should use pytest.
+
+Our goal is that every module and package in SciPy and NumPy
+should have a thorough set of unit
tests. These tests should exercise the full functionality of a given
routine as well as its robustness to erroneous or unexpected input
arguments. Long experience has shown that by far the best time to
write the tests is before you write or change the code - this is
`test-driven development
-<http://en.wikipedia.org/wiki/Test-driven_development>`__. The
+<https://en.wikipedia.org/wiki/Test-driven_development>`__. The
arguments for this can sound rather abstract, but we can assure you
that you will find that writing the tests first leads to more robust
and better designed code. Well-designed tests with good coverage make
>>> import scipy
>>> scipy.test()
+or from the command line::
+
+ $ python runtests.py
+
SciPy uses the testing framework from NumPy (specifically
-``numpy.testing``), so all the SciPy examples shown here are also
-applicable to NumPy. So NumPy's full test suite can be run as
+:ref:`numpy-testing`), so all the SciPy examples shown here are also
+applicable to NumPy. NumPy's full test suite can be run as
follows::
>>> import numpy
Finally, if you are only interested in testing a subset of SciPy, for
example, the ``integrate`` module, use the following::
->>> scipy.integrate.test()
+ >>> scipy.integrate.test()
+
+or from the command line::
+
+ $ python runtests.py -t scipy/integrate/tests
The rest of this page will give you a basic idea of how to add unit
tests to modules in SciPy. It is extremely important for us to have
Every Python module, extension module, or subpackage in the SciPy
package directory should have a corresponding ``test_<name>.py`` file.
-Nose examines these files for test methods (named test*) and test
+Pytest examines these files for test methods (named test*) and test
+Pytest examines these files for test methods (named test*) and test
classes (named Test*).
Suppose you have a SciPy module ``scipy/xxx/yyy.py`` containing a
at the bottom.
-Labeling tests with nose
-------------------------
+Labeling tests
+--------------
+
+As an alternative to ``pytest.mark.<label>``, there are a number of labels you
+can use.
Unlabeled tests like the ones above are run in the default
``scipy.test()`` run. If you want to label your test as slow - and
therefore reserved for a full ``scipy.test(label='full')`` run, you
-can label it with a nose decorator::
+can label it with a decorator::
# numpy.testing module includes 'import decorators as dec'
from numpy.testing import dec, assert_
def test_simple(self):
assert_(zzz() == 'Hello from zzz')
+Available labels are:
+
+- ``slow``: marks a test as taking a long time
+- ``setastest(tf)``: work-around for test discovery when the test name is
+  non-conformant
+- ``skipif(condition, msg=None)``: skips the test when ``eval(condition)`` is
+ ``True``
+- ``knownfailureif(fail_cond, msg=None)``: will avoid running the test if
+ ``eval(fail_cond)`` is ``True``, useful for tests that conditionally segfault
+- ``deprecated(conditional=True)``: filters deprecation warnings emitted in the
+ test
+- ``parametrize(var, input)``: an alternative to
+  `pytest.mark.parametrize
+  <https://docs.pytest.org/en/latest/parametrize.html>`_
+
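+For instance, a minimal sketch combining two of these labels (the test name
+and body are illustrative, not taken from the test suite)::
+
+    from numpy.testing import dec, assert_
+
+    @dec.slow
+    @dec.skipif(True, msg="skipped here purely for illustration")
+    def test_something_slow():
+        # reserved for full runs, and skipped anyway by the decorator
+        assert_(True)
+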
Easier setup and teardown functions / methods
---------------------------------------------
-Nose looks for module level setup and teardown functions by name;
-thus::
+Testing looks for module-level or class-level setup and teardown functions by
+name; thus::
def setup():
"""Module-level setup"""
print 'doing teardown'
-You can add setup and teardown functions to functions and methods with
-nose decorators::
-
- import nose
- # import all functions from numpy.testing that are needed
- from numpy.testing import assert_, assert_array_almost_equal
+    class TestMe(object):
+        def setup(self):
+            """Class-level setup"""
+            print 'doing setup'
- def setup_func():
- """A trivial setup function."""
- global helpful_variable
- helpful_variable = 'pleasant'
- print "In setup_func"
+        def teardown(self):
+            """Class-level teardown"""
+            print 'doing teardown'
- def teardown_func():
- """A trivial teardown function."""
- global helpful_variable
- del helpful_variable
- print "In teardown_func"
- @nose.with_setup(setup_func, teardown_func)
- def test_with_extras():
- # This test uses the setup/teardown functions.
- global helpful_variable
- print " In test_with_extras"
- print " Helpful is %s" % helpful_variable
+Setup and teardown functions attached to individual functions and methods are
+known as "fixtures", and their use is not encouraged.
Parametric tests
----------------
-One very nice feature of nose is allowing easy testing across a range
-of parameters - a nasty problem for standard unit tests. It does this
-with test generators::
-
- def check_even(n, nn):
- """A check function to be used in a test generator."""
- assert_(n % 2 == 0 or nn % 2 == 0)
-
- def test_evens():
- for i in range(0,4,2):
- yield check_even, i, i*3
-
-Note that ``check_even`` is not itself a test (no 'test' in the name),
-but ``test_evens`` is a generator that returns a series of tests, using
-``check_even``, across a range of inputs.
-
-A problem with generator tests can be that if a test is failing, it's
-hard to see for which parameters. To avoid this problem, ensure that:
-
- - No computation related to the features tested is done in the
- ``test_*`` generator function, but delegated to a corresponding
- ``check_*`` function (can be inside the generator, to share namespace).
- - The generators are used *solely* for loops over parameters.
- - Those parameters are *not* arrays.
-
-.. warning::
-
- Parametric tests cannot be implemented on classes derived from
- TestCase.
+One very nice feature of pytest is allowing easy testing across a range
+of parameters - a nasty problem for standard unit tests. Use the
+``dec.parametrize`` decorator.
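+
+A minimal sketch, assuming the ``dec.parametrize`` signature listed above
+(``test_even`` is a hypothetical test, not one from the suite)::
+
+    from numpy.testing import dec, assert_
+
+    @dec.parametrize('n', [0, 2, 4])
+    def test_even(n):
+        # runs once for each value of n
+        assert_(n % 2 == 0)
+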
Doctests
--------
all the common tests, and then create a subclass for each variation.
Several examples of this technique exist in NumPy; below are excerpts
from one in `numpy/linalg/tests/test_linalg.py
-<http://github.com/numpy/numpy/blob/master/numpy/linalg/tests/test_linalg.py>`__::
+<https://github.com/numpy/numpy/blob/master/numpy/linalg/tests/test_linalg.py>`__::
class LinalgTestCase:
def test_single(self):
deterministic by setting the random number seed before generating it. Use
either Python's ``random.seed(some_number)`` or NumPy's
``numpy.random.seed(some_number)``, depending on the source of random numbers.
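+For example, a short sketch of a deterministic test on random data (the
+seed value and tolerance below are arbitrary, for illustration only)::
+
+    import numpy as np
+    from numpy.testing import assert_
+
+    def test_mean_of_random_data():
+        np.random.seed(12345)  # fixed seed: the "random" data are reproducible
+        data = np.random.rand(100)
+        assert_(abs(data.mean() - 0.5) < 0.1)
+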
+
+
+.. _nose: https://nose.readthedocs.io/en/latest/
+.. _pytest: https://pytest.readthedocs.io
+.. _parameterization: https://docs.pytest.org/en/latest/parametrize.html
--- /dev/null
+
+Contributors
+============
+
+A total of 6 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Jonathan March +
+* Malcolm Smith +
+* Matti Picus
+* Pauli Virtanen
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#10862 <https://github.com/numpy/numpy/pull/10862>`__: BUG: floating types should override tp_print (1.14 backport)
+* `#10905 <https://github.com/numpy/numpy/pull/10905>`__: BUG: for 1.14 back-compat, accept list-of-lists in fromrecords
+* `#10947 <https://github.com/numpy/numpy/pull/10947>`__: BUG: 'style' arg to array2string broken in legacy mode (1.14...
+* `#10959 <https://github.com/numpy/numpy/pull/10959>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key
+* `#10960 <https://github.com/numpy/numpy/pull/10960>`__: BUG: Add missing underscore to prototype in check_embedded_lapack
+* `#10961 <https://github.com/numpy/numpy/pull/10961>`__: BUG: Fix encoding regression in ma/bench.py (Issue #10868)
+* `#10962 <https://github.com/numpy/numpy/pull/10962>`__: BUG: core: fix NPY_TITLE_KEY macro on pypy
+* `#10974 <https://github.com/numpy/numpy/pull/10974>`__: BUG: test, fix PyArray_DiscardWritebackIfCopy...
--- /dev/null
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Marten van Kerkwijk
+* Matti Picus
+* Pauli Virtanen
+* Ryan Soklaski +
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 11 pull requests were merged for this release.
+
+* `#11104 <https://github.com/numpy/numpy/pull/11104>`__: BUG: str of DOUBLE_DOUBLE format wrong on ppc64
+* `#11170 <https://github.com/numpy/numpy/pull/11170>`__: TST: linalg: add regression test for gh-8577
+* `#11174 <https://github.com/numpy/numpy/pull/11174>`__: MAINT: add sanity-checks to be run at import time
+* `#11181 <https://github.com/numpy/numpy/pull/11181>`__: BUG: void dtype setup checked offset not actual pointer for alignment
+* `#11194 <https://github.com/numpy/numpy/pull/11194>`__: BUG: Python2 doubles don't print correctly in interactive shell.
+* `#11198 <https://github.com/numpy/numpy/pull/11198>`__: BUG: optimizing compilers can reorder call to npy_get_floatstatus
+* `#11199 <https://github.com/numpy/numpy/pull/11199>`__: BUG: reduce using SSE only warns if inside SSE loop
+* `#11203 <https://github.com/numpy/numpy/pull/11203>`__: BUG: Bytes delimiter/comments in genfromtxt should be decoded
+* `#11211 <https://github.com/numpy/numpy/pull/11211>`__: BUG: Fix reference count/memory leak exposed by better testing
+* `#11219 <https://github.com/numpy/numpy/pull/11219>`__: BUG: Fixes einsum broadcasting bug when optimize=True
+* `#11251 <https://github.com/numpy/numpy/pull/11251>`__: DOC: Document 1.14.4 release.
--- /dev/null
+
+Contributors
+============
+
+A total of 1 person contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+
+Pull requests merged
+====================
+
+A total of 2 pull requests were merged for this release.
+
+* `#11274 <https://github.com/numpy/numpy/pull/11274>`__: BUG: Correct use of NPY_UNUSED.
+* `#11294 <https://github.com/numpy/numpy/pull/11294>`__: BUG: Remove extra trailing parentheses.
--- /dev/null
+
+Contributors
+============
+
+A total of 133 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Aaron Critchley +
+* Aarthi +
+* Aarthi Agurusa +
+* Alex Thomas +
+* Alexander Belopolsky
+* Allan Haldane
+* Anas Khan +
+* Andras Deak
+* Andrey Portnoy +
+* Anna Chiara
+* Aurelien Jarno +
+* Baurzhan Muftakhidinov
+* Berend Kapelle +
+* Bernhard M. Wiedemann
+* Bjoern Thiel +
+* Bob Eldering
+* Cenny Wenner +
+* Charles Harris
+* ChloeColeongco +
+* Chris Billington +
+* Christopher +
+* Chun-Wei Yuan +
+* Claudio Freire +
+* Daniel Smith
+* Darcy Meyer +
+* David Abdurachmanov +
+* David Freese
+* Deepak Kumar Gouda +
+* Dennis Weyland +
+* Derrick Williams +
+* Dmitriy Shalyga +
+* Eric Cousineau +
+* Eric Larson
+* Eric Wieser
+* Evgeni Burovski
+* Frederick Lefebvre +
+* Gaspar Karm +
+* Geoffrey Irving
+* Gerhard Hobler +
+* Gerrit Holl
+* Guo Ci +
+* Hameer Abbasi +
+* Han Shen
+* Hiroyuki V. Yamazaki +
+* Hong Xu
+* Ihor Melnyk +
+* Jaime Fernandez
+* Jake VanderPlas +
+* James Tocknell +
+* Jarrod Millman
+* Jeff VanOss +
+* John Kirkham
+* Jonas Rauber +
+* Jonathan March +
+* Joseph Fox-Rabinovitz
+* Julian Taylor
+* Junjie Bai +
+* Juris Bogusevs +
+* Jörg Döpfert
+* Kenichi Maehashi +
+* Kevin Sheppard
+* Kimikazu Kato +
+* Kirit Thadaka +
+* Kritika Jalan +
+* Kyle Sunden +
+* Lakshay Garg +
+* Lars G +
+* Licht Takeuchi
+* Louis Potok +
+* Luke Zoltan Kelley
+* MSeifert04 +
+* Mads R. B. Kristensen +
+* Malcolm Smith +
+* Mark Harfouche +
+* Marten H. van Kerkwijk +
+* Marten van Kerkwijk
+* Matheus Vieira Portela +
+* Mathieu Lamarre
+* Mathieu Sornay +
+* Matthew Brett
+* Matthew Rocklin +
+* Matthias Bussonnier
+* Matti Picus
+* Michael Droettboom
+* Miguel Sánchez de León Peque +
+* Mike Toews +
+* Milo +
+* Nathaniel J. Smith
+* Nelle Varoquaux
+* Nicholas Nadeau, P.Eng., AVS +
+* Nick Minkyu Lee +
+* Nikita +
+* Nikita Kartashov +
+* Nils Becker +
+* Oleg Zabluda
+* Orestis Floros +
+* Pat Gunn +
+* Paul van Mulbregt +
+* Pauli Virtanen
+* Pierre Chanial +
+* Ralf Gommers
+* Raunak Shah +
+* Robert Kern
+* Russell Keith-Magee +
+* Ryan Soklaski +
+* Samuel Jackson +
+* Sebastian Berg
+* Siavash Eliasi +
+* Simon Conseil
+* Simon Gibbons
+* Stefan Krah +
+* Stefan van der Walt
+* Stephan Hoyer
+* Subhendu +
+* Subhendu Ranjan Mishra +
+* Tai-Lin Wu +
+* Tobias Fischer +
+* Toshiki Kataoka +
+* Tyler Reddy +
+* Unknown +
+* Varun Nayyar
+* Victor Rodriguez +
+* Warren Weckesser
+* William D. Irons +
+* Zane Bradley +
+* cclauss +
+* fo40225 +
+* lapack_lite code generator +
+* lumbric +
+* luzpaz +
+* mamrehn +
+* tynn +
+* xoviat
+
+Pull requests merged
+====================
+
+A total of 438 pull requests were merged for this release.
+
+* `#8157 <https://github.com/numpy/numpy/pull/8157>`__: BUG: void .item() doesn't hold reference to original array
+* `#8774 <https://github.com/numpy/numpy/pull/8774>`__: ENH: Add gcd and lcm ufuncs
+* `#8819 <https://github.com/numpy/numpy/pull/8819>`__: ENH: Implement axes keyword argument for gufuncs.
+* `#8952 <https://github.com/numpy/numpy/pull/8952>`__: MAINT: Removed duplicated code around `ufunc->identity`
+* `#9686 <https://github.com/numpy/numpy/pull/9686>`__: DEP: Deprecate non-tuple nd-indices
+* `#9980 <https://github.com/numpy/numpy/pull/9980>`__: MAINT: Implement `lstsq` as a `gufunc`
+* `#9998 <https://github.com/numpy/numpy/pull/9998>`__: ENH: Nditer as context manager
+* `#10073 <https://github.com/numpy/numpy/pull/10073>`__: ENH: Implement fft.fftshift/ifftshift with np.roll for improved...
+* `#10078 <https://github.com/numpy/numpy/pull/10078>`__: DOC: document nested_iters
+* `#10128 <https://github.com/numpy/numpy/pull/10128>`__: BUG: Prefix library names with `lib` on windows.
+* `#10142 <https://github.com/numpy/numpy/pull/10142>`__: DEP: Pending deprecation warning for matrix
+* `#10154 <https://github.com/numpy/numpy/pull/10154>`__: MAINT: Use a StructSequence in place of the typeinfo tuples
+* `#10158 <https://github.com/numpy/numpy/pull/10158>`__: BUG: Fix a few smaller valgrind errors
+* `#10178 <https://github.com/numpy/numpy/pull/10178>`__: MAINT: Prepare master for 1.15 development.
+* `#10186 <https://github.com/numpy/numpy/pull/10186>`__: MAINT: Move histogram and histogramdd into their own module
+* `#10187 <https://github.com/numpy/numpy/pull/10187>`__: BUG: Extra space is inserted on first line for long elements
+* `#10192 <https://github.com/numpy/numpy/pull/10192>`__: DEP: Deprecate the pickle aliases
+* `#10193 <https://github.com/numpy/numpy/pull/10193>`__: BUG: Fix bugs found by testing in release mode.
+* `#10194 <https://github.com/numpy/numpy/pull/10194>`__: BUG, MAINT: Ufunc reduce reference leak
+* `#10195 <https://github.com/numpy/numpy/pull/10195>`__: DOC: Fixup percentile docstring, from review in gh-9213
+* `#10196 <https://github.com/numpy/numpy/pull/10196>`__: BUG: Fix regression in np.ma.load in gh-10055
+* `#10199 <https://github.com/numpy/numpy/pull/10199>`__: ENH: Quantile
+* `#10203 <https://github.com/numpy/numpy/pull/10203>`__: MAINT: Update development branch version to 1.15.0.
+* `#10205 <https://github.com/numpy/numpy/pull/10205>`__: BUG: Handle NaNs correctly in arange
+* `#10207 <https://github.com/numpy/numpy/pull/10207>`__: ENH: Allow `np.r_` to accept 0d arrays
+* `#10208 <https://github.com/numpy/numpy/pull/10208>`__: MAINT: Improve error message for void(-1)
+* `#10210 <https://github.com/numpy/numpy/pull/10210>`__: DOC: change 'a'->'prototype' in empty_like docs (addresses #10209)
+* `#10211 <https://github.com/numpy/numpy/pull/10211>`__: MAINT,ENH: remove MaskedArray.astype, as the base type does everything.
+* `#10212 <https://github.com/numpy/numpy/pull/10212>`__: DOC: fix minor typos
+* `#10213 <https://github.com/numpy/numpy/pull/10213>`__: ENH: Set up proposed NEP process
+* `#10214 <https://github.com/numpy/numpy/pull/10214>`__: DOC: add warning to isclose function
+* `#10216 <https://github.com/numpy/numpy/pull/10216>`__: BUG: Fix broken format string picked up by LGTM.com
+* `#10220 <https://github.com/numpy/numpy/pull/10220>`__: DOC: clarify that np.absolute == np.abs
+* `#10223 <https://github.com/numpy/numpy/pull/10223>`__: ENH: added masked version of 'numpy.stack' with tests.
+* `#10225 <https://github.com/numpy/numpy/pull/10225>`__: ENH: distutils: parallelize builds by default
+* `#10226 <https://github.com/numpy/numpy/pull/10226>`__: BUG: distutils: use correct top-level package name
+* `#10229 <https://github.com/numpy/numpy/pull/10229>`__: BUG: distutils: fix extra DLL loading in certain scenarios
+* `#10231 <https://github.com/numpy/numpy/pull/10231>`__: BUG: Fix sign-compare warnings in datetime.c and datetime_strings.c.
+* `#10232 <https://github.com/numpy/numpy/pull/10232>`__: BUG: Don't reimplement isclose in np.ma
+* `#10237 <https://github.com/numpy/numpy/pull/10237>`__: DOC: give correct version of np.nansum change
+* `#10241 <https://github.com/numpy/numpy/pull/10241>`__: MAINT: Avoid repeated validation of percentiles in nanpercentile
+* `#10247 <https://github.com/numpy/numpy/pull/10247>`__: MAINT: fix typo
+* `#10248 <https://github.com/numpy/numpy/pull/10248>`__: DOC: Add installation notes for Linux users
+* `#10249 <https://github.com/numpy/numpy/pull/10249>`__: MAINT: Fix tests failures on travis CI merge.
+* `#10250 <https://github.com/numpy/numpy/pull/10250>`__: MAINT: Check for `__array_ufunc__` before doing anything else.
+* `#10251 <https://github.com/numpy/numpy/pull/10251>`__: ENH: Enable AVX2/AVX512 support to numpy
+* `#10252 <https://github.com/numpy/numpy/pull/10252>`__: MAINT: Workaround for new travis sdist failures.
+* `#10255 <https://github.com/numpy/numpy/pull/10255>`__: MAINT: Fix loop and simd sign-compare warnings.
+* `#10257 <https://github.com/numpy/numpy/pull/10257>`__: BUG: duplicate message print if warning raises an exception
+* `#10259 <https://github.com/numpy/numpy/pull/10259>`__: BUG: Make sure einsum default value of `optimize` is True.
+* `#10260 <https://github.com/numpy/numpy/pull/10260>`__: ENH: Add pytest support
+* `#10261 <https://github.com/numpy/numpy/pull/10261>`__: MAINT: Extract helper functions from histogram
+* `#10262 <https://github.com/numpy/numpy/pull/10262>`__: DOC: Add missing release note for #10207
+* `#10263 <https://github.com/numpy/numpy/pull/10263>`__: BUG: Fix strange behavior of infinite-step-size/underflow-case...
+* `#10264 <https://github.com/numpy/numpy/pull/10264>`__: MAINT: Fix (some) yield warnings
+* `#10266 <https://github.com/numpy/numpy/pull/10266>`__: BUG: distutils: fix locale decoding errors
+* `#10268 <https://github.com/numpy/numpy/pull/10268>`__: BUG: Fix misleading error when coercing to array
+* `#10269 <https://github.com/numpy/numpy/pull/10269>`__: MAINT: extract private helper function to compute histogram bin...
+* `#10271 <https://github.com/numpy/numpy/pull/10271>`__: BUG: Allow nan values in the data when the bins are explicit
+* `#10278 <https://github.com/numpy/numpy/pull/10278>`__: ENH: Add support for datetimes to histograms
+* `#10282 <https://github.com/numpy/numpy/pull/10282>`__: MAINT: Extract helper function for last-bound-inclusive search_sorted
+* `#10283 <https://github.com/numpy/numpy/pull/10283>`__: MAINT: Fallback on the default sequence multiplication behavior
+* `#10284 <https://github.com/numpy/numpy/pull/10284>`__: MAINT/BUG: Tidy gen_umath
+* `#10286 <https://github.com/numpy/numpy/pull/10286>`__: BUG: Fix memory leak (#10157).
+* `#10287 <https://github.com/numpy/numpy/pull/10287>`__: ENH: Allow ptp to take an axis tuple and keepdims
+* `#10292 <https://github.com/numpy/numpy/pull/10292>`__: BUG: Masked singleton can be reshaped to be non-scalar
+* `#10293 <https://github.com/numpy/numpy/pull/10293>`__: MAINT: Fix sign-compare warnings in mem_overlap.c.
+* `#10294 <https://github.com/numpy/numpy/pull/10294>`__: MAINT: pytest cleanups
+* `#10298 <https://github.com/numpy/numpy/pull/10298>`__: DOC: Explain np.digitize and np.searchsorted more clearly
+* `#10300 <https://github.com/numpy/numpy/pull/10300>`__: MAINT, DOC: Documentation and misc. typos
+* `#10303 <https://github.com/numpy/numpy/pull/10303>`__: MAINT: Array wrap/prepare identification cleanup
+* `#10309 <https://github.com/numpy/numpy/pull/10309>`__: MAINT: deduplicate check_nonreorderable_axes
+* `#10314 <https://github.com/numpy/numpy/pull/10314>`__: BUG: Ensure `__array_finalize__` cannot back-mangle shape
+* `#10316 <https://github.com/numpy/numpy/pull/10316>`__: DOC: add documentation about how to handle new array printing
+* `#10320 <https://github.com/numpy/numpy/pull/10320>`__: BUG: skip the extra-dll directory when there are no DLLS
+* `#10323 <https://github.com/numpy/numpy/pull/10323>`__: MAINT: Remove duplicated code for promoting dtype and array types.
+* `#10324 <https://github.com/numpy/numpy/pull/10324>`__: BUG: Fix crashes when using float32 values in uniform histograms
+* `#10325 <https://github.com/numpy/numpy/pull/10325>`__: MAINT: Replace manual expansion of PyArray_MinScalarType with...
+* `#10327 <https://github.com/numpy/numpy/pull/10327>`__: MAINT: Fix misc. typos
+* `#10333 <https://github.com/numpy/numpy/pull/10333>`__: DOC: typo fix in numpy.linalg.det docstring
+* `#10334 <https://github.com/numpy/numpy/pull/10334>`__: DOC: Fix typos in docs for partition method
+* `#10336 <https://github.com/numpy/numpy/pull/10336>`__: DOC: Post 1.14.0 release updates.
+* `#10337 <https://github.com/numpy/numpy/pull/10337>`__: ENH: Show the silenced error and traceback in warning `__cause__`
+* `#10341 <https://github.com/numpy/numpy/pull/10341>`__: BUG: fix config where PATH isn't set on win32
+* `#10342 <https://github.com/numpy/numpy/pull/10342>`__: BUG: arrays not being flattened in `union1d`
+* `#10346 <https://github.com/numpy/numpy/pull/10346>`__: ENH: Check matching inputs/outputs in umath generation
+* `#10352 <https://github.com/numpy/numpy/pull/10352>`__: BUG: Fix einsum optimize logic for singleton dimensions
+* `#10354 <https://github.com/numpy/numpy/pull/10354>`__: BUG: fix error message not formatted in einsum
+* `#10359 <https://github.com/numpy/numpy/pull/10359>`__: BUG: do not optimize einsum with only 2 arguments.
+* `#10361 <https://github.com/numpy/numpy/pull/10361>`__: BUG: complex repr has extra spaces, missing +
+* `#10362 <https://github.com/numpy/numpy/pull/10362>`__: MAINT: Update download URL in setup.py.
+* `#10367 <https://github.com/numpy/numpy/pull/10367>`__: BUG: add missing paren and remove quotes from repr of fieldless...
+* `#10371 <https://github.com/numpy/numpy/pull/10371>`__: BUG: fix einsum issue with unicode input and py2
+* `#10381 <https://github.com/numpy/numpy/pull/10381>`__: BUG/ENH: Improve output for structured non-void types
+* `#10388 <https://github.com/numpy/numpy/pull/10388>`__: ENH: Add types for int and uint of explicit sizes to swig.
+* `#10390 <https://github.com/numpy/numpy/pull/10390>`__: MAINT: Adjust type promotion in linalg.norm
+* `#10391 <https://github.com/numpy/numpy/pull/10391>`__: BUG: Make dtype.descr error for out-of-order fields
+* `#10392 <https://github.com/numpy/numpy/pull/10392>`__: DOC: Document behaviour of `np.concatenate` with `axis=None`
+* `#10401 <https://github.com/numpy/numpy/pull/10401>`__: BUG: Resize bytes_ columns in genfromtxt
+* `#10402 <https://github.com/numpy/numpy/pull/10402>`__: DOC: added "steals a reference" to PyArray_FromAny
+* `#10406 <https://github.com/numpy/numpy/pull/10406>`__: ENH: add `np.printoptions`, a context manager
+* `#10411 <https://github.com/numpy/numpy/pull/10411>`__: BUG: Revert multifield-indexing adds padding bytes for NumPy...
+* `#10412 <https://github.com/numpy/numpy/pull/10412>`__: ENH: Fix repr of np.record objects to match np.void types
+* `#10414 <https://github.com/numpy/numpy/pull/10414>`__: MAINT: Fix sign-compare warnings in umath_linalg.
+* `#10415 <https://github.com/numpy/numpy/pull/10415>`__: MAINT: Fix sign-compare warnings in npy_binsearch, npy_partition.
+* `#10416 <https://github.com/numpy/numpy/pull/10416>`__: MAINT: Fix sign-compare warnings in dragon4.c.
+* `#10418 <https://github.com/numpy/numpy/pull/10418>`__: MAINT: Remove repeated #ifdefs implementing `isinstance(x, basestring)`...
+* `#10420 <https://github.com/numpy/numpy/pull/10420>`__: DOC: Fix version added labels in numpy.unique docs
+* `#10421 <https://github.com/numpy/numpy/pull/10421>`__: DOC: Fix type of axis in nanfunctions
+* `#10423 <https://github.com/numpy/numpy/pull/10423>`__: MAINT: Update zesty to artful for i386 testing
+* `#10426 <https://github.com/numpy/numpy/pull/10426>`__: DOC: Add version when linalg.norm accepted axis
+* `#10427 <https://github.com/numpy/numpy/pull/10427>`__: DOC: Fix typo in docs for argpartition
+* `#10430 <https://github.com/numpy/numpy/pull/10430>`__: MAINT: Use ValueError for duplicate field names in lookup
+* `#10433 <https://github.com/numpy/numpy/pull/10433>`__: DOC: Add 1.14.1 release notes template (forward port)
+* `#10434 <https://github.com/numpy/numpy/pull/10434>`__: MAINT: Move `tools/announce.py` to `tools/changelog.py`.
+* `#10441 <https://github.com/numpy/numpy/pull/10441>`__: BUG: Fix nan_to_num return with integer input
+* `#10443 <https://github.com/numpy/numpy/pull/10443>`__: BUG: Fix various Big-Endian test failures (ppc64)
+* `#10444 <https://github.com/numpy/numpy/pull/10444>`__: MAINT: Implement float128 dragon4 for IBM double-double (ppc64)
+* `#10451 <https://github.com/numpy/numpy/pull/10451>`__: BUG: prevent the MSVC 14.1 compiler (Visual Studio 2017) from...
+* `#10453 <https://github.com/numpy/numpy/pull/10453>`__: Revert "BUG: prevent the MSVC 14.1 compiler (Visual Studio 2017)...
+* `#10458 <https://github.com/numpy/numpy/pull/10458>`__: BLD: Use zip_safe=False in setup() call
+* `#10459 <https://github.com/numpy/numpy/pull/10459>`__: MAINT: Remove duplicated logic between array_wrap and array_prepare
+* `#10463 <https://github.com/numpy/numpy/pull/10463>`__: ENH: Add entry_points for f2py, conv_template, and from_template.
+* `#10465 <https://github.com/numpy/numpy/pull/10465>`__: MAINT: Fix miscellaneous sign-compare warnings.
+* `#10472 <https://github.com/numpy/numpy/pull/10472>`__: DOC: Document A@B in Matlab/NumPy summary table
+* `#10473 <https://github.com/numpy/numpy/pull/10473>`__: BUG: Fixed polydiv for Complex Numbers
+* `#10475 <https://github.com/numpy/numpy/pull/10475>`__: DOC: Add CircleCI builder for devdocs
+* `#10476 <https://github.com/numpy/numpy/pull/10476>`__: DOC: fix formatting in interp example
+* `#10477 <https://github.com/numpy/numpy/pull/10477>`__: BUG: Align type definition with generated lapack
+* `#10478 <https://github.com/numpy/numpy/pull/10478>`__: DOC: Minor punctuation cleanups and improved explanation.
+* `#10479 <https://github.com/numpy/numpy/pull/10479>`__: BUG: Fix calling ufuncs with a positional output argument.
+* `#10482 <https://github.com/numpy/numpy/pull/10482>`__: BUG: Add missing DECREF in Py2 int() cast
+* `#10484 <https://github.com/numpy/numpy/pull/10484>`__: MAINT: Remove unused code path for applying maskedarray domains...
+* `#10497 <https://github.com/numpy/numpy/pull/10497>`__: DOC: Tell matlab users about np.block
+* `#10498 <https://github.com/numpy/numpy/pull/10498>`__: MAINT: Remove special cases in np.unique
+* `#10501 <https://github.com/numpy/numpy/pull/10501>`__: BUG: fromregex: asbytes called on regexp objects
+* `#10502 <https://github.com/numpy/numpy/pull/10502>`__: MAINT: Use AxisError in swapaxes, unique, and diagonal
+* `#10503 <https://github.com/numpy/numpy/pull/10503>`__: BUG: Fix unused-result warning.
+* `#10506 <https://github.com/numpy/numpy/pull/10506>`__: MAINT: Delete unused `_build_utils/common.py`
+* `#10508 <https://github.com/numpy/numpy/pull/10508>`__: BUG: Add missing `#define _MULTIARRAYMODULE` to vdot.c
+* `#10509 <https://github.com/numpy/numpy/pull/10509>`__: MAINT: Use new-style format strings for clarity
+* `#10516 <https://github.com/numpy/numpy/pull/10516>`__: MAINT: Allow errors to escape from InitOperators
+* `#10518 <https://github.com/numpy/numpy/pull/10518>`__: ENH: Add a repr to np._NoValue
+* `#10522 <https://github.com/numpy/numpy/pull/10522>`__: MAINT: Remove the unmaintained umath ``__version__`` constant.
+* `#10524 <https://github.com/numpy/numpy/pull/10524>`__: BUG: fix np.save issue with python 2.7.5
+* `#10529 <https://github.com/numpy/numpy/pull/10529>`__: BUG: Provide a better error message for out-of-order fields
+* `#10543 <https://github.com/numpy/numpy/pull/10543>`__: DEP: Issue FutureWarning when malformed records detected.
+* `#10544 <https://github.com/numpy/numpy/pull/10544>`__: BUG: infinite recursion in str of 0d subclasses
+* `#10546 <https://github.com/numpy/numpy/pull/10546>`__: BUG: In numpy.i, clear CARRAY flag if wrapped buffer is not C_CONTIGUOUS.
+* `#10547 <https://github.com/numpy/numpy/pull/10547>`__: DOC: Fix incorrect formula in gradient docstring.
+* `#10548 <https://github.com/numpy/numpy/pull/10548>`__: BUG: Set missing exception after malloc
+* `#10549 <https://github.com/numpy/numpy/pull/10549>`__: ENH: Make NpzFile conform to the Mapping protocol
+* `#10553 <https://github.com/numpy/numpy/pull/10553>`__: MAINT: Cleanups to promote_types and result_types
+* `#10554 <https://github.com/numpy/numpy/pull/10554>`__: DOC: promote_types is not associative by design,
+* `#10555 <https://github.com/numpy/numpy/pull/10555>`__: BUG: Add missing PyErr_NoMemory() after malloc
+* `#10564 <https://github.com/numpy/numpy/pull/10564>`__: BUG: Provide correct format in Py_buffer for scalars
+* `#10566 <https://github.com/numpy/numpy/pull/10566>`__: BUG: Fix travis failure in previous commit
+* `#10571 <https://github.com/numpy/numpy/pull/10571>`__: BUG: Fix corner-case behavior of cond() and use SVD when possible
+* `#10576 <https://github.com/numpy/numpy/pull/10576>`__: MAINT: Fix misc. documentation typos
+* `#10583 <https://github.com/numpy/numpy/pull/10583>`__: MAINT: Fix typos in DISTUTILS.rst.txt.
+* `#10588 <https://github.com/numpy/numpy/pull/10588>`__: BUG: Revert sort optimization in np.unique.
+* `#10589 <https://github.com/numpy/numpy/pull/10589>`__: BUG: fix entry_points typo for from-template
+* `#10591 <https://github.com/numpy/numpy/pull/10591>`__: ENH: Add histogram_bin_edges function and test
+* `#10592 <https://github.com/numpy/numpy/pull/10592>`__: DOC: Corrected url for Guide to NumPy book; see part of #8520,...
+* `#10596 <https://github.com/numpy/numpy/pull/10596>`__: MAINT: Update sphinxext submodule hash.
+* `#10599 <https://github.com/numpy/numpy/pull/10599>`__: ENH: Make flatnonzero call asanyarray before ravel()
+* `#10603 <https://github.com/numpy/numpy/pull/10603>`__: MAINT: Improve error message in histogram.
+* `#10604 <https://github.com/numpy/numpy/pull/10604>`__: MAINT: Fix Misc. typos
+* `#10606 <https://github.com/numpy/numpy/pull/10606>`__: MAINT: Do not use random roots when testing roots.
+* `#10618 <https://github.com/numpy/numpy/pull/10618>`__: MAINT: Stop using non-tuple indices internally
+* `#10619 <https://github.com/numpy/numpy/pull/10619>`__: BUG: np.ma.flatnotmasked_contiguous behaves differently on mask=nomask...
+* `#10621 <https://github.com/numpy/numpy/pull/10621>`__: BUG: deallocate recursive closure in arrayprint.py
+* `#10623 <https://github.com/numpy/numpy/pull/10623>`__: BUG: Correctly identify comma separated dtype strings
+* `#10625 <https://github.com/numpy/numpy/pull/10625>`__: BUG: Improve the accuracy of the FFT implementation
+* `#10635 <https://github.com/numpy/numpy/pull/10635>`__: ENH: Implement initial kwarg for ufunc.add.reduce
+* `#10641 <https://github.com/numpy/numpy/pull/10641>`__: MAINT: Post 1.14.1 release updates for master branch
+* `#10650 <https://github.com/numpy/numpy/pull/10650>`__: BUG: Fix missing NPY_VISIBILITY_HIDDEN on npy_longdouble_to_PyLong
+* `#10653 <https://github.com/numpy/numpy/pull/10653>`__: MAINT: Remove duplicate implementation for aliased functions.
+* `#10657 <https://github.com/numpy/numpy/pull/10657>`__: BUG: f2py: fix f2py generated code to work on Pypy
+* `#10658 <https://github.com/numpy/numpy/pull/10658>`__: BUG: Make np.partition and np.sort work on np.matrix when axis=None
+* `#10660 <https://github.com/numpy/numpy/pull/10660>`__: BUG/MAINT: Remove special cases for 0d arrays in interp
+* `#10661 <https://github.com/numpy/numpy/pull/10661>`__: MAINT: Unify reductions in fromnumeric.py
+* `#10665 <https://github.com/numpy/numpy/pull/10665>`__: ENH: umath: don't make temporary copies for in-place accumulation
+* `#10666 <https://github.com/numpy/numpy/pull/10666>`__: BUG: fix complex casting error in cov with aweights
+* `#10669 <https://github.com/numpy/numpy/pull/10669>`__: MAINT: Covariance must be symmetric as well as positive-semidefinite.
+* `#10670 <https://github.com/numpy/numpy/pull/10670>`__: DEP: Deprecate np.sum(generator)
+* `#10671 <https://github.com/numpy/numpy/pull/10671>`__: DOC/MAINT: More misc. typos
+* `#10672 <https://github.com/numpy/numpy/pull/10672>`__: ENH: Allow dtype field names to be ascii encoded unicode in Python2
+* `#10676 <https://github.com/numpy/numpy/pull/10676>`__: BUG: F2py mishandles quoted control characters
+* `#10677 <https://github.com/numpy/numpy/pull/10677>`__: STY: Minor stylistic cleanup of numeric.py
+* `#10679 <https://github.com/numpy/numpy/pull/10679>`__: DOC: zeros, empty, and ones now have consistent docstrings
+* `#10684 <https://github.com/numpy/numpy/pull/10684>`__: ENH: Modify intersect1d to return common indices
+* `#10689 <https://github.com/numpy/numpy/pull/10689>`__: BLD: Add configuration changes to allow cross platform builds...
+* `#10691 <https://github.com/numpy/numpy/pull/10691>`__: DOC: add versionadded for NDArrayOperatorsMixin.
+* `#10694 <https://github.com/numpy/numpy/pull/10694>`__: DOC: Improve docstring of memmap
+* `#10698 <https://github.com/numpy/numpy/pull/10698>`__: BUG: Further back-compat fix for subclassed array repr (forward...
+* `#10699 <https://github.com/numpy/numpy/pull/10699>`__: DOC: Grammar of np.gradient docstring
+* `#10702 <https://github.com/numpy/numpy/pull/10702>`__: TST, DOC: Upload devdocs and neps after circleci build
+* `#10703 <https://github.com/numpy/numpy/pull/10703>`__: MAINT: NEP process updates
+* `#10708 <https://github.com/numpy/numpy/pull/10708>`__: BUG: fix problem with modifying pyf lines containing ';' in f2py
+* `#10710 <https://github.com/numpy/numpy/pull/10710>`__: BUG: fix error message in numpy.select
+* `#10711 <https://github.com/numpy/numpy/pull/10711>`__: MAINT: Hard tab and whitespace cleanup.
+* `#10715 <https://github.com/numpy/numpy/pull/10715>`__: MAINT: Fixed C++ guard in f2py test.
+* `#10716 <https://github.com/numpy/numpy/pull/10716>`__: BUG: dragon4 fractional output mode adds too many trailing zeros
+* `#10718 <https://github.com/numpy/numpy/pull/10718>`__: BUG: Fix bug in asserting near equality of float16 arrays.
+* `#10719 <https://github.com/numpy/numpy/pull/10719>`__: DOC: add documentation for constants
+* `#10720 <https://github.com/numpy/numpy/pull/10720>`__: BUG: distutils: Remove named templates from the processed output...
+* `#10722 <https://github.com/numpy/numpy/pull/10722>`__: MAINT: Misc small fixes.
+* `#10730 <https://github.com/numpy/numpy/pull/10730>`__: DOC: Fix minor typo in how-to-document.
+* `#10732 <https://github.com/numpy/numpy/pull/10732>`__: BUG: Fix `setup.py build install egg_info`, which did not previously...
+* `#10734 <https://github.com/numpy/numpy/pull/10734>`__: DOC: Post 1.14.2 release update.
+* `#10737 <https://github.com/numpy/numpy/pull/10737>`__: MAINT: Fix low-hanging PyPy compatibility issues
+* `#10739 <https://github.com/numpy/numpy/pull/10739>`__: BUG: Fix histogram bins="auto" for data with little variance
+* `#10740 <https://github.com/numpy/numpy/pull/10740>`__: MAINT, TST: Fixes for Python 3.7
+* `#10743 <https://github.com/numpy/numpy/pull/10743>`__: MAINT: Import abstract classes from collections.abc
+* `#10745 <https://github.com/numpy/numpy/pull/10745>`__: ENH: Add object loops to the comparison ufuncs
+* `#10746 <https://github.com/numpy/numpy/pull/10746>`__: MAINT: Fix typo in warning message
+* `#10748 <https://github.com/numpy/numpy/pull/10748>`__: DOC: a.size and np.prod(a.shape) are not equivalent
+* `#10750 <https://github.com/numpy/numpy/pull/10750>`__: DOC: Add graph showing different behaviors of np.percentile
+* `#10755 <https://github.com/numpy/numpy/pull/10755>`__: DOC: Move bin estimator documentation from `histogram` to `histogram_bin_edges`
+* `#10758 <https://github.com/numpy/numpy/pull/10758>`__: TST: Change most travisci tests to Python3.6.
+* `#10763 <https://github.com/numpy/numpy/pull/10763>`__: BUG: floating types should override tp_print
+* `#10766 <https://github.com/numpy/numpy/pull/10766>`__: MAINT: Remove the unused scalarmath getters for fmod and sqrt
+* `#10773 <https://github.com/numpy/numpy/pull/10773>`__: BUG: Use dummy_threading on platforms that don't support threading
+* `#10774 <https://github.com/numpy/numpy/pull/10774>`__: BUG: Fix SQRT_MIN for platforms with 8-byte long double
+* `#10775 <https://github.com/numpy/numpy/pull/10775>`__: BUG: Return NULL from PyInit_* when exception is raised
+* `#10777 <https://github.com/numpy/numpy/pull/10777>`__: MAINT: Remove use of unittest in NumPy tests.
+* `#10778 <https://github.com/numpy/numpy/pull/10778>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key
+* `#10781 <https://github.com/numpy/numpy/pull/10781>`__: ENH: NEP index builder
+* `#10785 <https://github.com/numpy/numpy/pull/10785>`__: DOC: Fixed author name in reference to book
+* `#10786 <https://github.com/numpy/numpy/pull/10786>`__: ENH: Add "stablesort" option to np.sort as an alias for "mergesort".
+* `#10790 <https://github.com/numpy/numpy/pull/10790>`__: TST: Various fixes prior to switching to pytest
+* `#10795 <https://github.com/numpy/numpy/pull/10795>`__: BUG: Allow spaces in output string of einsum
+* `#10796 <https://github.com/numpy/numpy/pull/10796>`__: BUG: fix wrong inplace vectorization on overlapping arguments
+* `#10798 <https://github.com/numpy/numpy/pull/10798>`__: BUG: error checking before mapping of einsum axes.
+* `#10800 <https://github.com/numpy/numpy/pull/10800>`__: DOC: Add remarks about array vs scalar output to every ufunc
+* `#10802 <https://github.com/numpy/numpy/pull/10802>`__: BUG/DOC/MAINT: Tidy up histogramdd
+* `#10807 <https://github.com/numpy/numpy/pull/10807>`__: DOC: Update link to tox in development docs (#10806)
+* `#10812 <https://github.com/numpy/numpy/pull/10812>`__: MAINT: Rearrange `numpy/testing` files
+* `#10814 <https://github.com/numpy/numpy/pull/10814>`__: BUG: verify the OS supports avx instruction
+* `#10822 <https://github.com/numpy/numpy/pull/10822>`__: BUG: fixes exception in numpy.genfromtxt, see #10780
+* `#10824 <https://github.com/numpy/numpy/pull/10824>`__: BUG: test, fix PyArray_DiscardWritebackIfCopy refcount issue...
+* `#10826 <https://github.com/numpy/numpy/pull/10826>`__: BUG: np.squeeze() now respects older API axis expectation
+* `#10827 <https://github.com/numpy/numpy/pull/10827>`__: ENH: Add tester for pytest.
+* `#10828 <https://github.com/numpy/numpy/pull/10828>`__: BUG: fix obvious mistake in testing/decorators warning.
+* `#10829 <https://github.com/numpy/numpy/pull/10829>`__: BLD: use Python 3.6 instead of 2.7 as default for doc build.
+* `#10830 <https://github.com/numpy/numpy/pull/10830>`__: BUG: Fix obvious warning bugs.
+* `#10831 <https://github.com/numpy/numpy/pull/10831>`__: DOC: Fix minor typos
+* `#10832 <https://github.com/numpy/numpy/pull/10832>`__: ENH: datetime64: support AC dates starting with '+'
+* `#10833 <https://github.com/numpy/numpy/pull/10833>`__: ENH: Add support for the 64-bit RISC-V architecture
+* `#10834 <https://github.com/numpy/numpy/pull/10834>`__: DOC: note that NDEBUG should be set when OPT should increase...
+* `#10836 <https://github.com/numpy/numpy/pull/10836>`__: MAINT: Fix script name for pushing NEP docs to repo
+* `#10840 <https://github.com/numpy/numpy/pull/10840>`__: MAINT: Fix typo in code example.
+* `#10842 <https://github.com/numpy/numpy/pull/10842>`__: TST: Switch to pytest
+* `#10849 <https://github.com/numpy/numpy/pull/10849>`__: DOC: fix examples in docstring for np.flip
+* `#10850 <https://github.com/numpy/numpy/pull/10850>`__: DEP: Issue deprecation warnings for some imports.
+* `#10858 <https://github.com/numpy/numpy/pull/10858>`__: MAINT: Post pytest switch cleanup
+* `#10859 <https://github.com/numpy/numpy/pull/10859>`__: MAINT: Remove yield tests
+* `#10860 <https://github.com/numpy/numpy/pull/10860>`__: BUG: core: fix NPY_TITLE_KEY macro on pypy
+* `#10863 <https://github.com/numpy/numpy/pull/10863>`__: MAINT: More Histogramdd cleanup
+* `#10867 <https://github.com/numpy/numpy/pull/10867>`__: DOC: Cross Link full/full_like in a few see-also sections.
+* `#10869 <https://github.com/numpy/numpy/pull/10869>`__: BUG: Fix encoding regression in ma/bench.py (Issue #10868)
+* `#10871 <https://github.com/numpy/numpy/pull/10871>`__: MAINT: Remove unnecessary special case in np.histogramdd for...
+* `#10872 <https://github.com/numpy/numpy/pull/10872>`__: ENH: Extend np.flip to work over multiple axes
+* `#10874 <https://github.com/numpy/numpy/pull/10874>`__: DOC: State in docstring that lexsort is stable (#10873).
+* `#10875 <https://github.com/numpy/numpy/pull/10875>`__: BUG: fix savetxt, loadtxt for '+-' in complex
+* `#10878 <https://github.com/numpy/numpy/pull/10878>`__: DOC: rework documents and silence warnings during sphinx build
+* `#10882 <https://github.com/numpy/numpy/pull/10882>`__: BUG: have `_array_from_buffer_3118` correctly handle errors
+* `#10883 <https://github.com/numpy/numpy/pull/10883>`__: DOC: Fix negative binomial documentation.
+* `#10885 <https://github.com/numpy/numpy/pull/10885>`__: TST: Re-enable test display on appveyor
+* `#10890 <https://github.com/numpy/numpy/pull/10890>`__: MAINT: lstsq: compute residuals inside the ufunc
+* `#10891 <https://github.com/numpy/numpy/pull/10891>`__: TST: Extract a helper function to test for reference cycles
+* `#10898 <https://github.com/numpy/numpy/pull/10898>`__: ENH: Have dtype transfer for equivalent user dtypes prefer user-defined...
+* `#10901 <https://github.com/numpy/numpy/pull/10901>`__: DOC, BUG : Bad link to `np.random.randint`
+* `#10903 <https://github.com/numpy/numpy/pull/10903>`__: DOC: Fix link in `See Also` section of `randn` docstring.
+* `#10907 <https://github.com/numpy/numpy/pull/10907>`__: TST: reactivate module docstring tests, fix float formatting
+* `#10911 <https://github.com/numpy/numpy/pull/10911>`__: BUG: Fix casting between npy_half and float in einsum
+* `#10916 <https://github.com/numpy/numpy/pull/10916>`__: BUG: Add missing underscore to prototype in check_embedded_lapack
+* `#10919 <https://github.com/numpy/numpy/pull/10919>`__: BUG: Pass non-None outputs to `__array_prepare__` and `__array_wrap__`
+* `#10921 <https://github.com/numpy/numpy/pull/10921>`__: DOC: clear up warnings, fix matplotlib plot
+* `#10923 <https://github.com/numpy/numpy/pull/10923>`__: BUG: fixed dtype alignment for array of structs in case of converting...
+* `#10925 <https://github.com/numpy/numpy/pull/10925>`__: DOC: Fix typos in 1.15.0 changelog
+* `#10936 <https://github.com/numpy/numpy/pull/10936>`__: DOC: Fix NumpyVersion example (closes gh-10935)
+* `#10938 <https://github.com/numpy/numpy/pull/10938>`__: MAINT: One step closer to vectorizing lstsq
+* `#10940 <https://github.com/numpy/numpy/pull/10940>`__: DOC: fix broken links for developer documentation
+* `#10943 <https://github.com/numpy/numpy/pull/10943>`__: ENH: Add a search box to the sidebar in the docs
+* `#10945 <https://github.com/numpy/numpy/pull/10945>`__: MAINT: Remove references to the 2008 documentation marathon
+* `#10946 <https://github.com/numpy/numpy/pull/10946>`__: BUG: 'style' arg to array2string broken in legacy mode
+* `#10949 <https://github.com/numpy/numpy/pull/10949>`__: DOC: cleanup documentation, continuation of nditer PR #9998
+* `#10951 <https://github.com/numpy/numpy/pull/10951>`__: BUG: it.close() disallows access to iterator, fixes #10950
+* `#10953 <https://github.com/numpy/numpy/pull/10953>`__: MAINT: address extraneous shape tuple checks in descriptor.c
+* `#10958 <https://github.com/numpy/numpy/pull/10958>`__: MAINT, DOC: Fix typos
+* `#10967 <https://github.com/numpy/numpy/pull/10967>`__: DOC: add quantile, nanquantile to toc
+* `#10970 <https://github.com/numpy/numpy/pull/10970>`__: WIP: Remove fragile use of `__array_interface__` in ctypeslib.as_array
+* `#10971 <https://github.com/numpy/numpy/pull/10971>`__: MAINT: Remove workaround for gh-10891
+* `#10973 <https://github.com/numpy/numpy/pull/10973>`__: DOC: advise against use of matrix.
+* `#10975 <https://github.com/numpy/numpy/pull/10975>`__: MAINT: move linalg tests using matrix to matrixlib
+* `#10980 <https://github.com/numpy/numpy/pull/10980>`__: DOC: link to governance, convert external link to internal
+* `#10984 <https://github.com/numpy/numpy/pull/10984>`__: MAINT: Added pytest cache folder to .gitignore
+* `#10985 <https://github.com/numpy/numpy/pull/10985>`__: MAINT, ENH: Move matrix_power to linalg and allow higher dimensions.
+* `#10986 <https://github.com/numpy/numpy/pull/10986>`__: MAINT: move all masked array matrix tests to matrixlib.
+* `#10987 <https://github.com/numpy/numpy/pull/10987>`__: DOC: Correction to docstring example (result was correct)
+* `#10988 <https://github.com/numpy/numpy/pull/10988>`__: MAINT: Small tidy-ups to ufunc_object.c
+* `#10991 <https://github.com/numpy/numpy/pull/10991>`__: DOC: Update genfromtxt docs to use StringIO and u-strings
+* `#10996 <https://github.com/numpy/numpy/pull/10996>`__: DOC: Make doc examples using StringIO python2-3 compatible
+* `#11003 <https://github.com/numpy/numpy/pull/11003>`__: DOC: work around GH isaacs/github#316 to show SVG image
+* `#11005 <https://github.com/numpy/numpy/pull/11005>`__: MAINT: Misc. typos
+* `#11006 <https://github.com/numpy/numpy/pull/11006>`__: TST, BUILD: add latex to circleci doc build
+* `#11008 <https://github.com/numpy/numpy/pull/11008>`__: REL: Fwd port 1.14.3 changelog
+* `#11009 <https://github.com/numpy/numpy/pull/11009>`__: DOC: release walkthrough updates from 1.14.3
+* `#11010 <https://github.com/numpy/numpy/pull/11010>`__: Move remaining Matrix tests to matrixlib
+* `#11011 <https://github.com/numpy/numpy/pull/11011>`__: MAINT: Simplify dimension-juggling in np.pad
+* `#11012 <https://github.com/numpy/numpy/pull/11012>`__: MAINT: np.pad: Add helper functions for producing slices along...
+* `#11018 <https://github.com/numpy/numpy/pull/11018>`__: ENH: Implement axis for generalized ufuncs.
+* `#11023 <https://github.com/numpy/numpy/pull/11023>`__: BUG: np.histogramdd loses precision on its inputs, leading to...
+* `#11026 <https://github.com/numpy/numpy/pull/11026>`__: MAINT: reduce code duplication in ufunc_frompyfunc
+* `#11033 <https://github.com/numpy/numpy/pull/11033>`__: BUG: Fix padding with large integers
+* `#11036 <https://github.com/numpy/numpy/pull/11036>`__: BUG: optimizing compilers can reorder call to npy_get_floatstatus
+* `#11037 <https://github.com/numpy/numpy/pull/11037>`__: BUG: initialize value before use
+* `#11038 <https://github.com/numpy/numpy/pull/11038>`__: ENH: Add `__deepcopy__` to MaskedConstant
+* `#11043 <https://github.com/numpy/numpy/pull/11043>`__: BUG: reduce using SSE only warns if inside SSE loop
+* `#11050 <https://github.com/numpy/numpy/pull/11050>`__: BUG: remove fast scalar power for arrays with object dtype
+* `#11053 <https://github.com/numpy/numpy/pull/11053>`__: DOC: bump scipy-sphinx-theme to current version
+* `#11055 <https://github.com/numpy/numpy/pull/11055>`__: DOC: Add explanation for comments=None in loadtxt.
+* `#11056 <https://github.com/numpy/numpy/pull/11056>`__: MAINT: Improve performance of random permutation
+* `#11057 <https://github.com/numpy/numpy/pull/11057>`__: BUG: use absolute imports in test files
+* `#11066 <https://github.com/numpy/numpy/pull/11066>`__: MAINT: `distutils.system_info`: handle Accelerate like any other...
+* `#11073 <https://github.com/numpy/numpy/pull/11073>`__: DOC: expand reasoning behind npy_*floatstatus_barrier()
+* `#11076 <https://github.com/numpy/numpy/pull/11076>`__: BUG: Ensure `PyArray_AssignRawScalar` respects `NPY_NEEDS_INIT`
+* `#11082 <https://github.com/numpy/numpy/pull/11082>`__: DOC: link to updated module docstring, not NEP
+* `#11083 <https://github.com/numpy/numpy/pull/11083>`__: ENH: remove nose from travis tests
+* `#11085 <https://github.com/numpy/numpy/pull/11085>`__: DOC: create label and ref, fixes broken link
+* `#11086 <https://github.com/numpy/numpy/pull/11086>`__: DOC: Mention we can return uninitialized values
+* `#11089 <https://github.com/numpy/numpy/pull/11089>`__: BLD: cleanup `_configtest.o.d` during build
+* `#11090 <https://github.com/numpy/numpy/pull/11090>`__: BUG: Added support for index values 27-52 in C einsum
+* `#11091 <https://github.com/numpy/numpy/pull/11091>`__: BUG: Python2 doubles don't print correctly in interactive shell
+* `#11094 <https://github.com/numpy/numpy/pull/11094>`__: DOC: add numpy.lib.format to docs and link to it
+* `#11095 <https://github.com/numpy/numpy/pull/11095>`__: MAINT: Einsum argument parsing cleanup
+* `#11097 <https://github.com/numpy/numpy/pull/11097>`__: BUG: fix datetime.timedelta->timedelta64 unit detection logic
+* `#11098 <https://github.com/numpy/numpy/pull/11098>`__: ENH: Add keepdims argument for generalized ufuncs.
+* `#11105 <https://github.com/numpy/numpy/pull/11105>`__: ENH: Add (put|take)_along_axis
+* `#11111 <https://github.com/numpy/numpy/pull/11111>`__: BUG: fix case of ISA selector in ufunc selection
+* `#11116 <https://github.com/numpy/numpy/pull/11116>`__: BUG: Typo in variable name in binary_repr
+* `#11120 <https://github.com/numpy/numpy/pull/11120>`__: MAINT: remove redundant code in `MaskedArray.__new__`
+* `#11122 <https://github.com/numpy/numpy/pull/11122>`__: BUG,MAINT: Ensure masked elements can be tested against nan and...
+* `#11124 <https://github.com/numpy/numpy/pull/11124>`__: BUG: Ensure that fully masked arrays pass assert_array_equal.
+* `#11134 <https://github.com/numpy/numpy/pull/11134>`__: DOC: Clarify tofile requirements
+* `#11137 <https://github.com/numpy/numpy/pull/11137>`__: MAINT: move remaining MaskedArray matrix tests to matrixlib.
+* `#11139 <https://github.com/numpy/numpy/pull/11139>`__: TST: turn some build warnings into errors
+* `#11140 <https://github.com/numpy/numpy/pull/11140>`__: MAINT: Update artful to bionic for i386 testing
+* `#11141 <https://github.com/numpy/numpy/pull/11141>`__: MAINT: Extract a helper function for prepending and appending
+* `#11145 <https://github.com/numpy/numpy/pull/11145>`__: DOC: cleanup NEP creation
+* `#11146 <https://github.com/numpy/numpy/pull/11146>`__: DOC: add a NEP to split MaskedArray into a separate package
+* `#11148 <https://github.com/numpy/numpy/pull/11148>`__: TST: make build warning into an error in runtest.py
+* `#11149 <https://github.com/numpy/numpy/pull/11149>`__: BUG: guessing datetime, time precedence
+* `#11152 <https://github.com/numpy/numpy/pull/11152>`__: BENCH: Add basic benchmarks for numpy.pad
+* `#11155 <https://github.com/numpy/numpy/pull/11155>`__: BUG: Prevent stackoverflow in conversion to datetime types
+* `#11158 <https://github.com/numpy/numpy/pull/11158>`__: TST: disable gc in refcount test
+* `#11159 <https://github.com/numpy/numpy/pull/11159>`__: TST: Skip ctypes dependent test that fails on Python < 2.7.7.
+* `#11160 <https://github.com/numpy/numpy/pull/11160>`__: TST: windows builds now properly support floating error states
+* `#11163 <https://github.com/numpy/numpy/pull/11163>`__: MAINT: Work around non-deterministic Python readdir order in...
+* `#11167 <https://github.com/numpy/numpy/pull/11167>`__: MAINT: Cleanup dragon4 code in various ways
+* `#11168 <https://github.com/numpy/numpy/pull/11168>`__: TST: linalg: add regression test for gh-8577
+* `#11169 <https://github.com/numpy/numpy/pull/11169>`__: MAINT: add sanity-checks to be run at import time
+* `#11173 <https://github.com/numpy/numpy/pull/11173>`__: MAINT: Ensure that parsing errors are passed on even in tests.
+* `#11176 <https://github.com/numpy/numpy/pull/11176>`__: MAINT: avoid setting non-existing gufunc strides for keepdims=True.
+* `#11177 <https://github.com/numpy/numpy/pull/11177>`__: DOC: improvement of the documentation for gufunc.
+* `#11178 <https://github.com/numpy/numpy/pull/11178>`__: TST: Test dimensions/indices found from parsed gufunc signatures.
+* `#11180 <https://github.com/numpy/numpy/pull/11180>`__: BUG: void dtype setup checked offset not actual pointer for alignment
+* `#11182 <https://github.com/numpy/numpy/pull/11182>`__: BUG: Avoid deprecated non-tuple indexing
+* `#11184 <https://github.com/numpy/numpy/pull/11184>`__: MAINT: Add bitmask helper functions
+* `#11185 <https://github.com/numpy/numpy/pull/11185>`__: MAINT: Add comments to long_double detection code
+* `#11186 <https://github.com/numpy/numpy/pull/11186>`__: TST: Add np.core._multiarray_tests.format_float_OSprintf_g
+* `#11187 <https://github.com/numpy/numpy/pull/11187>`__: MAINT: Use the more common -1 / 0 to indicate error / success
+* `#11189 <https://github.com/numpy/numpy/pull/11189>`__: NEP: Array function protocol
+* `#11190 <https://github.com/numpy/numpy/pull/11190>`__: DOC: Update NEP0 to clarify that discussion should happen on...
+* `#11191 <https://github.com/numpy/numpy/pull/11191>`__: MAINT: remove darwin hardcoded LDOUBLE detection
+* `#11193 <https://github.com/numpy/numpy/pull/11193>`__: BUG: Fix reference count/memory leak exposed by better testing
+* `#11200 <https://github.com/numpy/numpy/pull/11200>`__: BUG: Bytes delimiter/comments in genfromtxt should be decoded
+* `#11209 <https://github.com/numpy/numpy/pull/11209>`__: DOC: Fix doctest formatting in `rot90()` examples
+* `#11218 <https://github.com/numpy/numpy/pull/11218>`__: BUG: Fixes einsum broadcasting bug when optimize=True
+* `#11222 <https://github.com/numpy/numpy/pull/11222>`__: DOC: Make reference doc nditer examples python3 friendly
+* `#11223 <https://github.com/numpy/numpy/pull/11223>`__: BUG: Forcibly promote shape to uint64 in numpy.memmap.
+* `#11225 <https://github.com/numpy/numpy/pull/11225>`__: DOC: add existing recfunctions documentation to output
+* `#11226 <https://github.com/numpy/numpy/pull/11226>`__: MAINT: add 'rst' to nep filename, fixup urls
+* `#11229 <https://github.com/numpy/numpy/pull/11229>`__: NEP: New RNG policy
+* `#11231 <https://github.com/numpy/numpy/pull/11231>`__: MAINT: ensure we do not create unnecessary tuples for outputs
+* `#11238 <https://github.com/numpy/numpy/pull/11238>`__: MAINT: Don't update the flags a second time
+* `#11239 <https://github.com/numpy/numpy/pull/11239>`__: MAINT: Use PyArray_NewFromDescr where possible, remove unused...
+* `#11240 <https://github.com/numpy/numpy/pull/11240>`__: MAINT: Remove dead code backporting py2.6 warnings
+* `#11246 <https://github.com/numpy/numpy/pull/11246>`__: BUG: Set ndarray.base before `__array_finalize__`
+* `#11247 <https://github.com/numpy/numpy/pull/11247>`__: MAINT/BUG: Remove out-of-band reference count in PyArray_Newshape,...
+* `#11248 <https://github.com/numpy/numpy/pull/11248>`__: MAINT: Don't update the flags a second time
+* `#11249 <https://github.com/numpy/numpy/pull/11249>`__: BUG: Remove errant flag meddling in .real and .imag
+* `#11252 <https://github.com/numpy/numpy/pull/11252>`__: DOC: show how to generate release notes in release walkthrough
+* `#11257 <https://github.com/numpy/numpy/pull/11257>`__: BUG: ensure extobj and axes have their own references.
+* `#11260 <https://github.com/numpy/numpy/pull/11260>`__: MAINT: Do proper cleanup in get_ufunc_arguments.
+* `#11263 <https://github.com/numpy/numpy/pull/11263>`__: DOC: Update master after NumPy 1.14.4 release.
+* `#11269 <https://github.com/numpy/numpy/pull/11269>`__: BUG: Correct use of NPY_UNUSED.
+* `#11273 <https://github.com/numpy/numpy/pull/11273>`__: BUG: Remove invalid read in searchsorted if needle is empty
+* `#11275 <https://github.com/numpy/numpy/pull/11275>`__: TST: Do not use empty arrays in tests (unless they are not read)
+* `#11277 <https://github.com/numpy/numpy/pull/11277>`__: BUG: Work around past and present PEP3118 issues in ctypes
+* `#11280 <https://github.com/numpy/numpy/pull/11280>`__: DOC: make docstring of np.interp clearer
+* `#11286 <https://github.com/numpy/numpy/pull/11286>`__: BUG: einsum needs to check overlap on an out argument
+* `#11287 <https://github.com/numpy/numpy/pull/11287>`__: DOC: Minor documentation improvements
+* `#11291 <https://github.com/numpy/numpy/pull/11291>`__: BUG: Remove extra trailing parentheses.
+* `#11293 <https://github.com/numpy/numpy/pull/11293>`__: DOC: fix hierarchy of numericaltype
+* `#11296 <https://github.com/numpy/numpy/pull/11296>`__: BUG: Fix segfault on failing `__array_wrap__`
+* `#11298 <https://github.com/numpy/numpy/pull/11298>`__: BUG: Undo behavior change in ma.masked_values(shrink=True)
+* `#11307 <https://github.com/numpy/numpy/pull/11307>`__: BUG: Fix memmap regression when shape=None
+* `#11314 <https://github.com/numpy/numpy/pull/11314>`__: MAINT: remove unused "npy_import"
+* `#11315 <https://github.com/numpy/numpy/pull/11315>`__: MAINT: Package `tools/allocation_tracking`
+* `#11319 <https://github.com/numpy/numpy/pull/11319>`__: REL, REV: Revert f2py fixes that exposed SciPy bug.
+* `#11327 <https://github.com/numpy/numpy/pull/11327>`__: DOC: Update release notes for 1.15.0.
+* `#11339 <https://github.com/numpy/numpy/pull/11339>`__: BUG: decref in failure path; replace PyObject_Type by Py_TYPE
+* `#11352 <https://github.com/numpy/numpy/pull/11352>`__: DEP: Actually deprecate the normed argument to histogram
+* `#11359 <https://github.com/numpy/numpy/pull/11359>`__: DOC: document new functions
+* `#11367 <https://github.com/numpy/numpy/pull/11367>`__: BUG: add missing NpyIter_Close in einsum
+* `#11368 <https://github.com/numpy/numpy/pull/11368>`__: BUG/TST: String indexing should just fail, not emit a futurewarning
+* `#11389 <https://github.com/numpy/numpy/pull/11389>`__: ENH: Remove NpyIter_Close
+* `#11392 <https://github.com/numpy/numpy/pull/11392>`__: BUG: Make scalar.squeeze accept axis arg
+* `#11393 <https://github.com/numpy/numpy/pull/11393>`__: REL,MAINT: Update numpyconfig.h for 1.15.
+* `#11394 <https://github.com/numpy/numpy/pull/11394>`__: MAINT: Update mailmap
+* `#11403 <https://github.com/numpy/numpy/pull/11403>`__: DOC: Remove npyiter close from notes
+* `#11427 <https://github.com/numpy/numpy/pull/11427>`__: BUG: Fix incorrect deprecation logic for histogram(normed=...)...
+* `#11489 <https://github.com/numpy/numpy/pull/11489>`__: BUG: Ensure out is returned in einsum.
+* `#11491 <https://github.com/numpy/numpy/pull/11491>`__: BUG/ENH: Einsum optimization path updates and bug fixes.
+* `#11493 <https://github.com/numpy/numpy/pull/11493>`__: BUG: Revert #10229 to fix DLL loads on Windows.
+* `#11494 <https://github.com/numpy/numpy/pull/11494>`__: MAINT: add PyPI classifier for Python 3.7
+* `#11495 <https://github.com/numpy/numpy/pull/11495>`__: BENCH: belated addition of lcm, gcd to ufunc benchmark.
+* `#11496 <https://github.com/numpy/numpy/pull/11496>`__: BUG: Advanced indexing assignment incorrectly took 1-D fastpath
+* `#11511 <https://github.com/numpy/numpy/pull/11511>`__: BUG: Fix #define for ppc64 and ppc64le
+* `#11529 <https://github.com/numpy/numpy/pull/11529>`__: ENH: Add density argument to histogramdd.
+* `#11532 <https://github.com/numpy/numpy/pull/11532>`__: BUG: Decref of field title caused segfault
+* `#11540 <https://github.com/numpy/numpy/pull/11540>`__: DOC: Update the 1.15.0 release notes.
+* `#11577 <https://github.com/numpy/numpy/pull/11577>`__: BLD: Modify cpu detection and printing to get working aarch64...
+* `#11578 <https://github.com/numpy/numpy/pull/11578>`__: DOC: link to TESTS.rst.txt testing guidelines, tweak testing...
+* `#11602 <https://github.com/numpy/numpy/pull/11602>`__: TST: Add Python 3.7 to CI testing
--- /dev/null
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Chris Billington
+* Elliott Sales de Andrade +
+* Eric Wieser
+* Jeremy Manning +
+* Matti Picus
+* Ralf Gommers
+
+Pull requests merged
+====================
+
+A total of 24 pull requests were merged for this release.
+
+* `#11647 <https://github.com/numpy/numpy/pull/11647>`__: MAINT: Filter Cython warnings in ``__init__.py``
+* `#11648 <https://github.com/numpy/numpy/pull/11648>`__: BUG: Fix doc source links to unwrap decorators
+* `#11657 <https://github.com/numpy/numpy/pull/11657>`__: BUG: Ensure singleton dimensions are not dropped when converting...
+* `#11661 <https://github.com/numpy/numpy/pull/11661>`__: BUG: Warn on Nan in minimum,maximum for scalars
+* `#11665 <https://github.com/numpy/numpy/pull/11665>`__: BUG: cython sometimes emits invalid gcc attribute
+* `#11682 <https://github.com/numpy/numpy/pull/11682>`__: BUG: Fix regression in void_getitem
+* `#11698 <https://github.com/numpy/numpy/pull/11698>`__: BUG: Make matrix_power again work for object arrays.
+* `#11700 <https://github.com/numpy/numpy/pull/11700>`__: BUG: Add missing PyErr_NoMemory after failing malloc
+* `#11719 <https://github.com/numpy/numpy/pull/11719>`__: BUG: Fix undefined functions on big-endian systems.
+* `#11720 <https://github.com/numpy/numpy/pull/11720>`__: MAINT: Make einsum optimize default to False.
+* `#11746 <https://github.com/numpy/numpy/pull/11746>`__: BUG: Fix regression in loadtxt for bz2 text files in Python 2.
+* `#11757 <https://github.com/numpy/numpy/pull/11757>`__: BUG: Revert use of `console_scripts`.
+* `#11758 <https://github.com/numpy/numpy/pull/11758>`__: BUG: Fix Fortran kind detection for aarch64 & s390x.
+* `#11759 <https://github.com/numpy/numpy/pull/11759>`__: BUG: Fix printing of longdouble on ppc64le.
+* `#11760 <https://github.com/numpy/numpy/pull/11760>`__: BUG: Fixes for unicode field names in Python 2
+* `#11761 <https://github.com/numpy/numpy/pull/11761>`__: BUG: Increase required cython version on python 3.7
+* `#11763 <https://github.com/numpy/numpy/pull/11763>`__: BUG: check return value of _buffer_format_string
+* `#11775 <https://github.com/numpy/numpy/pull/11775>`__: MAINT: Make assert_array_compare more generic.
+* `#11776 <https://github.com/numpy/numpy/pull/11776>`__: TST: Fix urlopen stubbing.
+* `#11777 <https://github.com/numpy/numpy/pull/11777>`__: BUG: Fix regression in intersect1d.
+* `#11779 <https://github.com/numpy/numpy/pull/11779>`__: BUG: Fix test sensitive to platform byte order.
+* `#11781 <https://github.com/numpy/numpy/pull/11781>`__: BUG: Avoid signed overflow in histogram
+* `#11785 <https://github.com/numpy/numpy/pull/11785>`__: BUG: Fix pickle and memoryview for datetime64, timedelta64 scalars
+* `#11786 <https://github.com/numpy/numpy/pull/11786>`__: BUG: Deprecation triggers segfault
#
# You can set these variables from the command line.
-SPHINXOPTS =
+SPHINXOPTS = -W
SPHINXBUILD = sphinx-build
SPHINXPROJ = NumPyEnhancementProposals
SOURCEDIR = .
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-.PHONY: help Makefile
+.PHONY: help Makefile index
+
+index:
+ python tools/build_index.py
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
+%: Makefile index
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+++ /dev/null
-===========================
-NumPy Enhancement Proposals
-===========================
-
-NumPy Enhancement Proposals (NEPs) describe proposed changes to NumPy.
-NEPs are modeled on Python Enhancement Proposals (PEPs), and are typically
-written up when large changes to NumPy are proposed.
-
-This page provides an overview of all NEPs.
-
-Meta-NEPs (NEPs about NEPs or Processes)
-----------------------------------------
-
-.. toctree::
- :maxdepth: 1
-
- nep-0000
- nep-template
-
-
-Accepted NEPs, implementation in progress
------------------------------------------
-
-.. toctree::
- :maxdepth: 1
-
- nep-0014-dropping-python2.7-proposal
-
-
-Implemented NEPs
-----------------
-
-.. toctree::
- :maxdepth: 1
-
- nep-0001-npy-format
- nep-0005-generalized-ufuncs
- nep-0007-datetime-proposal
- nep-0010-new-iterator-ufunc
- nep-0013-ufunc-overrides
-
-
-Defunct NEPs
-------------
-
-.. toctree::
- :maxdepth: 1
-
- nep-0002-warnfix
- nep-0003-math_config_clean
- nep-0004-datetime-proposal3
- nep-0006-newbugtracker
- nep-0008-groupby_additions
- nep-0009-structured_array_extensions
- nep-0011-deferred-ufunc-evaluation
- nep-0012-missing-data
--- /dev/null
+===========================
+NumPy Enhancement Proposals
+===========================
+
+NumPy Enhancement Proposals (NEPs) describe proposed changes to NumPy.
+NEPs are modeled on Python Enhancement Proposals (PEPs), and are typically
+written up when large changes to NumPy are proposed.
+
+This page provides an overview of all NEPs.
+
+Meta-NEPs (NEPs about NEPs or Processes)
+----------------------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+{% for nep, tags in neps.items() if tags['Type'] == 'Process' %}
+ NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% endfor %}
+
+ nep-template
+
+Accepted NEPs, implementation in progress
+-----------------------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+{% for nep, tags in neps.items() if tags['Status'] == 'Accepted' %}
+ NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% endfor %}
+
+
+Open NEPs (under consideration)
+-------------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+{% for nep, tags in neps.items() if tags['Status'] == 'Draft' %}
+ NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% endfor %}
+
+
+
+Implemented NEPs
+----------------
+
+.. toctree::
+ :maxdepth: 1
+
+{% for nep, tags in neps.items() if tags['Status'] == 'Final' %}
+ NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% endfor %}
+
+Deferred NEPs
+-------------
+
+.. toctree::
+ :maxdepth: 1
+
+{% for nep, tags in neps.items() if tags['Status'] == 'Deferred' %}
+ NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% endfor %}
+
+Rejected NEPs
+-------------
+
+.. toctree::
+ :maxdepth: 1
+
+{% for nep, tags in neps.items() if tags['Status'] == 'Rejected' %}
+ NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% endfor %}
-=======================
-NEP Purpose and Process
-=======================
+===================
+Purpose and Process
+===================
:Author: Jarrod Millman <millman@berkeley.edu>
-:Status: Draft
+:Status: Active
:Type: Process
:Created: 2017-12-11
suitable for a NEP. Posting to the numpy-discussion `mailing list`_ is the best
way to go about doing this.
-Following a discussion on the mailing list, the proposal should be submitted as
-a draft NEP via a `GitHub pull request`_ to the ``doc/neps`` directory with the
-name ``nep-<n>.rst`` where ``<n>`` is an appropriately assigned four-digit
-number (e.g., ``nep-0000.rst``). The draft must use the :doc:`nep-template`
-file. Once a formal proposal has been submitted as a PR, it should be announced
-on the mailing list.
+The proposal should be submitted as a draft NEP via a `GitHub pull
+request`_ to the ``doc/neps`` directory with the name ``nep-<n>.rst``
+where ``<n>`` is an appropriately assigned four-digit number (e.g.,
+``nep-0000.rst``). The draft must use the :doc:`nep-template` file.
+
+Once the PR is in place, the NEP should be announced on the mailing
+list for discussion (comments on the PR itself should be restricted to
+minor editorial and technical fixes).
+
+At the earliest convenience, the PR should be merged (regardless of
+whether it is accepted during discussion). Additional PRs may be made
+by the Author to update or expand the NEP, or by maintainers to set
+its status, discussion URL, etc.
Standards Track NEPs consist of two parts, a design document and a
reference implementation. It is generally recommended that at least a
Review and Resolution
^^^^^^^^^^^^^^^^^^^^^
-NEPs are discussed on the mailing list and perhaps in other forums.
-Sometimes NEPs will grow out of an existing pull request.
-The possible paths of the status of NEPs are as follows:
+NEPs are discussed on the mailing list. The possible paths of the
+status of NEPs are as follows:
.. image:: _static/nep-0000.png
least the ``Resolution`` header should be added with a link to the relevant
post in the mailing list archives.
-NEPs can also be ``Replaced`` by a different NEP, rendering the original
-obsolete. Process NEPs may also have a status of
-``Active`` if they are never meant to be completed. E.g. NEP 0 (this NEP).
+NEPs can also be ``Superseded`` by a different NEP, rendering the
+original obsolete. The ``Replaced-By`` and ``Replaces`` headers
+should be added to the original and new NEPs respectively.
+
+Process NEPs may also have a status of ``Active`` if they are never
+meant to be completed, e.g. NEP 0 (this NEP).
Maintenance
.. _issue tracker: https://github.com/numpy/numpy/issues
.. _NumPy Steering Council:
- https://docs.scipy.org/doc/numpy-dev/dev/governance/governance.html
+ https://docs.scipy.org/doc/numpy/dev/governance/governance.html
.. _`GitHub pull request`: https://github.com/numpy/numpy/pulls
A Simple File Format for NumPy Arrays
=====================================
-Author: Robert Kern <robert.kern@gmail.com>
-Status: Draft
-Created: 20-Dec-2007
-
+:Author: Robert Kern <robert.kern@gmail.com>
+:Status: Final
+:Created: 20-Dec-2007
Abstract
--------
:Author: David Cournapeau
:Contact: david@ar.media.kyoto-u.ac.jp
:Date: 2008-09-04
+:Status: Deferred
Executive summary
=================
=============
Each compiler detects a different set of potential errors. The baseline will
-be gcc -Wall -W -Wextra. Ideally, a complete set would be nice:
+be gcc -Wall -W -Wextra. Ideally, a complete set would be nice::
--W -Wall -Wextra -Wstrict-prototypes -Wmissing-prototypes -Waggregate-return
--Wcast-align -Wcast-qual -Wnested-externs -Wshadow -Wbad-function-cast
--Wwrite-strings "
+ -W -Wall -Wextra -Wstrict-prototypes -Wmissing-prototypes -Waggregate-return
+ -Wcast-align -Wcast-qual -Wnested-externs -Wshadow -Wbad-function-cast
+ -Wwrite-strings
-Intel compiler, VS with /W3 /Wall, Sun compilers have extra warnings too.
+Intel compiler, VS with ``/W3 /Wall``, Sun compilers have extra warnings too.
Kind of warnings
================
uses compiler specific code to tag the variable, and mangle it such as it is
not possible to use it accidentally once it is tagged.
-The code to apply compiler specific option could be:
+The code to apply compiler specific option could be::
-#if defined(__GNUC__)
- #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
-# elif defined(__ICC)
- #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
-#else
- #define __COMP_NPY_UNUSED
-#endif
+ #if defined(__GNUC__)
+ #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+ # elif defined(__ICC)
+ #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+ #else
+ #define __COMP_NPY_UNUSED
+ #endif
-The variable mangling would be:
+The variable mangling would be::
-#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED
+ #define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED
-When applied to a variable, one would get:
+When applied to a variable, one would get::
-int foo(int * NPY_UNUSED(dummy))
+ int foo(int * NPY_UNUSED(dummy))
expanded to
-int foo(int * __NPY_UNUSED_TAGGEDdummy __COMP_NPY_UNUSED)
+::
+
+ int foo(int * __NPY_UNUSED_TAGGEDdummy __COMP_NPY_UNUSED)
Thus avoiding any accidental use of the variable. The mangling is pure C, and
thus portable. The per-variable warning disabling is compiler specific.
:Author: David Cournapeau
:Contact: david@ar.media.kyoto-u.ac.jp
:Date: 2008-09-04
+:Status: Deferred
Executive summary
=================
:Author: Ivan Vilata i Balaguer
:Contact: ivan@selidor.net
:Date: 2008-07-30
-
+:Status: Deferred
Executive summary
=================
Generalized Universal Functions
===============================
+:Status: Final
+
There is a general need for looping over not only functions on scalars
but also over functions on vectors (or arrays), as explained on
http://scipy.org/scipy/numpy/wiki/GeneralLoopingFunctions. We propose
===========================================
:Author: David Cournapeau, Stefan van der Walt
+:Status: Deferred
Some release managers of both numpy and scipy are becoming more and more
dissatisfied with the current development workflow, in particular for bug
:Author: Travis Oliphant
:Contact: oliphant@enthought.com
:Date: 2009-06-09
+:Status: Final
Revised only slightly from the third proposal by
:Contact: ivan@selidor.net
:Date: 2008-07-30
-
Executive summary
=================
:Author: Travis Oliphant
:Contact: oliphant@enthought.com
:Date: 2010-04-27
+:Status: Deferred
Executive summary
=================
Suppose you have a NumPy structured array containing information about
the number of purchases at several stores over multiple days. To be clear, the
-structured array data-type is:
+structured array data-type is::
-dt = [('year', i2), ('month', i1), ('day', i1), ('time', float),
+ dt = [('year', 'i2'), ('month', 'i1'), ('day', 'i1'), ('time', float),
       ('store', 'i4'), ('SKU', 'S6'), ('number', 'i4')]
Suppose there is a 1-d NumPy array of this data-type and you would like
Functions proposed
==================
-segment::
-
-
-edges::
-
-
-.. Local Variables:
-.. mode: rst
-.. coding: utf-8
-.. fill-column: 72
-.. End:
+- segment
+- edges
Structured array extensions
===========================
+:Status: Deferred
+
1. Create with-style context that makes "named-columns" available as names in the namespace.
with np.columns(array):
:Author: Mark Wiebe <mwwiebe@gmail.com>
:Content-Type: text/x-rst
:Created: 25-Nov-2010
+:Status: Final
*****************
Table of Contents
:Author: Mark Wiebe <mwwiebe@gmail.com>
:Content-Type: text/x-rst
:Created: 30-Nov-2010
+:Status: Deferred
********
Abstract
:Copyright: Copyright 2011 by Enthought, Inc
:License: CC By-SA 3.0 (http://creativecommons.org/licenses/by-sa/3.0/)
:Date: 2011-06-23
+:Status: Deferred
*****************
Table of Contents
-.. _neps.ufunc-overrides:
-
=================================
A Mechanism for Overriding Ufuncs
=================================
:Author: Stephan Hoyer
:Date: 2017-03-31
+:Status: Final
+
Executive summary
=================
- *ufunc* is the ufunc object that was called.
- *method* is a string indicating how the Ufunc was called, either
``"__call__"`` to indicate it was called directly, or one of its
- :ref:`methods<ufuncs.methods>`: ``"reduce"``, ``"accumulate"``,
- ``"reduceat"``, ``"outer"``, or ``"at"``.
+ methods: ``"reduce"``, ``"accumulate"``, ``"reduceat"``, ``"outer"``,
+ or ``"at"``.
- *inputs* is a tuple of the input arguments to the ``ufunc``
- *kwargs* contains any optional or keyword arguments passed to the
function. This includes any ``out`` arguments, which are always
Plan for dropping Python 2.7 support
====================================
+:Status: Accepted
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2017-November/077419.html
+
The Python core team plans to stop supporting Python 2 in 2020. The NumPy
project has supported both Python 2 and Python 3 in parallel since 2010, and
has found that supporting Python 2 is an increasing burden on our limited
--- /dev/null
+=======================
+Split Out Masked Arrays
+=======================
+
+:Author: Stéfan van der Walt <stefanv@berkeley.edu>
+:Status: Rejected
+:Type: Standards Track
+:Created: 2018-03-22
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-May/078026.html
+
+Abstract
+--------
+
+This NEP proposes removing MaskedArray functionality from NumPy, and
+publishing it as a stand-alone package.
+
+Detailed description
+--------------------
+
+MaskedArrays are a sub-class of the NumPy ``ndarray`` that adds
+masking capabilities, i.e. the ability to ignore or hide certain array
+values during computation.
+
+While historically convenient to distribute this class inside of NumPy,
+improved packaging has made it possible to distribute it separately
+without difficulty.
+
+Motivations for this move include:
+
+ * Focus: the NumPy package should strive to only include the
+ `ndarray` object, and the essential utilities needed to manipulate
+ such arrays.
+ * Complexity: the MaskedArray implementation is non-trivial, and imposes
+ a significant maintenance burden.
+ * Compatibility: MaskedArray objects, being subclasses [1]_ of `ndarrays`,
+ often cause complications when being used with other packages.
+ Fixing these issues is outside the scope of NumPy development.
+
+This NEP proposes a deprecation pathway through which MaskedArrays
+would still be accessible to users, but no longer as part of the core
+package.
+
+Implementation
+--------------
+
+Currently, a MaskedArray is created as follows::
+
+ from numpy import ma
+ ma.array([1, 2, 3], mask=[True, False, True])
+
+This will return an array where the values 1 and 3 are masked (no
+longer visible to operations such as `np.sum`).
+
+We propose refactoring the `np.ma` subpackage into a new
+pip-installable library called `maskedarray` [2]_, which would be used
+in a similar fashion::
+
+ import maskedarray as ma
+ ma.array([1, 2, 3], mask=[True, False, True])
+
+For two releases of NumPy, `maskedarray` would become a NumPy
+dependency, and would expose MaskedArrays under the existing name,
+`np.ma`. If imported as `np.ma`, a `NumpyDeprecationWarning` will
+be raised, describing the impending deprecation with instructions on
+how to modify code to use `maskedarray`.
+
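+During the deprecation period, `np.ma` could be reduced to a thin shim
+over the external package. A minimal sketch, assuming the external
+package exposes the same public names (the warning class and message
+here are placeholders, not a final design)::
+
+    # Hypothetical numpy/ma/__init__.py during the deprecation period.
+    import warnings
+
+    warnings.warn(
+        "np.ma is deprecated; install and use the standalone "
+        "'maskedarray' package instead",
+        DeprecationWarning, stacklevel=2)
+
+    # Re-export the external package's names under np.ma.
+    from maskedarray import *
+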
+After two releases, `np.ma` will be removed entirely. In order to obtain
+`np.ma`, a user will install it via `pip install` or via their package
+manager. Subsequently, importing `maskedarray` on a version of NumPy that
+includes it integrally will raise an `ImportError`.
+
+Documentation
+`````````````
+
+NumPy's internal documentation refers explicitly to MaskedArrays in
+certain places, e.g. `numpy.concatenate`:
+
+    When one or more of the arrays to be concatenated is a MaskedArray,
+    this function will return a MaskedArray object instead of an ndarray,
+    but the input masks are *not* preserved. In cases where a MaskedArray
+    is expected as input, use the ma.concatenate function from the masked
+    array module instead.
+
+Such documentation will be removed, since the expectation is that
+users of `maskedarray` will use methods from that package to operate
+on MaskedArrays.
+
+Other appearances
+~~~~~~~~~~~~~~~~~
+
+Explicit MaskedArray support will be removed from:
+
+- `numpy.genfromtxt`
+- `numpy.lib.merge_arrays`, `numpy.lib.stack_arrays`
+
+Backward compatibility
+----------------------
+
+For two releases of NumPy, apart from a deprecation notice, there will
+be no user visible changes. Thereafter, `np.ma` will no longer be
+available (instead, MaskedArrays will live in the `maskedarray`
+package).
+
+Note also that new NEPs on array-like objects may eventually provide
+better support for MaskedArrays than is currently available.
+
+Alternatives
+------------
+
+After a lively discussion on the mailing list:
+
+- There is support (and active interest in) making a better *new* masked array
+ class.
+- The new class should be a consumer of the external NumPy API with no special
+ status (unlike today where there are hacks across the codebase to support it)
+- `MaskedArray` will stay where it is, at least until the new masked array
+ class materializes and has been tried in the wild.
+
+References and Footnotes
+------------------------
+
+.. [1] Subclassing ndarray,
+ https://docs.scipy.org/doc/numpy/user/basics.subclassing.html
+.. [2] PyPi: maskedarray, https://pypi.org/project/maskedarray/
+
+Copyright
+---------
+
+This document has been placed in the public domain.
--- /dev/null
+==================================================
+NEP: Dispatch Mechanism for NumPy's high level API
+==================================================
+
+:Author: Stephan Hoyer <shoyer@google.com>
+:Author: Matthew Rocklin <mrocklin@gmail.com>
+:Status: Draft
+:Type: Standards Track
+:Created: 2018-05-29
+
+Abstract
+--------
+
+We propose a protocol to allow arguments of NumPy functions to define
+how that function operates on them. This allows other libraries that
+implement NumPy's high level API to reuse NumPy functions, and allows
+libraries that extend NumPy's high level API to apply to more NumPy-like
+libraries.
+
+Detailed description
+--------------------
+
+Numpy's high level ndarray API has been implemented several times
+outside of NumPy itself for different architectures, such as for GPU
+arrays (CuPy), Sparse arrays (scipy.sparse, pydata/sparse) and parallel
+arrays (Dask array) as well as various Numpy-like implementations in the
+deep learning frameworks, like TensorFlow and PyTorch.
+
+Similarly there are several projects that build on top of the Numpy API
+for labeled and indexed arrays (XArray), automatic differentiation
+(Autograd, Tangent), higher order array factorizations (TensorLy), etc.
+that add additional functionality on top of the Numpy API.
+
+We would like to be able to use these libraries together, for example we
+would like to be able to place a CuPy array within XArray, or perform
+automatic differentiation on Dask array code. This would be easier to
+accomplish if code written for NumPy ndarrays could also be used by
+other NumPy-like projects.
+
+For example, we would like for the following code example to work
+equally well with any Numpy-like array object:
+
+.. code:: python
+
+ def f(x):
+ y = np.tensordot(x, x.T)
+ return np.mean(np.exp(y))
+
+Some of this is possible today with various protocol mechanisms within
+Numpy.
+
+- The ``np.exp`` function checks the ``__array_ufunc__`` protocol
+- The ``.T`` method works using Python's method dispatch
+- The ``np.mean`` function explicitly checks for a ``.mean`` method on
+ the argument
+
+However other functions, like ``np.tensordot``, do not dispatch, and
+instead are likely to coerce to a Numpy array (using the ``__array__``
+protocol) or error outright. To achieve enough coverage of the NumPy API
+to support downstream projects like XArray and autograd, we want to
+support *almost all* functions within Numpy, which calls for a more
+far-reaching protocol than just ``__array_ufunc__``. We would like a
+protocol that allows arguments of a NumPy function to take control and
+divert execution to another function (for example a GPU or parallel
+implementation) in a way that is safe and consistent across projects.
+
+Implementation
+--------------
+
+We propose adding support for a new protocol in NumPy,
+``__array_function__``.
+
+This protocol is intended to be a catch-all for NumPy functionality that
+is not covered by existing protocols, like reductions (like ``np.sum``)
+or universal functions (like ``np.exp``). The semantics are very similar
+to ``__array_ufunc__``, except the operation is specified by an
+arbitrary callable object rather than a ufunc instance and method.
+
+The interface
+~~~~~~~~~~~~~
+
+We propose the following signature for implementations of
+``__array_function__``:
+
+.. code-block:: python
+
+ def __array_function__(self, func, types, args, kwargs)
+
+- ``func`` is an arbitrary callable exposed by NumPy's public API,
+ which was called in the form ``func(*args, **kwargs)``.
+- ``types`` is a list of types for all arguments to the original NumPy
+ function call that will be checked for an ``__array_function__``
+ implementation.
+- The tuple ``args`` and dict ``kwargs`` are directly passed on from the
+ original call.
+
+Unlike ``__array_ufunc__``, there are no high-level guarantees about the
+type of ``func``, or about which of ``args`` and ``kwargs`` may contain objects
+implementing the array API. As a convenience for ``__array_function__``
+implementors of the NumPy API, the ``types`` keyword contains a list of all
+types that implement the ``__array_function__`` protocol. This allows
+downstream implementations to quickly determine if they are likely able to
+support the operation.
+
+Still to be determined: what guarantees can we offer for ``types``? Should
+we promise that types are unique, and appear in the order in which they
+are checked?
+
+Example for a project implementing the NumPy API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Most implementations of ``__array_function__`` will start with two
+checks:
+
+1. Is the given function something that we know how to overload?
+2. Are all arguments of a type that we know how to handle?
+
+If these conditions hold, ``__array_function__`` should return
+the result from calling its implementation for ``func(*args, **kwargs)``.
+Otherwise, it should return the sentinel value ``NotImplemented``, indicating
+that the function is not implemented by these types.
+
+.. code:: python
+
+ class MyArray:
+ def __array_function__(self, func, types, args, kwargs):
+ if func not in HANDLED_FUNCTIONS:
+ return NotImplemented
+ if not all(issubclass(t, MyArray) for t in types):
+ return NotImplemented
+ return HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+ HANDLED_FUNCTIONS = {
+ np.concatenate: my_concatenate,
+ np.broadcast_to: my_broadcast_to,
+ np.sum: my_sum,
+ ...
+ }
+
+Necessary changes within the Numpy codebase itself
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This will require two changes within the Numpy codebase:
+
+1. A function to inspect available inputs, look for the
+ ``__array_function__`` attribute on those inputs, and call those
+ methods appropriately until one succeeds. This needs to be fast in the
+ common all-NumPy case.
+
+ This is one additional function of moderate complexity.
+2. Calling this function within all relevant Numpy functions.
+
+ This affects many parts of the Numpy codebase, although with very low
+ complexity.
+
+Finding and calling the right ``__array_function__``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given a Numpy function, ``*args`` and ``**kwargs`` inputs, we need to
+search through ``*args`` and ``**kwargs`` for all appropriate inputs
+that might have the ``__array_function__`` attribute. Then we need to
+select among those possible methods and execute the right one.
+Negotiating between several possible implementations can be complex.
+
+Finding arguments
+'''''''''''''''''
+
+Valid arguments may be directly in the ``*args`` and ``**kwargs``, such
+as in the case for ``np.tensordot(left, right, out=out)``, or they may
+be nested within lists or dictionaries, such as in the case of
+``np.concatenate([x, y, z])``. This can be problematic for two reasons:
+
+1. Some functions are given long lists of values, and traversing them
+ might be prohibitively expensive
+2. Some functions may have arguments that we don't want to inspect, even
+ if they have the ``__array_function__`` method
+
+To resolve these we ask the functions to provide an explicit list of
+arguments that should be traversed. This is the ``relevant_arguments=``
+keyword in the examples below.
+
+Trying ``__array_function__`` methods until the right one works
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Many arguments may implement the ``__array_function__`` protocol. Some
+of these may decide that, given the available inputs, they are unable to
+determine the correct result. How do we call the right one? If several
+are valid then which has precedence?
+
+The rules for dispatch with ``__array_function__`` match those for
+``__array_ufunc__`` (see
+`NEP-13 <http://www.numpy.org/neps/nep-0013-ufunc-overrides.html>`_).
+In particular:
+
+- NumPy will gather implementations of ``__array_function__`` from all
+ specified inputs and call them in order: subclasses before
+ superclasses, and otherwise left to right. Note that in some edge cases,
+ this differs slightly from the
+ `current behavior <https://bugs.python.org/issue30140>`_ of Python.
+- Implementations of ``__array_function__`` indicate that they can
+ handle the operation by returning any value other than
+ ``NotImplemented``.
+- If all ``__array_function__`` methods return ``NotImplemented``,
+ NumPy will raise ``TypeError``.
+
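+For concreteness, a rough sketch of such a dispatcher is given below.
+The name ``do_array_function_dance`` matches the placeholder used later
+in this NEP; the details are illustrative only, and nested sequences
+(as accepted by ``np.concatenate``) are not traversed in this sketch:
+
+.. code:: python
+
+    import numpy as np
+
+    def do_array_function_dance(func, relevant_arguments, args, kwargs):
+        # Gather one overloaded argument per type: subclasses before
+        # superclasses, otherwise left to right.
+        overloaded = []
+        for arg in relevant_arguments:
+            t = type(arg)
+            if (t is np.ndarray
+                    or not hasattr(t, '__array_function__')
+                    or any(type(o) is t for o in overloaded)):
+                continue
+            index = len(overloaded)
+            for i, other in enumerate(overloaded):
+                if issubclass(t, type(other)):
+                    index = i
+                    break
+            overloaded.insert(index, arg)
+
+        if not overloaded:
+            # The common all-NumPy case: no dispatch, fall through to
+            # the default implementation.
+            return False, None
+
+        types = [type(o) for o in overloaded]
+        for arg in overloaded:
+            result = arg.__array_function__(func, types, args, kwargs)
+            if result is not NotImplemented:
+                return True, result
+        raise TypeError('no implementation found for {}'.format(func))
+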
+Changes within Numpy functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given a function defined above, for now call it
+``do_array_function_dance``, we now need to call that function from
+within every relevant Numpy function. This is a pervasive change, but of
+fairly simple and innocuous code that should complete quickly and
+without effect if no arguments implement the ``__array_function__``
+protocol. Let us consider a few examples of NumPy functions and how they
+might be affected by this change:
+
+.. code:: python
+
+ def broadcast_to(array, shape, subok=False):
+ success, value = do_array_function_dance(
+ func=broadcast_to,
+ relevant_arguments=[array],
+ args=(array,),
+ kwargs=dict(shape=shape, subok=subok))
+ if success:
+ return value
+
+ ... # continue with the definition of broadcast_to
+
+    def concatenate(arrays, axis=0, out=None):
+ success, value = do_array_function_dance(
+ func=concatenate,
+ relevant_arguments=[arrays, out],
+ args=(arrays,),
+ kwargs=dict(axis=axis, out=out))
+ if success:
+ return value
+
+ ... # continue with the definition of concatenate
+
+The list of objects passed to ``relevant_arguments`` are those that should
+be inspected for ``__array_function__`` implementations.
+
+Alternatively, we could write these overloads with a decorator, e.g.,
+
+.. code:: python
+
+ @overload_for_array_function(['array'])
+ def broadcast_to(array, shape, subok=False):
+ ... # continue with the definition of broadcast_to
+
+ @overload_for_array_function(['arrays', 'out'])
+ def concatenate(arrays, axis=0, out=None):
+ ... # continue with the definition of concatenate
+
+The decorator ``overload_for_array_function`` would be written in terms
+of ``do_array_function_dance``.
+
+The downside of this approach would be a loss of introspection capability
+for NumPy functions on Python 2, since this requires the use of
+``inspect.Signature`` (only available on Python 3). However, NumPy won't
+be supporting Python 2 for `very much longer <http://www.numpy.org/neps/nep-0014-dropping-python2.7-proposal.html>`_.
+
+Use outside of NumPy
+~~~~~~~~~~~~~~~~~~~~
+
+There is nothing about this protocol that is particular to NumPy itself.
+Should we encourage use of the same ``__array_function__`` protocol by
+third-party libraries for overloading non-NumPy functions, e.g., for
+making array-implementation generic functionality in SciPy?
+
+This would offer significant advantages (SciPy wouldn't need to invent
+its own dispatch system) and no downsides that we can think of, because
+every function that dispatches with ``__array_function__`` already needs
+to be explicitly recognized. Libraries like Dask, CuPy, and Autograd
+already wrap a limited subset of SciPy functionality (e.g.,
+``scipy.linalg``) similarly to how they wrap NumPy.
+
+If we want to do this, we should consider exposing the helper function
+``do_array_function_dance()`` above as a public API.
+
+Non-goals
+---------
+
+We are aiming for a basic strategy that can be applied relatively
+mechanistically to almost all functions in NumPy's API in a short
+period of time, namely the development cycle of a single NumPy release.
+
+We hope to get both the ``__array_function__`` protocol and all specific
+overloads right on the first try, but our explicit aim here is to get
+something that mostly works (and can be iterated upon), rather than to
+wait for an optimal implementation. The price of moving fast is that for
+now **this protocol should be considered strictly experimental**. We
+reserve the right to change the details of this protocol and how
+specific NumPy functions use it at any time in the future -- even in
+otherwise bug-fix only releases of NumPy.
+
+In particular, we don't plan to write additional NEPs that list all
+specific functions to overload, with exactly how they should be
+overloaded. We will leave this up to the discretion of committers on
+individual pull requests, trusting that they will surface any
+controversies for discussion by interested parties.
+
+However, we already know several families of functions that should be
+explicitly excluded from ``__array_function__``. These will need their
+own protocols:
+
+- universal functions, which already have their own protocol.
+- ``array`` and ``asarray``, because they are explicitly intended for
+  coercion to actual ``numpy.ndarray`` objects.
+- dispatch for methods of any kind, e.g., methods on
+ ``np.random.RandomState`` objects.
+
+As a concrete example of how we expect to break behavior in the future,
+some functions such as ``np.where`` are currently not NumPy universal
+functions, but conceivably could become universal functions in the
+future. When/if this happens, we will change such overloads from using
+``__array_function__`` to the more specialized ``__array_ufunc__``.
+
+
+Backward compatibility
+----------------------
+
+This proposal does not change existing semantics, except for those arguments
+that currently have ``__array_function__`` methods, which should be rare.
+
+
+Alternatives
+------------
+
+Specialized protocols
+~~~~~~~~~~~~~~~~~~~~~
+
+We could (and should) continue to develop protocols like
+``__array_ufunc__`` for cohesive subsets of Numpy functionality.
+
+As mentioned above, if this means that some functions that we overload
+with ``__array_function__`` should switch to a new protocol instead,
+that is explicitly OK for as long as ``__array_function__`` retains its
+experimental status.
+
+Separate namespace
+~~~~~~~~~~~~~~~~~~
+
+A separate namespace for overloaded functions is another possibility,
+either inside or outside of NumPy.
+
+This has the advantage of alleviating any possible concerns about
+backwards compatibility and would provide the maximum freedom for quick
+experimentation. In the long term, it would provide a clean abstraction
+layer, separating NumPy's high level API from default implementations on
+``numpy.ndarray`` objects.
+
+The downsides are that this would require an explicit opt-in from all
+existing code, e.g., ``import numpy.api as np``, and in the long term
+would result in the maintenance of two separate NumPy APIs. Also, many
+functions from ``numpy`` itself are already overloaded (but
+inadequately), so confusion about high vs. low level APIs in NumPy would
+still persist.
+
+Multiple dispatch
+~~~~~~~~~~~~~~~~~
+
+An alternative to our suggestion of the ``__array_function__`` protocol
+would be implementing NumPy's core functions as
+`multi-methods <https://en.wikipedia.org/wiki/Multiple_dispatch>`_.
+Although one of us wrote a `multiple dispatch
+library <https://github.com/mrocklin/multipledispatch>`_ for Python, we
+don't think this approach makes sense for NumPy in the near term.
+
+The main reason is that NumPy already has a well-proven dispatching
+mechanism with ``__array_ufunc__``, based on Python's own dispatching
+system for arithmetic, and it would be confusing to add another
+mechanism that works in a very different way. This would also be a more
+invasive change to NumPy itself, which would need to gain a multiple
+dispatch implementation.
+
+It is possible that a multiple dispatch implementation for NumPy's high
+level API could make sense in the future. Fortunately,
+``__array_function__`` does not preclude this possibility, because it
+would be straightforward to write a shim for a default
+``__array_function__`` implementation in terms of multiple dispatch.
+
+Implementations in terms of a limited core API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The internal implementations of some NumPy functions are extremely
+simple. For example:
+
+- ``np.stack()`` is implemented in only a few lines of code by combining
+  indexing with ``np.newaxis``, ``np.concatenate`` and the ``shape``
+  attribute.
+- ``np.mean()`` is implemented internally in terms of ``np.sum()``,
+  ``np.divide()``, ``.astype()`` and ``.shape``.
+
+This suggests the possibility of defining a minimal "core" ndarray
+interface, and relying upon it internally in NumPy to implement the full
+API. This is an attractive option, because it could significantly reduce
+the work required for new array implementations.
+
+However, this also comes with several downsides:
+
+1. The details of how NumPy implements a high-level function in terms
+   of overloaded functions now become an implicit part of NumPy's
+   public API. For example, refactoring ``stack`` to use ``np.block()``
+   instead of ``np.concatenate()`` internally would now become a
+   breaking change.
+2. Array libraries may prefer to implement high level functions
+   differently than NumPy. For example, a library might prefer to
+   implement a fundamental operation like ``mean()`` directly rather
+   than relying on ``sum()`` followed by division. More generally, it's
+   not clear yet what exactly qualifies as core functionality, and
+   figuring this out could be a large project.
+3. We don't yet have an overloading system for attributes and methods
+   on array objects, e.g., for accessing ``.dtype`` and ``.shape``.
+   This should be the subject of a future NEP, but until then we should
+   be reluctant to rely on these properties.
+
+Given these concerns, we encourage relying on this approach only in
+limited cases.
+
+Coercion to a NumPy array as a catch-all fallback
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+With the current design, classes that implement ``__array_function__``
+to overload at least one function implicitly declare an intent to
+implement the entire NumPy API. It's not possible to implement *only*
+``np.concatenate()`` on a type, but fall back to NumPy's default
+behavior of casting with ``np.asarray()`` for all other functions.
+
+This could present a backwards compatibility concern that would
+discourage libraries from adopting ``__array_function__`` in an
+incremental fashion. For example, currently most numpy functions will
+implicitly convert ``pandas.Series`` objects into NumPy arrays, behavior
+that assuredly many pandas users rely on. If pandas implemented
+``__array_function__`` only for ``np.concatenate``, unrelated NumPy
+functions like ``np.nanmean`` would suddenly break on pandas objects by
+raising TypeError.
+
+With ``__array_ufunc__``, it's possible to alleviate this concern by
+casting all arguments to numpy arrays and re-calling the ufunc, but the
+heterogeneous function signatures supported by ``__array_function__``
+make it impossible to implement this generic fallback behavior for
+``__array_function__``.
+
+We could resolve this issue by changing the handling of return values
+in ``__array_function__`` in either of two possible ways:
+
+1. Change the meaning of all arguments returning ``NotImplemented`` to
+   indicate that all arguments should be coerced to NumPy arrays
+   instead. However, many array libraries (e.g., scipy.sparse) really
+   don't want implicit conversions to NumPy arrays, and often avoid
+   implementing ``__array__`` for exactly this reason. Implicit
+   conversions can result in silent bugs and performance degradation.
+2. Use another sentinel value of some sort to indicate that a class
+   implementing part of the higher level array API is coercible as a
+   fallback, e.g., a return value of ``np.NotImplementedButCoercible``
+   from ``__array_function__``.
+
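+For illustration, a library adopting option 2 incrementally might write
+something like the sketch below. ``NotImplementedButCoercible`` is
+hypothetical and does not exist in NumPy; it is defined locally here
+only to make the example self-contained:
+
+.. code:: python
+
+    import numpy as np
+
+    # Hypothetical sentinel from option 2 above; not part of NumPy today.
+    NotImplementedButCoercible = object()
+
+    class MySeries:
+        """Toy array-like that overloads only np.concatenate."""
+
+        def __init__(self, data):
+            self.data = np.asarray(data)
+
+        def __array__(self):
+            # Lets np.asarray() coerce us when we opt out below.
+            return self.data
+
+        def __array_function__(self, func, types, args, kwargs):
+            if func is np.concatenate:
+                return np.concatenate(
+                    [np.asarray(a) for a in args[0]], **kwargs)
+            # For every other function, ask NumPy to fall back to
+            # coercion with np.asarray() instead of raising TypeError.
+            return NotImplementedButCoercible
+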
+If we take this second approach, we would need to define additional
+rules for how coercible array arguments are coerced, e.g.,
+
+- Would we try for ``__array_function__`` overloads again after
+  coercing coercible arguments?
+- If so, would we coerce coercible arguments one-at-a-time, or
+  all-at-once?
+
+These are slightly tricky design questions, so for now we propose to
+defer this issue. We can always implement
+``np.NotImplementedButCoercible`` at some later time if it proves
+critical to the numpy community in the future. Importantly, we don't
+think this will stop critical libraries that desire to implement most of
+the high level NumPy API from adopting this proposal.
+
+NOTE: If you are reading this NEP in its draft state and disagree,
+please speak up on the mailing list!
+
+Drawbacks of this approach
+--------------------------
+
+Future difficulty extending NumPy's API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One downside of passing all arguments directly on to
+``__array_function__`` is that it makes it hard to extend the signatures
+of overloaded NumPy functions with new arguments, because adding even an
+optional keyword argument would break existing overloads.
+
+This is not a new problem for NumPy. NumPy has occasionally changed the
+signature for functions in the past, including functions like
+``numpy.sum`` which support overloads.
+
+For adding new keyword arguments that do not change default behavior, we
+would only include these as keyword arguments when they have changed
+from default values. This is similar to `what NumPy already has
+done <https://github.com/numpy/numpy/blob/v1.14.2/numpy/core/fromnumeric.py#L1865-L1867>`_,
+e.g., for the optional ``keepdims`` argument in ``sum``:
+
+.. code:: python
+
+ def sum(array, ..., keepdims=np._NoValue):
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ return array.sum(..., **kwargs)
+
+In other cases, such as deprecated arguments, preserving the existing
+behavior of overloaded functions may not be possible. Libraries that use
+``__array_function__`` should be aware of this risk: we don't propose to
+freeze NumPy's API in stone any more than it already is.
+
+Difficulty adding implementation specific arguments
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some array implementations generally follow NumPy's API, but have
+additional optional keyword arguments (e.g., ``dask.array.sum()`` has
+``split_every`` and ``tensorflow.reduce_sum()`` has ``name``). A generic
+dispatching library could potentially pass on all unrecognized keyword
+argument directly to the implementation, but extending ``np.sum()`` to
+pass on ``**kwargs`` would entail public facing changes in NumPy.
+Customizing the detailed behavior of array libraries will require using
+library specific functions, which could be limiting in the case of
+libraries that consume the NumPy API such as xarray.
+
+
+Discussion
+----------
+
+Various alternatives to this proposal were discussed in a few Github issues:
+
+1. `pydata/sparse #1 <https://github.com/pydata/sparse/issues/1>`_
+2. `numpy/numpy #11129 <https://github.com/numpy/numpy/issues/11129>`_
+
+Additionally it was the subject of `a blogpost
+<http://matthewrocklin.com/blog/work/2018/05/27/beyond-numpy>`_. Following
+this it was discussed at a `NumPy developer sprint
+<https://scisprints.github.io/#may-numpy-developer-sprint>`_ at the `UC
+Berkeley Institute for Data Science (BIDS) <https://bids.berkeley.edu/>`_.
+
+
+References and Footnotes
+------------------------
+
+.. [1] Each NEP must either be explicitly labeled as placed in the public domain (see
+ this NEP as an example) or licensed under the `Open Publication License`_.
+
+.. _Open Publication License: http://www.opencontent.org/openpub/
+
+
+Copyright
+---------
+
+This document has been placed in the public domain. [1]_
--- /dev/null
+==============================
+Random Number Generator Policy
+==============================
+
+:Author: Robert Kern <robert.kern@gmail.com>
+:Status: Draft
+:Type: Standards Track
+:Created: 2018-05-24
+
+
+Abstract
+--------
+
+For the past decade, NumPy has had a strict backwards compatibility policy for
+the number stream of all of its random number distributions. Unlike other
+numerical components in ``numpy``, which are usually allowed to return
+different when results when they are modified if they remain correct, we have
+obligated the random number distributions to always produce the exact same
+numbers in every version. The objective of our stream-compatibility guarantee
+was to provide exact reproducibility for simulations across numpy versions in
+order to promote reproducible research. However, this policy has made it very
+difficult to enhance any of the distributions with faster or more accurate
+algorithms. After a decade of experience and improvements in the surrounding
+ecosystem of scientific software, we believe that there are now better ways to
+achieve these objectives. We propose relaxing our strict stream-compatibility
+policy to remove the obstacles that are in the way of accepting contributions
+to our random number generation capabilities.
+
+
+The Status Quo
+--------------
+
+Our current policy, in full:
+
+ A fixed seed and a fixed series of calls to ``RandomState`` methods using the
+ same parameters will always produce the same results up to roundoff error
+ except when the values were incorrect. Incorrect values will be fixed and
+ the NumPy version in which the fix was made will be noted in the relevant
+ docstring. Extension of existing parameter ranges and the addition of new
+ parameters is allowed as long the previous behavior remains unchanged.
+
+This policy was first instated in Nov 2008 (in essence; the full set of weasel
+words grew over time) in response to a user wanting to be sure that the
+simulations that formed the basis of their scientific publication could be
+reproduced years later, exactly, with whatever version of ``numpy`` that was
+current at the time. We were keen to support reproducible research, and it was
+still early in the life of ``numpy.random``. We had not seen much cause to
+change the distribution methods all that much.
+
+We also had not thought very thoroughly about the limits of what we really
+could promise (and by “we” in this section, we really mean Robert Kern, let’s
+be honest). Despite all of the weasel words, our policy overpromises
+compatibility. The same version of ``numpy`` built on different platforms, or
+just in a different way could cause changes in the stream, with varying degrees
+of rarity. The biggest issue is that the ``.multivariate_normal()`` method relies on
+``numpy.linalg`` functions. Even on the same platform, if one links ``numpy``
+with a different LAPACK, ``.multivariate_normal()`` may well return completely
+different results. More rarely, building on a different OS or CPU can cause
+differences in the stream. We use C ``long`` integers internally for integer
+distribution (it seemed like a good idea at the time), and those can vary in
+size depending on the platform. Distribution methods can overflow their
+internal C ``longs`` at different breakpoints depending on the platform and
+cause all of the random variate draws that follow to be different.
+
+And even if all of that is controlled, our policy still does not provide exact
+guarantees across versions. We still do apply bug fixes when correctness is at
+stake. And even if we didn’t do that, any nontrivial program does more than
+just draw random numbers. It does computations on those numbers and transforms
+them with numerical algorithms from the rest of ``numpy``, which is not
+subject to so strict a policy. Trying to maintain stream-compatibility for our
+random number distributions does not help reproducible research for these
+reasons.
+
+The standard practice now for bit-for-bit reproducible research is to pin all
+of the versions of code of your software stack, possibly down to the OS itself.
+The landscape for accomplishing this is much easier today than it was in 2008.
+We now have ``pip``. We now have virtual machines. Those who need to
+reproduce simulations exactly now can (and ought to) do so by using the exact
+same version of ``numpy``. We do not need to maintain stream-compatibility
+across ``numpy`` versions to help them.
+
+Our stream-compatibility guarantee has hindered our ability to make
+improvements to ``numpy.random``. Several first-time contributors have
+submitted PRs to improve the distributions, usually by implementing a faster,
+or more accurate algorithm than the one that is currently there.
+Unfortunately, most of them would have required breaking the stream to do so.
+Blocked by our policy, and our inability to work around that policy, many of
+those contributors simply walked away.
+
+
+Implementation
+--------------
+
+We propose first freezing ``RandomState`` as it is and developing a new RNG
+subsystem alongside it. This allows anyone who has been relying on our old
+stream-compatibility guarantee to have plenty of time to migrate.
+``RandomState`` will be considered deprecated, but with a long deprecation
+cycle, at least a few years. Deprecation warnings will start silent but become
+increasingly noisy over time. Bugs in the current state of the code will *not*
+be fixed if fixing them would impact the stream. However, if changes in the
+rest of ``numpy`` would break something in the ``RandomState`` code, we will
+fix ``RandomState`` to continue working (for example, some change in the
+C API). No new features will be added to ``RandomState``. Users should
+migrate to the new subsystem as they are able to.
+
+Work on a proposed `new PRNG subsystem
+<https://github.com/bashtage/randomgen>`_ is already underway. The specifics
+of the new design are out of scope for this NEP and up for much discussion, but
+we will discuss general policies that will guide the evolution of whatever code
+is adopted.
+
+First, we will maintain API source compatibility just as we do with the rest of
+``numpy``. If we *must* make a breaking change, we will only do so with an
+appropriate deprecation period and warnings.
+
+Second, breaking stream-compatibility in order to introduce new features or
+improve performance will be *allowed* with *caution*. Such changes will be
+considered features, and as such will be no faster than the standard release
+cadence of features (i.e. on ``X.Y`` releases, never ``X.Y.Z``). Slowness is
+not a bug. Correctness bug fixes that break stream-compatibility can happen on
+bugfix releases, per usual, but developers should consider if they can wait
+until the next feature release. We encourage developers to weigh users’
+pain from the break in stream-compatibility against the improvements.
+One example of a worthwhile improvement would be to change algorithms for
+a significant increase in performance, for example, moving from the `Box-Muller
+transform <https://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform>`_ method
+of Gaussian variate generation to the faster `Ziggurat algorithm
+<https://en.wikipedia.org/wiki/Ziggurat_algorithm>`_. An example of an
+unworthy improvement would be tweaking the Ziggurat tables just a little bit.
+
+Any new design for the RNG subsystem will provide a choice of different core
+uniform PRNG algorithms. We will be more strict about a select subset of
+methods on these core PRNG objects. They MUST guarantee stream-compatibility
+for a minimal, specified set of methods which are chosen to make it easier to
+compose them to build other distributions. Namely,
+
+ * ``.bytes()``
+ * ``.random_uintegers()``
+ * ``.random_sample()``
+
+Furthermore, the new design should also provide one generator class (we shall
+call it ``StableRandom`` for discussion purposes) that provides a slightly
+broader subset of distribution methods for which stream-compatibility is
+*guaranteed*. The point of ``StableRandom`` is to provide something that can
+be used in unit tests so projects that currently have tests which rely on the
+precise stream can be migrated off of ``RandomState``. For the best
+transition, ``StableRandom`` should use as its core uniform PRNG the current
+MT19937 algorithm. As best as possible, the API for the distribution methods
+that are provided on ``StableRandom`` should match their counterparts on
+``RandomState``. They should provide the same stream that the current version
+of ``RandomState`` does. Because their intended use is for unit tests, we do
+not need the performance improvements from the new algorithms that will be
+introduced by the new subsystem.
+
+The list of ``StableRandom`` methods should be chosen to support unit tests:
+
+ * ``.randint()``
+ * ``.uniform()``
+ * ``.normal()``
+ * ``.standard_normal()``
+ * ``.choice()``
+ * ``.shuffle()``
+ * ``.permutation()``
+
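+As an illustration of the intended migration path (``StableRandom`` is
+a working name for a class that does not yet exist), a unit test that
+currently pins the stream might evolve as follows::
+
+    import numpy as np
+
+    # Today: the test pins the stream through a seeded RandomState.
+    rs = np.random.RandomState(seed=1234)
+    expected = rs.standard_normal(3)
+
+    # Under this proposal, the same test would switch to the
+    # stream-guaranteed class (hypothetical API):
+    #   rs = np.random.StableRandom(seed=1234)
+    #   expected = rs.standard_normal(3)
+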
+
+Not Versioning
+--------------
+
+For a long time, we considered that the way to allow algorithmic improvements
+while maintaining the stream was to apply some form of versioning. That is,
+every time we make a stream change in one of the distributions, we increment
+some version number somewhere. ``numpy.random`` would keep all past versions
+of the code, and there would be a way to get the old versions. Proposals of
+how to do this exactly varied widely, but we will not exhaustively list them
+here. We spent years going back and forth on these designs and were not able
+to find one that sufficed. Let that time lost, and more importantly, the
+contributors that we lost while we dithered, serve as evidence against the
+notion.
+
+Concretely, adding in versioning makes maintenance of ``numpy.random``
+difficult. Necessarily, we would be keeping lots of versions of the same code
+around. Adding a new algorithm safely would still be quite hard.
+
+But most importantly, versioning is fundamentally difficult to *use* correctly.
+We want to make it easy and straightforward to get the latest, fastest, best
+versions of the distribution algorithms; otherwise, what's the point? The way
+to make that easy is to make the latest the default. But the default will
+necessarily change from release to release, so the user’s code would need to be
+altered anyway to specify the specific version that one wants to replicate.
+
+Adding in versioning to maintain stream-compatibility would still only provide
+the same level of stream-compatibility that we currently do, with all of the
+limitations described earlier. Given that the standard practice for such needs
+is to pin the release of ``numpy`` as a whole, versioning ``RandomState`` alone
+is superfluous.
+
+
+Discussion
+----------
+
+- https://mail.python.org/pipermail/numpy-discussion/2018-January/077608.html
+- https://github.com/numpy/numpy/pull/10124#issuecomment-350876221
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
:Author: <list of authors' real names and optionally, email addresses>
:Status: <Draft | Active | Accepted | Deferred | Rejected | Withdrawn | Final | Superseded>
-:Type: <Standards Track | Informational | Process>
+:Type: <Standards Track | Process>
:Created: <date created on, in yyyy-mm-dd format>
-
+:Resolution: <url> (required for Accepted | Rejected | Withdrawn)
Abstract
--------
--- /dev/null
+"""
+Scan the directory of nep files and extract their metadata. The
+metadata is passed to Jinja for filling out `index.rst.tmpl`.
+"""
+
+import os
+import sys
+import jinja2
+import glob
+import re
+
+
+def render(tpl_path, context):
+ path, filename = os.path.split(tpl_path)
+ return jinja2.Environment(
+ loader=jinja2.FileSystemLoader(path or './')
+ ).get_template(filename).render(context)
+
+def nep_metadata():
+    ignore = ('nep-template.rst',)
+    sources = sorted(glob.glob(r'nep-*.rst'))
+    sources = [s for s in sources if s not in ignore]
+
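+    # Match RST field-list lines such as ':Status: Draft', capturing
+    # the field name and its value.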
+ meta_re = r':([a-zA-Z\-]*): (.*)'
+
+ neps = {}
+ print('Loading metadata for:')
+ for source in sources:
+ print(f' - {source}')
+ nr = int(re.match(r'nep-([0-9]{4}).*\.rst', source).group(1))
+
+ with open(source) as f:
+ lines = f.readlines()
+ tags = [re.match(meta_re, line) for line in lines]
+ tags = [match.groups() for match in tags if match is not None]
+ tags = {tag[0]: tag[1] for tag in tags}
+
+ # We could do a clever regexp, but for now just assume the title is
+ # the second line of the document
+ tags['Title'] = lines[1].strip()
+ tags['Filename'] = source
+
+
+ if tags['Status'] in ('Accepted', 'Rejected', 'Withdrawn'):
+            if 'Resolution' not in tags:
+ raise RuntimeError(
+ f'NEP {nr} is Accepted/Rejected/Withdrawn but '
+ 'has no Resolution tag'
+ )
+
+ neps[nr] = tags
+
+ # Now that we have all of the NEP metadata, do some global consistency
+ # checks
+
+ for nr, tags in neps.items():
+ if tags['Status'] == 'Superseded':
+            if 'Replaced-By' not in tags:
+ raise RuntimeError(
+ f'NEP {nr} has been Superseded, but has no Replaced-By tag'
+ )
+
+ replaced_by = int(tags['Replaced-By'])
+ replacement_nep = neps[replaced_by]
+
+            if 'Replaces' not in replacement_nep:
+ raise RuntimeError(
+ f'NEP {nr} is superseded by {replaced_by}, but that NEP has '
+ f"no Replaces tag."
+ )
+
+            if int(replacement_nep['Replaces']) != nr:
+ raise RuntimeError(
+ f'NEP {nr} is superseded by {replaced_by}, but that NEP has a '
+ f"Replaces tag of `{replacement_nep['Replaces']}`."
+ )
+
+ if 'Replaces' in tags:
+ replaced_nep = int(tags['Replaces'])
+ replaced_nep_tags = neps[replaced_nep]
+            if replaced_nep_tags['Status'] != 'Superseded':
+ raise RuntimeError(
+ f'NEP {nr} replaces {replaced_nep}, but that NEP has not '
+ f'been set to Superseded'
+ )
+
+ return {'neps': neps}
+
+
+infile = 'index.rst.tmpl'
+outfile = 'index.rst'
+
+meta = nep_metadata()
+
+print(f'Compiling {infile} -> {outfile}')
+index = render(infile, meta)
+
+with open(outfile, 'w') as f:
+ f.write(index)
--- /dev/null
+==========================
+NumPy 1.14.3 Release Notes
+==========================
+
+This is a bugfix release for a few bugs reported following the 1.14.2 release:
+
+* np.lib.recfunctions.fromrecords accepts a list-of-lists, until 1.15
+* In python2, float types use the new print style when printing to a file
+* style arg in "legacy" print mode now works for 0d arrays
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
+3.6 wheels available from PIP are built with Python 3.6.2 and should be
+compatible with all previous versions of Python 3.6. The source releases were
+cythonized with Cython 0.28.2.
+
+Contributors
+============
+
+A total of 6 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Jonathan March +
+* Malcolm Smith +
+* Matti Picus
+* Pauli Virtanen
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#10862 <https://github.com/numpy/numpy/pull/10862>`__: BUG: floating types should override tp_print (1.14 backport)
+* `#10905 <https://github.com/numpy/numpy/pull/10905>`__: BUG: for 1.14 back-compat, accept list-of-lists in fromrecords
+* `#10947 <https://github.com/numpy/numpy/pull/10947>`__: BUG: 'style' arg to array2string broken in legacy mode (1.14...
+* `#10959 <https://github.com/numpy/numpy/pull/10959>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key
+* `#10960 <https://github.com/numpy/numpy/pull/10960>`__: BUG: Add missing underscore to prototype in check_embedded_lapack
+* `#10961 <https://github.com/numpy/numpy/pull/10961>`__: BUG: Fix encoding regression in ma/bench.py (Issue #10868)
+* `#10962 <https://github.com/numpy/numpy/pull/10962>`__: BUG: core: fix NPY_TITLE_KEY macro on pypy
+* `#10974 <https://github.com/numpy/numpy/pull/10974>`__: BUG: test, fix PyArray_DiscardWritebackIfCopy...
--- /dev/null
+==========================
+NumPy 1.14.4 Release Notes
+==========================
+
+This is a bugfix release for bugs reported following the 1.14.3 release. The
+most significant fixes are:
+
+* fixes for compiler instruction reordering that resulted in NaN's not being
+ properly propagated in `np.max` and `np.min`,
+
+* fixes for bus faults on SPARC and older ARM due to incorrect alignment
+ checks.
+
+There are also improvements to printing of long doubles on PPC platforms. All
+is not yet perfect on that platform: the whitespace padding is still incorrect
+and is to be fixed in numpy 1.15, so NumPy still fails some
+printing-related (and other) unit tests on ppc systems. However, the printed
+values are now correct.
+
+Note that NumPy will error on import if it detects incorrect float32 `dot`
+results. This problem has been seen on the Mac when working in the Anaconda
+environment and is due to a subtle interaction between MKL and PyQt5. It is not
+strictly a NumPy problem, but it is best that users be aware of it. See the
+gh-8577 NumPy issue for more information.
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
+3.6 wheels available from PIP are built with Python 3.6.2 and should be
+compatible with all previous versions of Python 3.6. The source releases were
+cythonized with Cython 0.28.2 and should work for the upcoming Python 3.7.
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Marten van Kerkwijk
+* Matti Picus
+* Pauli Virtanen
+* Ryan Soklaski +
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 11 pull requests were merged for this release.
+
+* `#11104 <https://github.com/numpy/numpy/pull/11104>`__: BUG: str of DOUBLE_DOUBLE format wrong on ppc64
+* `#11170 <https://github.com/numpy/numpy/pull/11170>`__: TST: linalg: add regression test for gh-8577
+* `#11174 <https://github.com/numpy/numpy/pull/11174>`__: MAINT: add sanity-checks to be run at import time
+* `#11181 <https://github.com/numpy/numpy/pull/11181>`__: BUG: void dtype setup checked offset not actual pointer for alignment
+* `#11194 <https://github.com/numpy/numpy/pull/11194>`__: BUG: Python2 doubles don't print correctly in interactive shell.
+* `#11198 <https://github.com/numpy/numpy/pull/11198>`__: BUG: optimizing compilers can reorder call to npy_get_floatstatus
+* `#11199 <https://github.com/numpy/numpy/pull/11199>`__: BUG: reduce using SSE only warns if inside SSE loop
+* `#11203 <https://github.com/numpy/numpy/pull/11203>`__: BUG: Bytes delimiter/comments in genfromtxt should be decoded
+* `#11211 <https://github.com/numpy/numpy/pull/11211>`__: BUG: Fix reference count/memory leak exposed by better testing
+* `#11219 <https://github.com/numpy/numpy/pull/11219>`__: BUG: Fixes einsum broadcasting bug when optimize=True
+* `#11251 <https://github.com/numpy/numpy/pull/11251>`__: DOC: Document 1.14.4 release.
--- /dev/null
+==========================
+NumPy 1.14.5 Release Notes
+==========================
+
+This is a bugfix release for bugs reported following the 1.14.4 release. The
+most significant fixes are:
+
+* fixes for compilation errors on alpine and NetBSD
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
+3.6 wheels available from PIP are built with Python 3.6.2 and should be
+compatible with all previous versions of Python 3.6. The source releases were
+cythonized with Cython 0.28.2 and should work for the upcoming Python 3.7.
+
+Contributors
+============
+
+A total of 1 person contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+
+Pull requests merged
+====================
+
+A total of 2 pull requests were merged for this release.
+
+* `#11274 <https://github.com/numpy/numpy/pull/11274>`__: BUG: Correct use of NPY_UNUSED.
+* `#11294 <https://github.com/numpy/numpy/pull/11294>`__: BUG: Remove extra trailing parentheses.
+
NumPy 1.15.0 Release Notes
==========================
+NumPy 1.15.0 is a release with an unusual number of cleanups, many deprecations
+of old functions, and improvements to many existing functions. Please read the
+detailed descriptions below to see if you are affected.
+
+For testing, we have switched to pytest as a replacement for the no longer
+maintained nose framework. The old nose based interface remains for downstream
+projects that may still be using it.
+
+The Python versions supported by this release are 2.7, 3.4-3.7. The wheels are
+linked with OpenBLAS v0.3.0, which should fix some of the linalg problems
+reported for NumPy 1.14.
+
Highlights
==========
+* NumPy has switched to pytest for testing.
+* A new `numpy.printoptions` context manager.
+* Many improvements to the histogram functions.
+* Support for unicode field names in python 2.7.
+* Improved support for PyPy.
+* Fixes and improvements to `numpy.einsum`.
+
New functions
=============
-* `np.gcd` and `np.lcm`, to compute the greatest common divisor and least
+* `numpy.gcd` and `numpy.lcm`, to compute the greatest common divisor and least
common multiple.
-* `np.ma.stack`, the `np.stack` array-joining function generalized to masked
- arrays.
-* `np.printoptions`, the context manager which sets print options temporarily
+* `numpy.ma.stack`, the `numpy.stack` array-joining function generalized to
+ masked arrays.
+
+* `numpy.quantile` function, an interface to ``percentile`` without factors of
+ 100
+
+* `numpy.nanquantile` function, an interface to ``nanpercentile`` without
+ factors of 100
+
+* `numpy.printoptions`, a context manager that sets print options temporarily
for the scope of the ``with`` block::
>>> with np.printoptions(precision=2):
- ... print(np.array([2.0])) / 3
+ ... print(np.array([2.0]) / 3)
[0.67]
- * `np.histogram_bin_edges`, a function to get the edges of the bins used by a histogram
- without needing to calculate the histogram.
+* `numpy.histogram_bin_edges`, a function to get the edges of the bins used by a
+ histogram without needing to calculate the histogram.
+
+* C functions `npy_get_floatstatus_barrier` and `npy_clear_floatstatus_barrier`
+ have been added to deal with compiler optimization changing the order of
+ operations. See below for details.
+
Deprecations
============
* Aliases of builtin `pickle` functions are deprecated, in favor of their
unaliased ``pickle.<func>`` names:
- * `np.loads`
- * `np.core.numeric.load`
- * `np.core.numeric.loads`
- * `np.ma.loads`, `np.ma.dumps`
- * `np.ma.load`, `np.ma.dump` - these functions already failed on python 3,
- when called with a string.
+ * `numpy.loads`
+ * `numpy.core.numeric.load`
+ * `numpy.core.numeric.loads`
+ * `numpy.ma.loads`, `numpy.ma.dumps`
+ * `numpy.ma.load`, `numpy.ma.dump` - these functions already failed on
+ python 3 when called with a string.
+
+* Multidimensional indexing with anything but a tuple is deprecated. This means
+ that the index list in ``ind = [slice(None), 0]; arr[ind]`` should be changed
+ to a tuple, e.g., ``ind = [slice(None), 0]; arr[tuple(ind)]`` or
+ ``arr[(slice(None), 0)]``. That change is necessary to avoid ambiguity in
+ expressions such as ``arr[[[0, 1], [0, 1]]]``, currently interpreted as
+ ``arr[array([0, 1]), array([0, 1])]``, which will be interpreted
+ as ``arr[array([[0, 1], [0, 1]])]`` in the future.
+
+* Imports from the following sub-modules are deprecated; they will be removed
+ at some future date.
+
+ * `numpy.testing.utils`
+ * `numpy.testing.decorators`
+ * `numpy.testing.nosetester`
+ * `numpy.testing.noseclasses`
+ * `numpy.core.umath_tests`
+
+* Giving a generator to `numpy.sum` is now deprecated. This was undocumented
+ behavior, but worked. Previously, it would calculate the sum of the generator
+ expression. In the future, it might return a different result. Use
+ ``np.sum(np.fromiter(generator))`` or the built-in Python ``sum`` instead.
+
+* Users of the C-API should call ``PyArray_ResolveWritebackIfCopy`` or
+ ``PyArray_DiscardWritebackIfCopy`` on any array with the ``WRITEBACKIFCOPY``
+ flag set, before deallocating the array. A deprecation warning will be
+ emitted if those calls are not used when needed.
+
+* Users of ``nditer`` should use the nditer object as a context manager
+ anytime one of the iterator operands is writeable, so that numpy can
+ manage writeback semantics, or should call ``it.close()``. A
+ `RuntimeWarning` may be emitted otherwise in these cases.
+
+* The ``normed`` argument of ``np.histogram``, deprecated long ago in 1.6.0,
+ now emits a ``DeprecationWarning``.
Future Changes
==============
+* NumPy 1.16 will drop support for Python 3.4.
+* NumPy 1.17 will drop support for Python 2.7.
+
Compatibility notes
===================
+Compiled testing modules renamed and made private
+-------------------------------------------------
+The following compiled modules have been renamed and made private:
+
+* ``umath_tests`` -> ``_umath_tests``
+* ``test_rational`` -> ``_rational_tests``
+* ``multiarray_tests`` -> ``_multiarray_tests``
+* ``struct_ufunc_test`` -> ``_struct_ufunc_tests``
+* ``operand_flag_tests`` -> ``_operand_flag_tests``
+
+The ``umath_tests`` module is still available for backwards compatibility, but
+will be removed in the future.
+
+The ``NpzFile`` returned by ``np.savez`` is now a ``collections.abc.Mapping``
+-----------------------------------------------------------------------------
+This means it behaves like a readonly dictionary, and has a new ``.values()``
+method and ``len()`` implementation.
+
+For python 3, this means that ``.iteritems()``, ``.iterkeys()`` have been
+deprecated, and ``.keys()`` and ``.items()`` now return views and not lists.
+This is consistent with how the builtin ``dict`` type changed between python 2
+and python 3.
+
+Under certain conditions, ``nditer`` must be used in a context manager
+----------------------------------------------------------------------
+When using a `numpy.nditer` with the ``"writeonly"`` or ``"readwrite"`` flags, there
+are some circumstances where nditer doesn't actually give you a view of the
+writable array. Instead, it gives you a copy, and if you make changes to the
+copy, nditer later writes those changes back into your actual array. Currently,
+this writeback occurs when the array objects are garbage collected, which makes
+this API error-prone on CPython and entirely broken on PyPy. Therefore,
+``nditer`` should now be used as a context manager whenever it is used
+with writeable arrays, e.g., ``with np.nditer(...) as it: ...``. You may also
+explicitly call ``it.close()`` for cases where a context manager is unusable,
+for instance in generator expressions.
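+
+A minimal example of the context-manager form::
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> with np.nditer(a, op_flags=['readwrite']) as it:
+    ...     for x in it:
+    ...         x[...] = 2 * x
+    ...
+    >>> a
+    array([[ 0,  2,  4],
+           [ 6,  8, 10]])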
+
+Numpy has switched to using pytest instead of nose for testing
+--------------------------------------------------------------
+The last nose release was 1.3.7 in June, 2015, and development of that tool has
+ended; consequently, NumPy has now switched to using pytest. The old decorators
+and nose tools that were previously used by some downstream projects remain
+available, but will not be maintained. The standard testing utilities,
+``assert_almost_equal`` and such, are not affected by this change except for
+the nose-specific functions ``import_nose`` and ``raises``. Those functions are
+not used in numpy, but are kept for downstream compatibility.
+
+Numpy no longer monkey-patches ``ctypes`` with ``__array_interface__``
+----------------------------------------------------------------------
+Previously numpy added ``__array_interface__`` attributes to all the integer
+types from ``ctypes``.
+
``np.ma.notmasked_contiguous`` and ``np.ma.flatnotmasked_contiguous`` always return lists
-----------------------------------------------------------------------------------------
-This was always the documented behavior, but in reality the result used to be
-any of slice, None, or list.
-
-All downstream users seem to use detect the `None` result from
-``flatnotmasked_contiguous`` and replace it with ``[]``.
-These callers will continue to work as before.
+This is the documented behavior, but previously the result could be any of
+slice, None, or list.
+
+All downstream users seem to check for the ``None`` result from
+``flatnotmasked_contiguous`` and replace it with ``[]``. Those callers will
+continue to work as before.
+
+``np.squeeze`` restores old behavior of objects that cannot handle an ``axis`` argument
+---------------------------------------------------------------------------------------
+Prior to version ``1.7.0``, `numpy.squeeze` did not have an ``axis`` argument and
+all empty axes were removed by default. The incorporation of an ``axis``
+argument made it possible to selectively squeeze single or multiple empty axes,
+but the old API expectation was not respected because axes could still be
+selectively removed (silent success) from an object expecting all empty axes to
+be removed. That silent, selective removal of empty axes for objects expecting
+the old behavior has been fixed and the old behavior restored.
+
+unstructured void array's ``.item`` method now returns a bytes object
+---------------------------------------------------------------------
+``.item`` now returns a ``bytes`` object instead of a buffer or byte array.
+This may affect code which assumed the return value was mutable, which is no
+longer the case.
+
+``copy.copy`` and ``copy.deepcopy`` no longer turn ``masked`` into an array
+---------------------------------------------------------------------------
+Since ``np.ma.masked`` is a readonly scalar, copying should be a no-op. These
+functions now behave consistently with ``np.copy()``.
+
+Multifield Indexing of Structured Arrays will still return a copy
+-----------------------------------------------------------------
+The change that multi-field indexing of structured arrays returns a view
+instead of a copy is pushed back to 1.16. A new method
+``numpy.lib.recfunctions.repack_fields`` has been introduced to help mitigate
+the effects of this change, which can be used to write code compatible with
+both numpy 1.15 and 1.16. For more information on how to update code to account
+for this future change see the "accessing multiple fields" section of the
+`user guide <https://docs.scipy.org/doc/numpy/user/basics.rec.html>`__.
C API changes
=============
+New functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier``
+-----------------------------------------------------------------------------------
+Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier``
+have been added and should be used in place of the ``npy_get_floatstatus`` and
+``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang
+were rearranging the order of operations when the previous functions were used
+in the ufunc SIMD functions, resulting in the floatstatus flags being checked
+before the operation whose status we wanted to check was run. See `#10370
+<https://github.com/numpy/numpy/issues/10370>`__.
+
+Changes to ``PyArray_GetDTypeTransferFunction``
+-----------------------------------------------
+``PyArray_GetDTypeTransferFunction`` now defaults to using user-defined
+``copyswapn`` / ``copyswap`` for user-defined dtypes. If this causes a
+significant performance hit, consider implementing ``copyswapn`` to reflect the
+implementation of ``PyArray_GetStridedCopyFn``. See `#10898
+<https://github.com/numpy/numpy/pull/10898>`__.
+
New Features
============
--------------------------------------------------------------------
These compute the greatest common divisor, and lowest common multiple,
respectively. These work on all the numpy integer types, as well as the
-builtin arbitrary-precision `Decimal` and `long` types.
+builtin arbitrary-precision ``Decimal`` and ``long`` types.
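+
+A minimal illustration::
+
+    >>> np.gcd(12, 20)
+    4
+    >>> np.lcm(4, 6)
+    12
+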
Support for cross-platform builds for iOS
-----------------------------------------
-
The build system has been modified to add support for the
``_PYTHON_HOST_PLATFORM`` environment variable, used by ``distutils`` when
compiling on one platform for another platform. This makes it possible to
architectures supported by iOS (i386, x86_64, armv7, armv7s and arm64), and
combining these 5 compiled builds products into a single "fat" binary.
+``return_indices`` keyword added for ``np.intersect1d``
+-------------------------------------------------------
+New keyword ``return_indices`` returns the indices of the two input arrays
+that correspond to the common elements.
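+
+For example (the indices refer to the first occurrences in each input)::
+
+    >>> a = np.array([1, 2, 4, 8])
+    >>> b = np.array([2, 4, 16])
+    >>> common, ia, ib = np.intersect1d(a, b, return_indices=True)
+    >>> common, ia, ib
+    (array([2, 4]), array([1, 2]), array([0, 1]))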
+
+``np.quantile`` and ``np.nanquantile``
+--------------------------------------
+Like ``np.percentile`` and ``np.nanpercentile``, but takes quantiles in [0, 1]
+rather than percentiles in [0, 100]. ``np.percentile`` is now a thin wrapper
+around ``np.quantile`` with the extra step of dividing by 100.
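+
+For example, with the default (linear) interpolation::
+
+    >>> np.quantile([1, 2, 3, 4], 0.5)
+    2.5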
+
+
+Build system
+------------
+Added experimental support for the 64-bit RISC-V architecture.
+
+
Improvements
============
-``histogram`` and ``histogramdd` functions have moved to ``np.lib.histograms``
-------------------------------------------------------------------------------
+``np.einsum`` updates
+---------------------
+Syncs einsum path optimization between `numpy` and `opt_einsum`. In
+particular, the `greedy` path has received many enhancements by @jcmgray. A
+full list of the issues fixed:
+
+* Arbitrary memory can be passed into the `greedy` path. Fixes gh-11210.
+* The greedy path has been updated to contain more dynamic programming ideas
+ preventing a large number of duplicate (and expensive) calls that figure out
+ the actual pair contraction that takes place. Now takes a few seconds on
+ several hundred input tensors. Useful for matrix product state theories.
+* Reworks the broadcasting dot error catching found in gh-11218 gh-10352 to be
+ a bit earlier in the process.
+* Enhances the `can_dot` functionality that previously missed an edge case (part
+ of gh-11308).
+
+``np.ufunc.reduce`` and related functions now accept an initial value
+---------------------------------------------------------------------
+``np.ufunc.reduce``, ``np.sum``, ``np.prod``, ``np.min`` and ``np.max`` all
+now accept an ``initial`` keyword argument that specifies the value to start
+the reduction with.
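+
+For example::
+
+    >>> np.sum([1, 2], initial=10)
+    13
+    >>> np.min([5, 8], initial=4)
+    4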
+
+``np.flip`` can operate over multiple axes
+------------------------------------------
+``np.flip`` now accepts None, or tuples of int, in its ``axis`` argument. If
+axis is None, it will flip over all the axes.
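+
+For example::
+
+    >>> a = np.arange(4).reshape(2, 2)
+    >>> np.flip(a, axis=None)           # flip over all axes
+    array([[3, 2],
+           [1, 0]])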
+
+``histogram`` and ``histogramdd`` functions have moved to ``np.lib.histograms``
+-------------------------------------------------------------------------------
These were originally found in ``np.lib.function_base``. They are still
available under their un-scoped ``np.histogram(dd)`` names, and
to maintain compatibility, aliased at ``np.lib.function_base.histogram(dd)``.
``histogram`` will accept NaN values when explicit bins are given
-----------------------------------------------------------------
Previously it would fail when trying to compute a finite range for the data.
-Since the range is ignored anyway when the bins are given explcitly, this error
+Since the range is ignored anyway when the bins are given explicitly, this error
was needless.
-Note that calling `histogram` on NaN values continues to raise the
-`RuntimeWarning`s typical of working with nan values, which can be silenced
-as usual with `errstate`.
+Note that calling ``histogram`` on NaN values continues to raise the
+``RuntimeWarning`` s typical of working with nan values, which can be silenced
+as usual with ``errstate``.
``histogram`` works on datetime types, when explicit bin edges are given
------------------------------------------------------------------------
Dates, times, and timedeltas can now be histogrammed. The bin edges must be
passed explicitly, and are not yet computed automatically.
-``np.r_`` works with 0d arrays, and ``np.ma.mr_` works with ``np.ma.masked``
-----------------------------------------------------------------------------
+``histogram`` "auto" estimator handles limited variance better
+--------------------------------------------------------------
+An IQR of 0 no longer results in ``n_bins=1``; rather, the number of bins
+chosen is related to the data size in this situation.
+
+The edges returned by ``histogram`` and ``histogramdd`` now match the data float type
+--------------------------------------------------------------------------------------
+When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the
+returned edges are now of the same dtype. Previously, ``histogram`` would only
+return the same type if explicit bins were given, and ``histogramdd`` would
+produce ``float64`` bins no matter what the inputs.
+
+``histogramdd`` allows explicit ranges to be given in a subset of axes
+----------------------------------------------------------------------
+The ``range`` argument of `numpy.histogramdd` can now contain ``None`` values to
+indicate that the range for the corresponding axis should be computed from the
+data. Previously, this could not be specified on a per-axis basis.
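+
+A short sketch (the second axis range is computed from the data)::
+
+    >>> x = np.array([[0.1, 5.0], [0.4, 6.0], [0.9, 7.0]])
+    >>> hist, edges = np.histogramdd(x, bins=(2, 2), range=[(0, 1), None])
+    >>> hist.shape
+    (2, 2)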
+
+The ``normed`` arguments of ``histogramdd`` and ``histogram2d`` have been renamed
+----------------------------------------------------------------------------------
+These arguments are now called ``density``, which is consistent with
+``histogram``. The old argument continues to work, but the new name should be
+preferred.
+
+``np.r_`` works with 0d arrays, and ``np.ma.mr_`` works with ``np.ma.masked``
+-----------------------------------------------------------------------------
0d arrays passed to the `r_` and `mr_` concatenation helpers are now treated as
though they are arrays of length 1. Previously, passing these was an error.
-As a result, ``np.ma.mr_`` now works correctly on the ``masked`` constant.
+As a result, `numpy.ma.mr_` now works correctly on the ``masked`` constant.
``np.ptp`` accepts a ``keepdims`` argument, and extended axis tuples
--------------------------------------------------------------------
-``np.ptp`` (peak-to-peak) can now work over multiple axes, just like `max` and
-`min`.
+``np.ptp`` (peak-to-peak) can now work over multiple axes, just like ``np.max``
+and ``np.min``.
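+
+For example::
+
+    >>> np.ptp(np.arange(12).reshape(3, 4), axis=(0, 1), keepdims=True)
+    array([[11]])
+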
``MaskedArray.astype`` now is identical to ``ndarray.astype``
-------------------------------------------------------------
ndarray work for masked array too.
Enable AVX2/AVX512 at compile time
--------------------------------------------------------------
-Change to simd.inc.src to use AVX2 or AVX512 at compile time. Solving the gap
-that if compile numpy for avx2 (or 512) with -march=native, still get the SSE
-code for the simd functions even though rest of the code gets AVX2.
+----------------------------------
+Change to simd.inc.src to allow use of AVX2 or AVX512 at compile time. Previously
+compilation for avx2 (or 512) with -march=native would still use the SSE
+code for the simd functions even when the rest of the code got AVX2.
``nan_to_num`` always returns scalars when receiving scalar or 0d inputs
------------------------------------------------------------------------
arrays.
Allow dtype field names to be unicode in Python 2
----------------------------------------------------------------
+-------------------------------------------------
Previously ``np.dtype([(u'name', float)])`` would raise a ``TypeError`` in
Python 2, as only bytestrings were allowed in field names. Now any unicode
string field names will be encoded with the ``ascii`` codec, raising a
``from __future__ import unicode_literals``, which previously would cause
string literal field names to raise a TypeError in Python 2.
-Changes
-=======
+Comparison ufuncs accept ``dtype=object``, overriding the default ``bool``
+--------------------------------------------------------------------------
+This allows object arrays of symbolic types, which override ``==`` and other
+operators to return expressions, to be compared elementwise with
+``np.equal(a, b, dtype=object)``.
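+
+A minimal sketch with plain Python objects::
+
+    >>> a = np.array([1, 2], dtype=object)
+    >>> b = np.array([1, 3], dtype=object)
+    >>> np.equal(a, b, dtype=object)
+    array([True, False], dtype=object)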
+
+``sort`` functions accept ``kind='stable'``
+-------------------------------------------
+Up until now, to perform a stable sort on the data, the user had to do::
+
+    >>> np.sort([5, 2, 6, 2, 1], kind='mergesort')
+    array([1, 2, 2, 5, 6])
+
+because merge sort is the only stable sorting algorithm available in
+NumPy. However, ``kind='mergesort'`` does not make it explicit that
+the user wants to perform a stable sort, which harms readability.
+
+This change allows the user to specify ``kind='stable'``, clarifying
+the intent.
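+
+For example::
+
+    >>> np.sort([5, 2, 6, 2, 1], kind='stable')
+    array([1, 2, 2, 5, 6])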
+
+Do not make temporary copies for in-place accumulation
+------------------------------------------------------
+When ufuncs perform accumulation they no longer make temporary copies because
+of the overlap between input and output, that is, the next element accumulated
+is added before the accumulated result is stored in its place, hence the
+overlap is safe. Avoiding the copy results in faster execution.
+
+``linalg.matrix_power`` can now handle stacks of matrices
+---------------------------------------------------------
+Like other functions in ``linalg``, ``matrix_power`` can now deal with arrays
+of dimension larger than 2, which are treated as stacks of matrices. As part
+of the change, to further improve consistency, the name of the first argument
+has been changed to ``a`` (from ``M``), and the exceptions for non-square
+matrices have been changed to ``LinAlgError`` (from ``ValueError``).
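+
+For example, applied to a stack of two 2x2 matrices::
+
+    >>> m = np.array([[[2, 0], [0, 2]], [[1, 1], [0, 1]]])
+    >>> np.linalg.matrix_power(m, 2)[1]
+    array([[1, 2],
+           [0, 1]])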
+
+Increased performance in ``random.permutation`` for multidimensional arrays
+---------------------------------------------------------------------------
+``permutation`` uses the fast path in ``random.shuffle`` for all input
+array dimensions. Previously the fast path was only used for 1-d arrays.
+
+Generalized ufuncs now accept ``axes``, ``axis`` and ``keepdims`` arguments
+---------------------------------------------------------------------------
+One can control over which axes a generalized ufunc operates by passing in an
+``axes`` argument, a list of tuples with indices of particular axes. For
+instance, for a signature of ``(i,j),(j,k)->(i,k)`` appropriate for matrix
+multiplication, the base elements are two-dimensional matrices and these are
+taken to be stored in the two last axes of each argument. The corresponding
+axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``. If one wanted to
+use leading dimensions instead, one would pass in ``[(0, 1), (0, 1), (0, 1)]``.
+
+For simplicity, for generalized ufuncs that operate on 1-dimensional arrays
+(vectors), a single integer is accepted instead of a single-element tuple, and
+for generalized ufuncs for which all outputs are scalars, the (empty) output
+tuples can be omitted. Hence, for a signature of ``(i),(i)->()`` appropriate
+for an inner product, one could pass in ``axes=[0, 0]`` to indicate that the
+vectors are stored in the first dimensions of the two input arguments.
+
+As a short-cut for generalized ufuncs that are similar to reductions, i.e.,
+that act on a single, shared core dimension such as the inner product example
+above, one can pass an ``axis`` argument. This is equivalent to passing in
+``axes`` with identical entries for all arguments with that core dimension
+(e.g., for the example above, ``axes=[(axis,), (axis,)]``).
+
+Furthermore, like for reductions, for generalized ufuncs that have inputs that
+all have the same number of core dimensions and outputs with no core dimension,
+one can pass in ``keepdims`` to leave a dimension with size 1 in the outputs,
+thus allowing proper broadcasting against the original inputs. The location of
+the extra dimension can be controlled with ``axes``. For instance, for the
+inner-product example, ``keepdims=True, axes=[-2, -2, -2]`` (or, equivalently,
+``keepdims=True, axis=-2``) would act on the
+one-but-last dimension of the input arguments, and leave a size 1 dimension in
+that place in the output.
+
+float128 values now print correctly on ppc systems
+--------------------------------------------------
+Previously printing float128 values was buggy on ppc, since the special
+double-double floating-point-format on these systems was not accounted for.
+float128s now print with correct rounding and uniqueness.
+
+Warning to ppc users: You should upgrade glibc if it is version <=2.23,
+especially if using float128. On ppc, glibc's malloc in these versions often
+misaligns allocated memory, which can crash numpy when using float128 values.
+
+New ``np.take_along_axis`` and ``np.put_along_axis`` functions
+--------------------------------------------------------------
+When used on multidimensional arrays, ``argsort``, ``argmin``, ``argmax``, and
+``argpartition`` return arrays that are difficult to use as indices.
+``take_along_axis`` provides an easy way to use these indices to look up values
+within an array, so that::
+
+ np.take_along_axis(a, np.argsort(a, axis=axis), axis=axis)
+
+is the same as::
+
+ np.sort(a, axis=axis)
+
+``np.put_along_axis`` acts as the dual operation for writing to these indices
+within an array.
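+
+For example, picking out each row's maximum with ``argmax``::
+
+    >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+    >>> i = np.expand_dims(np.argmax(a, axis=1), axis=1)
+    >>> np.take_along_axis(a, i, axis=1)
+    array([[30],
+           [60]])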
+
--- /dev/null
+==========================
+NumPy 1.15.1 Release Notes
+==========================
+
+This is a bugfix release for bugs and regressions reported following the 1.15.0
+release.
+
+* The annoying but harmless RuntimeWarning that "numpy.dtype size changed" has
+ been suppressed. The long-standing suppression was lost in the transition to
+ pytest.
+* The update to Cython 0.28.3 exposed a problematic use of a gcc attribute used
+ to prefer code size over speed in module initialization, possibly resulting in
+ incorrect compiled code. This has been fixed in the latest Cython but has been
+ disabled here for safety.
+* Support for big-endian and ARMv8 architectures has been improved.
+
+The Python versions supported by this release are 2.7, 3.4-3.7. The wheels are
+linked with OpenBLAS v0.3.0, which should fix some of the linalg problems
+reported for NumPy 1.14.
+
+
+Compatibility Note
+==================
+
+The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
+binaries. That will also be the case in future releases. See
+`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
+discussion. Those needing 32-bit support should look elsewhere or build
+from source.
+
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Chris Billington
+* Elliott Sales de Andrade +
+* Eric Wieser
+* Jeremy Manning +
+* Matti Picus
+* Ralf Gommers
+
+Pull requests merged
+====================
+
+A total of 24 pull requests were merged for this release.
+
+* `#11647 <https://github.com/numpy/numpy/pull/11647>`__: MAINT: Filter Cython warnings in ``__init__.py``
+* `#11648 <https://github.com/numpy/numpy/pull/11648>`__: BUG: Fix doc source links to unwrap decorators
+* `#11657 <https://github.com/numpy/numpy/pull/11657>`__: BUG: Ensure singleton dimensions are not dropped when converting...
+* `#11661 <https://github.com/numpy/numpy/pull/11661>`__: BUG: Warn on Nan in minimum,maximum for scalars
+* `#11665 <https://github.com/numpy/numpy/pull/11665>`__: BUG: cython sometimes emits invalid gcc attribute
+* `#11682 <https://github.com/numpy/numpy/pull/11682>`__: BUG: Fix regression in void_getitem
+* `#11698 <https://github.com/numpy/numpy/pull/11698>`__: BUG: Make matrix_power again work for object arrays.
+* `#11700 <https://github.com/numpy/numpy/pull/11700>`__: BUG: Add missing PyErr_NoMemory after failing malloc
+* `#11719 <https://github.com/numpy/numpy/pull/11719>`__: BUG: Fix undefined functions on big-endian systems.
+* `#11720 <https://github.com/numpy/numpy/pull/11720>`__: MAINT: Make einsum optimize default to False.
+* `#11746 <https://github.com/numpy/numpy/pull/11746>`__: BUG: Fix regression in loadtxt for bz2 text files in Python 2.
+* `#11757 <https://github.com/numpy/numpy/pull/11757>`__: BUG: Revert use of `console_scripts`.
+* `#11758 <https://github.com/numpy/numpy/pull/11758>`__: BUG: Fix Fortran kind detection for aarch64 & s390x.
+* `#11759 <https://github.com/numpy/numpy/pull/11759>`__: BUG: Fix printing of longdouble on ppc64le.
+* `#11760 <https://github.com/numpy/numpy/pull/11760>`__: BUG: Fixes for unicode field names in Python 2
+* `#11761 <https://github.com/numpy/numpy/pull/11761>`__: BUG: Increase required cython version on python 3.7
+* `#11763 <https://github.com/numpy/numpy/pull/11763>`__: BUG: check return value of _buffer_format_string
+* `#11775 <https://github.com/numpy/numpy/pull/11775>`__: MAINT: Make assert_array_compare more generic.
+* `#11776 <https://github.com/numpy/numpy/pull/11776>`__: TST: Fix urlopen stubbing.
+* `#11777 <https://github.com/numpy/numpy/pull/11777>`__: BUG: Fix regression in intersect1d.
+* `#11779 <https://github.com/numpy/numpy/pull/11779>`__: BUG: Fix test sensitive to platform byte order.
+* `#11781 <https://github.com/numpy/numpy/pull/11781>`__: BUG: Avoid signed overflow in histogram
+* `#11785 <https://github.com/numpy/numpy/pull/11785>`__: BUG: Fix pickle and memoryview for datetime64, timedelta64 scalars
+* `#11786 <https://github.com/numpy/numpy/pull/11786>`__: BUG: Deprecation triggers segfault
--- /dev/null
+{{ fullname | escape | underline}}
+
+.. automodule:: {{ fullname }}
+
+ {% block docstring %}
+ {% endblock %}
+
+
<span class="linkdescr">f2py documentation</span></p>
<p class="biglink"><a class="biglink" href="{{ pathto("dev/index") }}">NumPy Developer Guide</a><br/>
<span class="linkdescr">contributing to NumPy</span></p>
+ <p class="biglink"><a class="biglink" href="{{ pathto("docs/index") }}">Building and Extending the Documentation</a><br/>
+ <span class="linkdescr">about this documentation</span></p>
</td></tr>
</table>
<td width="50%">
<p class="biglink"><a class="biglink" href="{{ pathto("bugs") }}">Reporting bugs</a></p>
<p class="biglink"><a class="biglink" href="{{ pathto("about") }}">About NumPy</a></p>
- <p class="biglink"><a class="biglink" href="{{ pathto("http://numpy.github.io/neps") }}">NumPy Enhancement Proposals</a><br/>
+ <p class="biglink"><a class="biglink" href="http://www.numpy.org/neps/index.html">
+ NumPy Enhancement Proposals</a><br/>
</td><td width="50%">
<p class="biglink"><a class="biglink" href="{{ pathto("release") }}">Release Notes</a></p>
<p class="biglink"><a class="biglink" href="{{ pathto("license") }}">License of NumPy</a></p>
<a href="https://archive.org/details/NumPyBook">"Guide to NumPy"</a>
(which generously entered Public Domain in August 2008). The reference
documentation for many of the functions are written by numerous
- contributors and developers of NumPy, both prior to and during the
- <a href="http://docs.scipy.org/numpy/">NumPy Documentation Marathon</a>.
+ contributors and developers of NumPy.
</p>
<p>
The preferred way to update the documentation is by submitting a pull
- request on Github (see the
- <a href="http://docs.scipy.org/doc/numpy-dev/dev/">Developer Guide</a>.
- The <a href="http://docs.scipy.org/numpy/">NumPy Documentation Wiki</a>
- can also still be used to submit documentation fixes.
+ request on Github (see the <a href="{{ pathto("docs/index") }}">Documentation Index</a>).
Please help us to further improve the NumPy documentation!
</p>
{% endblock %}
--- /dev/null
+{#
+ basic/searchbox.html
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx sidebar template: quick search box.
+
+ :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{%- if pagename != "search" and builder != "singlehtml" %}
+<div id="searchbox" style="display: none" role="search">
+ <h4>{{ _('Quick search') }}</h4>
+ <div>
+ <form class="search" action="{{ pathto('search') }}" method="get">
+ <input type="text" style="width: inherit;" name="q" />
+ <input type="submit" value="{{ _('search') }}" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+ </div>
+</div>
+<script type="text/javascript">$('#searchbox').show(0);</script>
+{%- endif %}
About NumPy
===========
-`NumPy <http://www.scipy.org/NumpPy/>`__ is the fundamental package
+NumPy is the fundamental package
needed for scientific computing with Python. This package contains:
- a powerful N-dimensional :ref:`array object <arrays>`
More information about the development of NumPy can be found at our `Developer Zone <https://scipy.scipy.org/scipylib/dev-zone.html>`__.
-If you want to fix issues in this documentation, the easiest way
-is to participate in `our ongoing documentation marathon
-<http://scipy.org/Developer_Zone/DocMarathon2008>`__.
+The project management structure can be found at our :doc:`governance page <dev/governance/index>`.
About this documentation
extensions = ['sphinx.ext.autodoc', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.autosummary',
- 'sphinx.ext.graphviz',
+ 'sphinx.ext.graphviz', 'sphinx.ext.ifconfig',
'matplotlib.sphinxext.plot_directive']
if sphinx.__version__ >= "1.4":
# General substitutions.
project = 'NumPy'
-copyright = '2008-2017, The SciPy community'
+copyright = '2008-2018, The SciPy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
+def setup(app):
+ # add a config value for `ifconfig` directives
+ app.add_config_value('python_version_major', str(sys.version_info.major), 'env')
# -----------------------------------------------------------------------------
# HTML output
"scipy_org_logo": False,
"rootlinks": []
}
- html_sidebars = {'index': 'indexsidebar.html'}
+ html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
html_additional_pages = {
'index': 'indexcontent.html',
intersphinx_mapping = {
'python': ('https://docs.python.org/dev', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
- 'matplotlib': ('http://matplotlib.org', None)
+ 'matplotlib': ('https://matplotlib.org', None)
}
except Exception:
return None
+ # strip decorators, which would resolve to the source of the decorator
+ # possibly an upstream bug in getsourcefile, bpo-1764286
+ try:
+ unwrap = inspect.unwrap
+ except AttributeError:
+ pass
+ else:
+ obj = unwrap(obj)
+
try:
fn = inspect.getsourcefile(obj)
except Exception:
Setting up and using your development environment
=================================================
-
Recommended development setup
-----------------------------
$ python runtests.py -v
$ python runtests.py -v -s random
- $ python runtests.py -v -t numpy/core/tests/test_iter.py:test_iter_c_order
+ $ python runtests.py -v -t numpy/core/tests/test_nditer.py::test_iter_c_order
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
When specifying a target using ``-s``, ``-t``, or ``--python``, additional
arguments may be forwarded to the target embedded by ``runtests.py`` by passing
the extra arguments after a bare ``--``. For example, to run a test method with
-the ``--pdb`` flag forwarded to nose, run the following::
+the ``--pdb`` flag forwarded to the target, run the following::
$ python runtests.py -t numpy/tests/test_scripts.py:test_f2py -- --pdb
+When using pytest as a target (the default), you can
+`match test names using python operators`_ by passing the ``-k`` argument to pytest::
+
+ $ python runtests.py -v -t numpy/core/tests/test_multiarray.py -- -k "MatMul and not vector"
+
Using ``runtests.py`` is the recommended approach to running tests.
There are also a number of alternatives to it, for example in-place
build or installing to a virtualenv. See the FAQ below for details.
$ python -c "import numpy as np; np.test()"
-Tests can also be run with ``nosetests numpy``, however then the NumPy-specific
-``nose`` plugin is not found which causes tests marked as ``KnownFailure`` to
-be reported as errors.
+Tests can also be run with ``pytest numpy``; however, then the NumPy-specific
+plugin is not found, which causes strange side effects.
Running individual test files can be useful; it's much faster than running the
whole test suite or that of a whole module (example: ``np.random.test()``).
$ tox -e py34
-For more extensive info on running and writing tests, see
-https://github.com/numpy/numpy/blob/master/doc/TESTS.rst.txt .
+For more extensive information, see :ref:`testing-guidelines`.
-*Note: do not run the tests from the root directory of your numpy git repo,
+*Note: do not run the tests from the root directory of your numpy git repo without ``runtests.py``,
that will result in strange test errors.*
.. _DebuggingWithGdb: https://wiki.python.org/moin/DebuggingWithGdb
-
.. _tox: https://tox.readthedocs.io/
-
.. _virtualenv: http://www.virtualenv.org/
-
.. _virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/
-
.. _Waf: https://code.google.com/p/waf/
+.. _`match test names using python operators`: https://docs.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests
Understanding the code & getting started
----------------------------------------
Now all those people can do::
- git clone git@githhub.com:your-user-name/numpy.git
+ git clone git@github.com:your-user-name/numpy.git
Remember that links starting with ``git@`` use the ssh protocol and are
read-write; links starting with ``git://`` are read-only.
development_setup
configure_git
- development_workflow
+ dot2_dot3
.. _git-resources:
-================
- git_ resources
-================
+=========================
+Additional Git_ Resources
+=========================
Tutorials and summaries
=======================
git_intro
following_latest
git_development
+ development_workflow
git_resources
--- /dev/null
+.. _howto-build-docs:
+
+=========================================
+Building the NumPy API and reference docs
+=========================================
+
+We currently use Sphinx_ for generating the API and reference
+documentation for NumPy. You will need Sphinx 1.0.1 or newer.
+
+If you only want to get the documentation, note that pre-built
+versions can be found at
+
+ http://docs.scipy.org/
+
+in several different formats.
+
+.. _Sphinx: http://sphinx.pocoo.org
+
+
+Instructions
+------------
+
+If you obtained NumPy via git, also get the git submodules that contain
+additional parts required for building the documentation::
+
+ git submodule init
+ git submodule update
+
+In addition, building the documentation requires the Sphinx extension
+`plot_directive`, which is shipped with Matplotlib_. This Sphinx extension can
+be installed by installing Matplotlib. You will also need Python 3.6.
+
+Since large parts of the main documentation are stored in
+docstrings, you will need to first build NumPy, and install it so
+that the correct version is imported by
+
+ >>> import numpy
+
+Note that you can, e.g., install NumPy to a temporary location and set
+the PYTHONPATH environment variable appropriately.
+
+After NumPy is installed, install SciPy since some of the plots in the random
+module require `scipy.special` to display properly. Now you are ready to
+generate the docs, so write::
+
+ make html
+
+in the ``doc/`` directory. If all goes well, this will generate a
+``build/html`` subdirectory containing the built documentation. Note
+that building the documentation on Windows is currently not actively
+supported, though it should be possible. (See Sphinx_ documentation
+for more information.)
+
+To build the PDF documentation, do instead::
+
+ make latex
+ make -C build/latex all-pdf
+
+You will need to have Latex installed for this.
+
+Instead of the above, you can also do::
+
+ make dist
+
+which will rebuild NumPy, install it to a temporary location, and
+build the documentation in all formats. This will most likely again
+only work on Unix platforms.
+
+The documentation for NumPy distributed at http://docs.scipy.org in html and
+pdf format is also built with ``make dist``. See `HOWTO RELEASE`_ for details on
+how to update http://docs.scipy.org.
+
+.. _Matplotlib: http://matplotlib.org/
+.. _HOWTO RELEASE: https://github.com/numpy/numpy/blob/master/doc/HOWTO_RELEASE.rst.txt
+
+Sphinx extensions
+-----------------
+
+NumPy's documentation uses several custom extensions to Sphinx. These
+are shipped in the ``sphinxext/`` directory (as git submodules, as discussed
+above), and are automatically enabled when building NumPy's documentation.
+
+If you want to make use of these extensions in third-party
+projects, they are available on PyPi_ as the numpydoc_ package.
+
+.. _PyPi: http://python.org/pypi
+.. _numpydoc: http://python.org/pypi/numpydoc
--- /dev/null
+.. _howto-document:
+
+
+A Guide to NumPy/SciPy Documentation
+====================================
+
+When using `Sphinx <http://sphinx.pocoo.org/>`__ in combination with the
+numpy conventions, you should use the ``numpydoc`` extension so that your
+docstrings will be handled correctly. For example, Sphinx will extract the
+``Parameters`` section from your docstring and convert it into a field
+list. Using ``numpydoc`` will also avoid the reStructuredText errors produced
+by plain Sphinx when it encounters numpy docstring conventions like
+section headers (e.g. ``-------------``) that sphinx does not expect to
+find in docstrings.
+
+Some features described in this document require a recent version of
+``numpydoc``. For example, the **Yields** section was added in
+``numpydoc`` 0.6.
+
+It is available from:
+
+* `numpydoc on PyPI <http://pypi.python.org/pypi/numpydoc>`_
+* `numpydoc on GitHub <https://github.com/numpy/numpydoc/>`_
+
+Note that for documentation within numpy, it is not necessary to do
+``import numpy as np`` at the beginning of an example. However, some
+sub-modules, such as ``fft``, are not imported by default, and you have to
+include them explicitly::
+
+ import numpy.fft
+
+after which you may use it::
+
+ np.fft.fft2(...)
+
+.. rubric::
+ **For convenience the** `formatting standard`_ **is included below with an
+ example**
+
+.. include:: ../../sphinxext/doc/format.rst
+
+.. _example:
+
+Example Source
+==============
+
+.. literalinclude:: ../../sphinxext/doc/example.py
+
+
+
+Example Rendered
+================
+
+.. ifconfig:: python_version_major < '3'
+
+ The example is rendered only when sphinx is run with python3 and above
+
+.. automodule:: doc.example
+ :members:
+
+.. _`formatting standard`: https://numpydoc.readthedocs.io/en/latest/format.html
--- /dev/null
+.. _documentation:
+
+NumPy's Documentation
+=====================
+
+.. toctree::
+ :maxdepth: 2
+
+ howto_document
+ howto_build_docs
+
that the package will use. F2PY can construct an interface to such
routines so that Python functions could be called from Fortran.
-Consider the following `Fortran 77 subroutine`__ that takes an array
+Consider the following Fortran 77 subroutine that takes an array
and applies a function ``func`` to its elements.
.. include:: calculate.f
.. index::
single: matrix
+.. note::
+ It is strongly advised *not* to use the matrix subclass. As described
+ below, it makes writing functions that deal consistently with matrices
+ and regular arrays very difficult. Currently, they are mainly used for
+ interacting with ``scipy.sparse``. We hope to provide an alternative
+ for this use, however, and eventually remove the ``matrix`` subclass.
+
:class:`matrix` objects inherit from the ndarray and therefore, they
have the same attributes and methods of ndarrays. There are six
important differences of matrix objects, however, that may lead to
(constructed by ``start:stop:step`` notation inside of brackets), an
integer, or a tuple of slice objects and integers. :const:`Ellipsis`
and :const:`newaxis` objects can be interspersed with these as
-well. In order to remain backward compatible with a common usage in
-Numeric, basic slicing is also initiated if the selection object is
-any non-ndarray sequence (such as a :class:`list`) containing :class:`slice`
-objects, the :const:`Ellipsis` object, or the :const:`newaxis` object,
-but not for integer arrays or other embedded sequences.
+well.
+
+.. deprecated:: 1.15.0
+
+ In order to remain backward compatible with a common usage in
+ Numeric, basic slicing is also initiated if the selection object is
+ any non-ndarray and non-tuple sequence (such as a :class:`list`) containing
+ :class:`slice` objects, the :const:`Ellipsis` object, or the :const:`newaxis`
+ object, but not for integer arrays or other embedded sequences.
.. index::
triple: ndarray; special methods; getitem
why this occurs.
Also recognize that ``x[[1,2,3]]`` will trigger advanced indexing,
- whereas ``x[[1,2,slice(None)]]`` will trigger basic slicing.
+ whereas due to the deprecated Numeric compatibility mentioned above,
+ ``x[[1,2,slice(None)]]`` will trigger basic slicing.
Integer array indexing
^^^^^^^^^^^^^^^^^^^^^^
>>> a = np.arange(6).reshape(2,3)
>>> for x in np.nditer(a):
- ... print x,
+ ... print(x, end=' ')
...
0 1 2 3 4 5
>>> a = np.arange(6).reshape(2,3)
>>> for x in np.nditer(a.T):
- ... print x,
+ ... print(x, end=' ')
...
0 1 2 3 4 5
>>> for x in np.nditer(a.T.copy(order='C')):
- ... print x,
+ ... print(x, end=' ')
...
0 3 1 4 2 5
>>> a = np.arange(6).reshape(2,3)
>>> for x in np.nditer(a, order='F'):
- ... print x,
+ ... print(x, end=' ')
...
0 3 1 4 2 5
>>> for x in np.nditer(a.T, order='C'):
- ... print x,
+ ... print(x, end=' ')
...
0 3 1 4 2 5
+.. _nditer-context-manager:
+
Modifying Array Values
----------------------
-By default, the :class:`nditer` treats the input array as a read-only
-object. To modify the array elements, you must specify either read-write
-or write-only mode. This is controlled with per-operand flags.
+By default, the :class:`nditer` treats the input operand as a read-only
+object. To be able to modify the array elements, you must specify either
+read-write or write-only mode using the `'readwrite'` or `'writeonly'`
+per-operand flags.
+
+The nditer will then yield writeable buffer arrays which you may modify. However,
+because the nditer must copy this buffer data back to the original array once
+iteration is finished, you must signal when the iteration is ended, by one of two
+methods. You may either:
-Regular assignment in Python simply changes a reference in the local or
-global variable dictionary instead of modifying an existing variable in
-place. This means that simply assigning to `x` will not place the value
-into the element of the array, but rather switch `x` from being an array
-element reference to being a reference to the value you assigned. To
-actually modify the element of the array, `x` should be indexed with
-the ellipsis.
+ - use the nditer as a context manager using the `with` statement, and
+ the temporary data will be written back when the context is exited.
+ - call the iterator's `close` method once finished iterating, which will trigger
+ the write-back.
+
+The nditer can no longer be iterated once either `close` is called or its
+context is exited.
.. admonition:: Example
>>> a
array([[0, 1, 2],
[3, 4, 5]])
- >>> for x in np.nditer(a, op_flags=['readwrite']):
- ... x[...] = 2 * x
+ >>> with np.nditer(a, op_flags=['readwrite']) as it:
+ ... for x in it:
+ ... x[...] = 2 * x
...
>>> a
array([[ 0, 2, 4],
>>> a = np.arange(6).reshape(2,3)
>>> for x in np.nditer(a, flags=['external_loop']):
- ... print x,
+ ... print(x, end=' ')
...
[0 1 2 3 4 5]
>>> for x in np.nditer(a, flags=['external_loop'], order='F'):
- ... print x,
+ ... print(x, end=' ')
...
[0 3] [1 4] [2 5]
>>> a = np.arange(6).reshape(2,3)
>>> it = np.nditer(a, flags=['f_index'])
>>> while not it.finished:
- ... print "%d <%d>" % (it[0], it.index),
+ ... print("%d <%d>" % (it[0], it.index), end=' ')
... it.iternext()
...
0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5>
>>> it = np.nditer(a, flags=['multi_index'])
>>> while not it.finished:
- ... print "%d <%s>" % (it[0], it.multi_index),
+ ... print("%d <%s>" % (it[0], it.multi_index), end=' ')
... it.iternext()
...
0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)>
>>> it = np.nditer(a, flags=['multi_index'], op_flags=['writeonly'])
- >>> while not it.finished:
- ... it[0] = it.multi_index[1] - it.multi_index[0]
- ... it.iternext()
+ >>> with it:
+ ... while not it.finished:
+ ... it[0] = it.multi_index[1] - it.multi_index[0]
+ ... it.iternext()
...
>>> a
array([[ 0, 1, 2],
>>> a = np.arange(6).reshape(2,3)
>>> for x in np.nditer(a, flags=['external_loop'], order='F'):
- ... print x,
+ ... print(x, end=' ')
...
[0 3] [1 4] [2 5]
>>> for x in np.nditer(a, flags=['external_loop','buffered'], order='F'):
- ... print x,
+ ... print(x, end=' ')
...
[0 3 1 4 2 5]
>>> a = np.arange(6).reshape(2,3) - 3
>>> for x in np.nditer(a, op_dtypes=['complex128']):
- ... print np.sqrt(x),
+ ... print(np.sqrt(x), end=' ')
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
>>> a = np.arange(6).reshape(2,3) - 3
>>> for x in np.nditer(a, op_flags=['readonly','copy'],
... op_dtypes=['complex128']):
- ... print np.sqrt(x),
+ ... print(np.sqrt(x), end=' ')
...
1.73205080757j 1.41421356237j 1j 0j (1+0j) (1.41421356237+0j)
>>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['complex128']):
- ... print np.sqrt(x),
+ ... print(np.sqrt(x), end=' ')
...
1.73205080757j 1.41421356237j 1j 0j (1+0j) (1.41421356237+0j)
>>> a = np.arange(6.)
>>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32']):
- ... print x,
+ ... print(x, end=' ')
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
>>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32'],
... casting='same_kind'):
- ... print x,
+ ... print(x, end=' ')
...
0.0 1.0 2.0 3.0 4.0 5.0
>>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['int32'], casting='same_kind'):
- ... print x,
+ ... print(x, end=' ')
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
>>> a = np.arange(3)
>>> b = np.arange(6).reshape(2,3)
>>> for x, y in np.nditer([a,b]):
- ... print "%d:%d" % (x,y),
+ ... print("%d:%d" % (x,y), end=' ')
...
0:0 1:1 2:2 0:3 1:4 2:5
>>> a = np.arange(2)
>>> b = np.arange(6).reshape(2,3)
>>> for x, y in np.nditer([a,b]):
- ... print "%d:%d" % (x,y),
+ ... print("%d:%d" % (x,y), end=' ')
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
.. admonition:: Example
>>> def square(a):
- ... it = np.nditer([a, None])
- ... for x, y in it:
- ... y[...] = x*x
- ... return it.operands[1]
+ ... with np.nditer([a, None]) as it:
+ ... for x, y in it:
+ ... y[...] = x*x
+ ... return it.operands[1]
...
>>> square([1,2,3])
array([1, 4, 9])
... flags = ['external_loop', 'buffered'],
... op_flags = [['readonly'],
... ['writeonly', 'allocate', 'no_broadcast']])
- ... for x, y in it:
- ... y[...] = x*x
- ... return it.operands[1]
+ ... with it:
+ ... for x, y in it:
+ ... y[...] = x*x
+ ... return it.operands[1]
...
>>> square([1,2,3])
>>> b = np.arange(8).reshape(2,4)
>>> it = np.nditer([a, b, None], flags=['external_loop'],
... op_axes=[[0, -1, -1], [-1, 0, 1], None])
- >>> for x, y, z in it:
- ... z[...] = x*y
+ >>> with it:
+ ... for x, y, z in it:
+ ... z[...] = x*y
+ ... result = it.operands[2] # same as z
...
- >>> it.operands[2]
+ >>> result
array([[[ 0, 0, 0, 0],
[ 0, 0, 0, 0]],
[[ 0, 1, 2, 3],
[[ 0, 2, 4, 6],
[ 8, 10, 12, 14]]])
+Note that once the iterator is closed we cannot access :func:`operands <nditer.operands>`
+and must use a reference created inside the context manager.
+
Reduction Iteration
-------------------
>>> a = np.arange(24).reshape(2,3,4)
>>> b = np.array(0)
- >>> for x, y in np.nditer([a, b], flags=['reduce_ok', 'external_loop'],
- ... op_flags=[['readonly'], ['readwrite']]):
- ... y[...] += x
+ >>> with np.nditer([a, b], flags=['reduce_ok', 'external_loop'],
+ ... op_flags=[['readonly'], ['readwrite']]) as it:
+ ... for x, y in it:
+ ... y[...] += x
...
>>> b
array(276)
>>> it = np.nditer([a, None], flags=['reduce_ok', 'external_loop'],
... op_flags=[['readonly'], ['readwrite', 'allocate']],
... op_axes=[None, [0,1,-1]])
- >>> it.operands[1][...] = 0
- >>> for x, y in it:
- ... y[...] += x
+ >>> with it:
+ ... it.operands[1][...] = 0
+ ... for x, y in it:
+ ... y[...] += x
+ ... result = it.operands[1]
...
- >>> it.operands[1]
+ >>> result
array([[ 6, 22, 38],
[54, 70, 86]])
>>> np.sum(a, axis=2)
... 'buffered', 'delay_bufalloc'],
... op_flags=[['readonly'], ['readwrite', 'allocate']],
... op_axes=[None, [0,1,-1]])
- >>> it.operands[1][...] = 0
- >>> it.reset()
- >>> for x, y in it:
- ... y[...] += x
+ >>> with it:
+ ... it.operands[1][...] = 0
+ ... it.reset()
+ ... for x, y in it:
+ ... y[...] += x
+ ... result = it.operands[1]
...
- >>> it.operands[1]
+ >>> result
array([[ 6, 22, 38],
[54, 70, 86]])
... op_flags=[['readonly'], ['readwrite', 'allocate']],
... op_axes=[None, axeslist],
... op_dtypes=['float64', 'float64'])
- ... it.operands[1][...] = 0
- ... it.reset()
- ... for x, y in it:
- ... y[...] += x*x
- ... return it.operands[1]
+ ... with it:
+ ... it.operands[1][...] = 0
+ ... it.reset()
+ ... for x, y in it:
+ ... y[...] += x*x
+ ... return it.operands[1]
...
>>> a = np.arange(6).reshape(2,3)
>>> sum_squares_py(a)
op_flags=[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, axeslist],
op_dtypes=['float64', 'float64'])
- it.operands[1][...] = 0
- it.reset()
- for xarr, yarr in it:
- x = xarr
- y = yarr
- size = x.shape[0]
- for i in range(size):
- value = x[i]
- y[i] = y[i] + value * value
- return it.operands[1]
+ with it:
+ it.operands[1][...] = 0
+ it.reset()
+ for xarr, yarr in it:
+ x = xarr
+ y = yarr
+ size = x.shape[0]
+ for i in range(size):
+ value = x[i]
+ y[i] = y[i] + value * value
+ return it.operands[1]
On this machine, building the .pyx file into a module looked like the
following, but you may have to find some Cython tutorials to tell you
.. c:function:: int PyArray_SetWritebackIfCopyBase(PyArrayObject* arr, PyArrayObject* base)
Precondition: ``arr`` is a copy of ``base`` (though possibly with different
- strides, ordering, etc.) Sets the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag
+ strides, ordering, etc.) Sets the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag
and ``arr->base``, and set ``base`` to READONLY. Call
:c:func:`PyArray_ResolveWritebackIfCopy` before calling
``Py_DECREF`` in order to copy any changes back to ``base`` and
.. c:function:: int PyArray_ResolveWritebackIfCopy(PyArrayObject* obj)
If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or (deprecated)
- :c:data:`NPY_ARRAY_UPDATEIFCOPY`, this function copies ``obj->data`` to
- `obj->base->data`, clears the flags, `DECREF` s `obj->base` and makes it
- writeable, and sets ``obj->base`` to NULL. This is the opposite of
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY`, this function copies ``obj->data`` to
+ ``obj->base->data``, clears the flags, ``DECREF``'s ``obj->base`` and makes
+ it writeable, and sets ``obj->base`` to NULL. It returns the error state
+ of the copy operation. This is the opposite of
:c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called once
you are finished with ``obj``, just before ``Py_DECREF(obj)``. It may be called
- multiple times, or with ``NULL`` input.
+ multiple times, or with ``NULL`` input. See also
+ :c:func:`PyArray_DiscardWritebackIfCopy`.
Returns 0 if nothing was done, -1 on error, and 1 if action was taken.
.. c:function:: PyArray_DiscardWritebackIfCopy(PyObject* obj)
- Reset the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` and deprecated
- :c:data:`NPY_ARRAY_UPDATEIFCOPY` flag. Resets the
- :c:data:`NPY_ARRAY_WRITEABLE` flag on the base object. It also
- discards pending changes to the base object. This is
- useful for recovering from an error condition when
- writeback semantics are used.
+ If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or (deprecated)
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY`, this function clears the flags, `DECREF` s
+ `obj->base` and makes it writeable, and sets ``obj->base`` to NULL. In
+ contrast to :c:func:`PyArray_ResolveWritebackIfCopy` it makes no attempt
+ to copy the data back to ``obj->base``. This undoes
+ :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called after an
+ error when you are finished with ``obj``, just before ``Py_DECREF(obj)``.
+ It may be called multiple times, or with ``NULL`` input.
.. c:function:: PyArray_XDECREF_ERR(PyObject* obj)
* NPY_FPE_UNDERFLOW
* NPY_FPE_INVALID
+ Note that :c:func:`npy_get_floatstatus_barrier` is preferable as it prevents
+ aggressive compiler optimizations reordering the call relative to
+ the code setting the status, which could lead to incorrect results.
+
.. versionadded:: 1.9.0
+.. c:function:: int npy_get_floatstatus_barrier(char*)
+
+ Get floating point status. A pointer to a local variable is passed in to
+ prevent aggressive compiler optimizations from reordering this function call
+ relative to the code setting the status, which could lead to incorrect
+ results.
+
+ Returns a bitmask with the following possible flags:
+
+ * NPY_FPE_DIVIDEBYZERO
+ * NPY_FPE_OVERFLOW
+ * NPY_FPE_UNDERFLOW
+ * NPY_FPE_INVALID
+
+ .. versionadded:: 1.15.0
+
.. c:function:: int npy_clear_floatstatus()
Clears the floating point status. Returns the previous status mask.
+ Note that :c:func:`npy_clear_floatstatus_barrier` is preferable as it
+ prevents aggressive compiler optimizations reordering the call relative to
+ the code setting the status, which could lead to incorrect results.
+
.. versionadded:: 1.9.0
+.. c:function:: int npy_clear_floatstatus_barrier(char*)
+
+ Clears the floating point status. A pointer to a local variable is passed in to
+ prevent aggressive compiler optimizations from reordering this function call.
+ Returns the previous status mask.
+
+ .. versionadded:: 1.15.0
Complex functions
~~~~~~~~~~~~~~~~~
enumerates the dimension names according to the order of the first
occurrence of each name in the signature.
+.. _details-of-signature:
Details of Signature
--------------------
<Output arguments> ::= <Argument list>
<Argument list> ::= nil | <Argument> | <Argument> "," <Argument list>
<Argument> ::= "(" <Core dimension list> ")"
- <Core dimension list> ::= nil | <Dimension name> |
- <Dimension name> "," <Core dimension list>
- <Dimension name> ::= valid Python variable name
+ <Core dimension list> ::= nil | <Core dimension name> |
+ <Core dimension name> "," <Core dimension list>
+ <Core dimension name> ::= valid Python variable name
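+
+ For example, matrix multiplication maps two 2-d subarrays onto a 2-d
+ subarray, giving the signature ``(m,n),(n,p)->(m,p)``, while an inner
+ product over the last core dimension has the signature ``(i),(i)->()``.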
Notes:
Indicate how the user of the iterator will read or write
to ``op[i]``. Exactly one of these flags must be specified
- per operand.
+ per operand. Using ``NPY_ITER_READWRITE`` or ``NPY_ITER_WRITEONLY``
+ for a user-provided operand may trigger ``WRITEBACKIFCOPY``
+ semantics. The data will be written back to the original array
+ when ``NpyIter_Deallocate`` is called.
.. c:var:: NPY_ITER_COPY
Triggers :c:data:`NPY_ITER_COPY`, and when an array operand
is flagged for writing and is copied, causes the data
- in a copy to be copied back to ``op[i]`` when the iterator
- is destroyed.
+ in a copy to be copied back to ``op[i]`` when
+ ``NpyIter_Deallocate`` is called.
If the operand is flagged as write-only and a copy is needed,
an uninitialized temporary array will be created and then copied
- to back to ``op[i]`` on destruction, instead of doing
- the unnecessary copy operation.
+ back to ``op[i]`` on calling ``NpyIter_Deallocate``, instead of
+ doing the unnecessary copy operation.
.. c:var:: NPY_ITER_NBO
.. c:var:: NPY_ITER_ALIGNED
the functions will pass back errors through it instead of setting
a Python exception.
+ :c:func:`NpyIter_Deallocate` must be called for each copy.
+
.. c:function:: int NpyIter_RemoveAxis(NpyIter* iter, int axis)
Removes an axis from iteration. This requires that
.. c:function:: int NpyIter_Deallocate(NpyIter* iter)
- Deallocates the iterator object. This additionally frees any
- copies made, triggering UPDATEIFCOPY behavior where necessary.
+ Deallocates the iterator object and resolves any needed writebacks.
Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
the corresponding 1-d loop function in the func array.
:param types:
- Must be of length (*nin* + *nout*) \* *ntypes*, and it
- contains the data-types (built-in only) that the corresponding
- function in the *func* array can deal with.
+ Length ``(nin + nout) * ntypes`` array of ``char`` encoding the
+ :ref:`PyArray_Descr.type_num` (built-in only) that the corresponding
+ function in the ``func`` array accepts. For instance, for a comparison
+ ufunc with three ``ntypes``, two ``nin`` and one ``nout``, where the
+ first function accepts :ref:`npy_int32` and the second
+ :ref:`npy_int64`, with both returning :ref:`npy_bool`, ``types`` would
+ be ``(char[]) {5, 5, 0, 7, 7, 0}`` since ``NPY_INT32`` is 5,
+ ``NPY_INT64`` is 7, and ``NPY_BOOL`` is 0 (on the python side, these
+ are exposed via :ref:`dtype.num`, i.e., for the example here,
+ ``dtype(np.int32).num``, ``dtype(np.int64).num``, and
+ ``dtype(np.bool_).num``, resp.).
+
+ :ref:`casting-rules` will be used at runtime to find the first
+ ``func`` callable by the input/output provided.
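+
+ As a quick check from Python (a sketch; the exact numbers are
+ platform-dependent, e.g. ``np.int64`` maps to ``NPY_LONG`` only on
+ typical LP64 platforms):
+
+ >>> import numpy as np
+ >>> np.dtype(np.bool_).num, np.dtype(np.int32).num, np.dtype(np.int64).num
+ (0, 5, 7)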
:param ntypes:
- How many different data-type "signatures" the ufunc has implemented.
+ How many different data-type-specific functions the ufunc has implemented.
:param nin:
The number of inputs to this operation.
int nin, int nout, int identity, char* name, char* doc, int unused, char *signature)
This function is very similar to PyUFunc_FromFuncAndData above, but has
- an extra *signature* argument, to define generalized universal functions.
+ an extra *signature* argument, to define a
+ :ref:`generalized universal function <c-api.generalized-ufuncs>`.
Similarly to how ufuncs are built around an element-by-element operation,
- gufuncs are around subarray-by-subarray operations, the signature defining
- the subarrays to operate on.
+ gufuncs are built around subarray-by-subarray operations, the
+ :ref:`signature <details-of-signature>` defining the subarrays to operate on.
:param signature:
The signature for the new gufunc. Setting it to NULL is equivalent
`Guide to NumPy <https://archive.org/details/NumPyBook>`__ (which generously
entered Public Domain in August 2008). The reference documentation for many of
the functions are written by numerous contributors and developers of
-NumPy, both prior to and during the
-`NumPy Documentation Marathon
-<http://scipy.org/Developer_Zone/DocMarathon2008>`__.
-
-Please help to improve NumPy's documentation! Instructions on how to
-join the ongoing documentation marathon can be found
-`on the scipy.org website <http://scipy.org/Developer_Zone/DocMarathon2008>`__
+NumPy.
\ No newline at end of file
:toctree: generated/
take
+ take_along_axis
choose
compress
diag
place
put
+ put_along_axis
putmask
fill_diagonal
savez_compressed
The format of these binary file types is documented in
-http://numpy.github.io/neps/npy-format.html
+:py:mod:`numpy.lib.format`
Text files
----------
set_printoptions
get_printoptions
set_string_function
+ printoptions
Base-n representations
----------------------
:toctree: generated/
DataSource
+
+Binary Format Description
+-------------------------
+.. autosummary::
+ :template: autosummary/minimal_module.rst
+ :toctree: generated/
+
+ lib.format
ma.MaskedArray.squeeze
+ ma.stack
ma.column_stack
ma.concatenate
ma.dstack
.. autosummary::
:toctree: generated/
+ ma.stack
ma.column_stack
ma.concatenate
ma.append
ptp
percentile
nanpercentile
+ quantile
+ nanquantile
Averages and variances
----------------------
histogram2d
histogramdd
bincount
+ histogram_bin_edges
digitize
+.. _numpy-testing:
+
Test Support (:mod:`numpy.testing`)
===================================
Common test support for all numpy test scripts.
This single module should provide all the common functionality for numpy
-tests in a single location, so that test scripts can just import it and
-work right away.
+tests in a single location, so that :ref:`test scripts
+<development-environment>` can just import it and work right away. For
+background, see the :ref:`testing-guidelines`.
Asserts
run_module_suite
rundocs
suppress_warnings
+
+Guidelines
+----------
+
+.. toctree::
+
+ testing
Testing Organization
--------------------
-There are three indepedent testing frameworks supported, for one-,
+There are three independent testing frameworks supported, for one-,
two-, and three-dimensional arrays respectively. For one-dimensional
arrays, there are two C++ files, a header and a source, named::
--- /dev/null
+.. _testing-guidelines:
+
+Testing Guidelines
+==================
+
+.. include:: ../../TESTS.rst.txt
+ :start-line: 6
multiple outputs is deprecated, and will raise a warning in numpy 1.10,
and an error in a future release.
+ If 'out' is None (the default), an uninitialized return array is created.
+ The output array is then filled with the results of the ufunc in the places
+ that the broadcast 'where' is True. If 'where' is the scalar True (the
+ default), then this corresponds to the entire output being filled.
+ Note that outputs not explicitly filled are left with their
+ uninitialized values.
+
*where*
.. versionadded:: 1.7
of False indicate to leave the value in the output alone. This argument
cannot be used for generalized ufuncs as those take non-scalar input.
+ Note that if an uninitialized return array is created, values of False
+ will leave those values **uninitialized**.
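+
+ For example (a minimal sketch), pre-initializing the output keeps the
+ unselected entries well defined:
+
+ >>> out = np.full(3, -99.)
+ >>> np.add([1., 2., 3.], 1., out=out, where=[True, False, True])
+ array([  2., -99.,   4.])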
+
*axes*
.. versionadded:: 1.15
and for generalized ufuncs for which all outputs are scalars, the output
tuples can be omitted.
+*axis*
+
+ .. versionadded:: 1.15
+
+ A single axis over which a generalized ufunc should operate. This is a
+ short-cut for ufuncs that operate over a single, shared core dimension,
+ equivalent to passing in ``axes`` with entries of ``(axis,)`` for each
+ single-core-dimension argument and ``()`` for all others. For instance,
+ for a signature ``(i),(i)->()``, it is equivalent to passing in
+ ``axes=[(axis,), (axis,), ()]``.
+
+*keepdims*
+
+ .. versionadded:: 1.15
+
+ If this is set to `True`, axes which are reduced over will be left in the
+ result as a dimension with size one, so that the result will broadcast
+ correctly against the inputs. This option can only be used for generalized
+ ufuncs that operate on inputs that all have the same number of core
+ dimensions and with outputs that have no core dimensions, i.e., with
+ signatures like ``(i),(i)->()`` or ``(m,m)->()``. If used, the location of
+ the dimensions in the output can be controlled with ``axes`` and ``axis``.
+
*casting*
.. versionadded:: 1.6
provided by the **types** attribute of the ufunc object. For backwards
compatibility this argument can also be provided as *sig*, although
the long form is preferred. Note that this should not be confused with
- the generalized ufunc signature that is stored in the **signature**
- attribute of the of the ufunc object.
+ the generalized ufunc :ref:`signature <details-of-signature>` that is
+ stored in the **signature** attribute of the ufunc object.
*extobj*
Release Notes
*************
+.. include:: ../release/1.15.1-notes.rst
.. include:: ../release/1.15.0-notes.rst
+.. include:: ../release/1.14.5-notes.rst
+.. include:: ../release/1.14.4-notes.rst
+.. include:: ../release/1.14.3-notes.rst
.. include:: ../release/1.14.2-notes.rst
.. include:: ../release/1.14.1-notes.rst
.. include:: ../release/1.14.0-notes.rst
.. include:: ../release/1.5.0-notes.rst
.. include:: ../release/1.4.0-notes.rst
.. include:: ../release/1.3.0-notes.rst
+
When giving examples, we will use the following conventions::
>>> import numpy as np
- >>> from io import BytesIO
+ >>> from io import StringIO
the data. It can be a string, a list of strings, or a generator. If a
single string is provided, it is assumed to be the name of a local or
remote file, or an open file-like object with a :meth:`read` method, for
-example, a file or :class:`StringIO.StringIO` object. If a list of strings
+example, a file or :class:`io.StringIO` object. If a list of strings
or a generator returning strings is provided, each string is treated as one
line in a file. When the URL of a remote file is passed, the file is
automatically downloaded to the current directory and opened.
example, comma-separated files (CSV) use a comma (``,``) or a semicolon
(``;``) as delimiter::
- >>> data = "1, 2, 3\n4, 5, 6"
- >>> np.genfromtxt(BytesIO(data), delimiter=",")
+ >>> data = u"1, 2, 3\n4, 5, 6"
+ >>> np.genfromtxt(StringIO(data), delimiter=",")
array([[ 1., 2., 3.],
[ 4., 5., 6.]])
``delimiter`` to a single integer (if all the columns have the same
size) or to a sequence of integers (if columns can have different sizes)::
- >>> data = " 1 2 3\n 4 5 67\n890123 4"
- >>> np.genfromtxt(BytesIO(data), delimiter=3)
+ >>> data = u" 1 2 3\n 4 5 67\n890123 4"
+ >>> np.genfromtxt(StringIO(data), delimiter=3)
array([[ 1., 2., 3.],
[ 4., 5., 67.],
[ 890., 123., 4.]])
- >>> data = "123456789\n 4 7 9\n 4567 9"
- >>> np.genfromtxt(BytesIO(data), delimiter=(4, 3, 2))
+ >>> data = u"123456789\n 4 7 9\n 4567 9"
+ >>> np.genfromtxt(StringIO(data), delimiter=(4, 3, 2))
array([[ 1234., 567., 89.],
[ 4., 7., 9.],
[ 4., 567., 9.]])
This behavior can be overwritten by setting the optional argument
``autostrip`` to a value of ``True``::
- >>> data = "1, abc , 2\n 3, xxx, 4"
+ >>> data = u"1, abc , 2\n 3, xxx, 4"
>>> # Without autostrip
- >>> np.genfromtxt(BytesIO(data), delimiter=",", dtype="|U5")
+ >>> np.genfromtxt(StringIO(data), delimiter=",", dtype="|U5")
array([['1', ' abc ', ' 2'],
['3', ' xxx', ' 4']],
dtype='|U5')
>>> # With autostrip
- >>> np.genfromtxt(BytesIO(data), delimiter=",", dtype="|U5", autostrip=True)
+ >>> np.genfromtxt(StringIO(data), delimiter=",", dtype="|U5", autostrip=True)
array([['1', 'abc', '2'],
['3', 'xxx', '4']],
dtype='|U5')
occur anywhere on the line. Any character present after the comment
marker(s) is simply ignored::
- >>> data = """#
+ >>> data = u"""#
... # Skip me !
... # Skip me too !
... 1, 2
... # And here comes the last line
... 9, 0
... """
- >>> np.genfromtxt(BytesIO(data), comments="#", delimiter=",")
+ >>> np.genfromtxt(StringIO(data), comments="#", delimiter=",")
array([[ 1.,  2.],
       [ 3.,  4.],
       [ 5.,  6.],
       [ 7.,  8.],
       [ 9.,  0.]])
+.. versionadded:: 1.7.0
+
+ When ``comments`` is set to ``None``, no lines are treated as comments.
+
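+ A sketch (the displayed dtype may vary between NumPy versions):
+
+ >>> data = u"1, 2, #3"
+ >>> np.genfromtxt(StringIO(data), delimiter=",", comments=None, dtype="|U5")
+ array(['1', ' 2', ' #3'], dtype='|U5')
+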
.. note::
There is one notable exception to this behavior: if the optional argument
``names=True``, the first commented line will be examined for names.
-
Skipping lines and choosing columns
===================================
performed. Similarly, we can skip the last ``n`` lines of the file by
using the ``skip_footer`` attribute and giving it a value of ``n``::
- >>> data = "\n".join(str(i) for i in range(10))
- >>> np.genfromtxt(BytesIO(data),)
+ >>> data = u"\n".join(str(i) for i in range(10))
+ >>> np.genfromtxt(StringIO(data),)
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
- >>> np.genfromtxt(BytesIO(data),
+ >>> np.genfromtxt(StringIO(data),
... skip_header=3, skip_footer=5)
array([ 3., 4.])
For example, if we want to import only the first and the last columns, we
can use ``usecols=(0, -1)``::
- >>> data = "1 2 3\n4 5 6"
- >>> np.genfromtxt(BytesIO(data), usecols=(0, -1))
+ >>> data = u"1 2 3\n4 5 6"
+ >>> np.genfromtxt(StringIO(data), usecols=(0, -1))
array([[ 1., 3.],
[ 4., 6.]])
giving their name to the ``usecols`` argument, either as a sequence
of strings or a comma-separated string::
- >>> data = "1 2 3\n4 5 6"
- >>> np.genfromtxt(BytesIO(data),
+ >>> data = u"1 2 3\n4 5 6"
+ >>> np.genfromtxt(StringIO(data),
... names="a, b, c", usecols=("a", "c"))
array([(1.0, 3.0), (4.0, 6.0)],
dtype=[('a', '<f8'), ('c', '<f8')])
- >>> np.genfromtxt(BytesIO(data),
+ >>> np.genfromtxt(StringIO(data),
... names="a, b, c", usecols=("a, c"))
array([(1.0, 3.0), (4.0, 6.0)],
dtype=[('a', '<f8'), ('c', '<f8')])
each column. A first possibility is to use an explicit structured dtype,
as mentioned previously::
- >>> data = BytesIO("1 2 3\n 4 5 6")
+ >>> data = StringIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=[(_, int) for _ in "abc"])
array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', '<i8'), ('b', '<i8'), ('c', '<i8')])
Another simpler possibility is to use the ``names`` keyword with a
sequence of strings or a comma-separated string::
- >>> data = BytesIO("1 2 3\n 4 5 6")
+ >>> data = StringIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, names="A, B, C")
array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)],
dtype=[('A', '<f8'), ('B', '<f8'), ('C', '<f8')])
``True``. The names will then be read from the first line (after the
``skip_header`` ones), even if the line is commented out::
- >>> data = BytesIO("So it goes\n#a b c\n1 2 3\n 4 5 6")
+ >>> data = StringIO("So it goes\n#a b c\n1 2 3\n 4 5 6")
>>> np.genfromtxt(data, skip_header=1, names=True)
array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)],
dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
value to the keyword, the new names will overwrite the field names we may
have defined with the dtype::
- >>> data = BytesIO("1 2 3\n 4 5 6")
+ >>> data = StringIO("1 2 3\n 4 5 6")
>>> ndtype=[('a',int), ('b', float), ('c', int)]
>>> names = ["A", "B", "C"]
>>> np.genfromtxt(data, names=names, dtype=ndtype)
with the standard NumPy default of ``"f%i"``, yielding names like ``f0``,
``f1`` and so forth::
- >>> data = BytesIO("1 2 3\n 4 5 6")
+ >>> data = StringIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=(int, float, int))
array([(1, 2.0, 3), (4, 5.0, 6)],
dtype=[('f0', '<i8'), ('f1', '<f8'), ('f2', '<i8')])
In the same way, if we don't give enough names to match the length of the
dtype, the missing names will be defined with this default template::
- >>> data = BytesIO("1 2 3\n 4 5 6")
+ >>> data = StringIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=(int, float, int), names="a")
array([(1, 2.0, 3), (4, 5.0, 6)],
dtype=[('a', '<i8'), ('f0', '<f8'), ('f1', '<i8')])
We can overwrite this default with the ``defaultfmt`` argument, that
takes any format string::
- >>> data = BytesIO("1 2 3\n 4 5 6")
+ >>> data = StringIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=(int, float, int), defaultfmt="var_%02i")
array([(1, 2.0, 3), (4, 5.0, 6)],
dtype=[('var_00', '<i8'), ('var_01', '<f8'), ('var_02', '<i8')])
representing a percentage to a float between 0 and 1::
>>> convertfunc = lambda x: float(x.strip("%"))/100.
- >>> data = "1, 2.3%, 45.\n6, 78.9%, 0"
+ >>> data = u"1, 2.3%, 45.\n6, 78.9%, 0"
>>> names = ("i", "p", "n")
>>> # General case .....
- >>> np.genfromtxt(BytesIO(data), delimiter=",", names=names)
+ >>> np.genfromtxt(StringIO(data), delimiter=",", names=names)
array([(1.0, nan, 45.0), (6.0, nan, 0.0)],
dtype=[('i', '<f8'), ('p', '<f8'), ('n', '<f8')])
``np.nan`` instead. Let's now use a converter::
>>> # Converted case ...
- >>> np.genfromtxt(BytesIO(data), delimiter=",", names=names,
+ >>> np.genfromtxt(StringIO(data), delimiter=",", names=names,
... converters={1: convertfunc})
array([(1.0, 0.023, 45.0), (6.0, 0.78900000000000003, 0.0)],
dtype=[('i', '<f8'), ('p', '<f8'), ('n', '<f8')])
(``"p"``) as key instead of its index (1)::
>>> # Using a name for the converter ...
- >>> np.genfromtxt(BytesIO(data), delimiter=",", names=names,
+ >>> np.genfromtxt(StringIO(data), delimiter=",", names=names,
... converters={"p": convertfunc})
array([(1.0, 0.023, 45.0), (6.0, 0.78900000000000003, 0.0)],
dtype=[('i', '<f8'), ('p', '<f8'), ('n', '<f8')])
We need to explicitly strip the string from white spaces as it is not done
by default::
- >>> data = "1, , 3\n 4, 5, 6"
+ >>> data = u"1, , 3\n 4, 5, 6"
>>> convert = lambda x: float(x.strip() or -999)
- >>> np.genfromtxt(BytesIO(data), delimiter=",",
+ >>> np.genfromtxt(StringIO(data), delimiter=",",
... converters={1: convert})
array([[ 1., -999., 3.],
[ 4., 5., 6.]])
We wish to transform these missing values to 0 if they occur in the first
and second column, and to -999 if they occur in the last column::
- >>> data = "N/A, 2, 3\n4, ,???"
+ >>> data = u"N/A, 2, 3\n4, ,???"
>>> kwargs = dict(delimiter=",",
... dtype=int,
... names="a,b,c",
... missing_values={0:"N/A", 'b':" ", 2:"???"},
... filling_values={0:0, 'b':0, 2:-999})
- >>> np.genfromtxt(BytesIO(data), **kwargs)
+ >>> np.genfromtxt(StringIO(data), **kwargs)
array([(0, 2, 3), (4, 0, -999)],
dtype=[('a', '<i8'), ('b', '<i8'), ('c', '<i8')])
*****************
.. automodule:: numpy.doc.structured_arrays
+
+Recarray Helper Functions
+*************************
+
+.. automodule:: numpy.lib.recfunctions
+ :members:
Additional compiler flags can be supplied by setting the ``OPT``,
``FOPT`` (for Fortran), and ``CC`` environment variables.
+When providing options that should improve the performance of the code, ensure
+that you also set ``-DNDEBUG`` so that debugging code is not executed.
Building with ATLAS support
Before reading this, it may help to familiarize yourself with the basics
of C extensions for Python by reading/skimming the tutorials in Section 1
of `Extending and Embedding the Python Interpreter
-<http://docs.python.org/extending/index.html>`_ and in `How to extend
-NumPy <http://docs.scipy.org/doc/numpy/user/c-info.how-to-extend.html>`_
+<http://docs.python.org/extending/index.html>`_ and in :doc:`How to extend
+NumPy <c-info.how-to-extend>`
The umath module is a computer-generated C-module that creates many
ufuncs. It provides a great many examples of how to create a universal
What follows is the full specification of PyUFunc_FromFuncAndData, which
automatically generates a ufunc from a C function with the correct signature.
+.. seealso:: :c:func:`PyUFunc_FromFuncAndDataAndSignature`
.. c:function:: PyObject *PyUFunc_FromFuncAndData( \
PyUFuncGenericFunction* func, void** data, char* types, int ntypes, \
in linear algebra.
- In NumPy the basic type is a multidimensional ``array``. Operations
on these arrays in all dimensionalities including 2D are element-wise
- operations. However, there is a special ``matrix`` type for doing
- linear algebra, which is just a subclass of the ``array`` class.
- Operations on matrix-class arrays are linear algebra operations.
+ operations. One needs to use specific functions for linear algebra
+ (though for matrix multiplication, one can use the ``@`` operator
+ in python 3.5 and above).
* - MATLAB® uses 1 (one) based indexing. The initial element of a
sequence is found using a(1).
an excellent general-purpose programming language. While Matlab's
syntax for some array manipulations is more compact than
NumPy's, NumPy (by virtue of being an add-on to Python) can do many
- things that Matlab just cannot, for instance subclassing the main
- array type to do both array and matrix math cleanly.
+ things that Matlab just cannot, for instance dealing properly with
+ stacks of matrices.
* - In MATLAB®, arrays have pass-by-value semantics, with a lazy
copy-on-write scheme to prevent actually creating copies until they
'array' or 'matrix'? Which should I use?
========================================
-NumPy provides, in addition to ``np.ndarray``, an additional matrix type
-that you may see used in some existing code. Which one to use?
+Historically, NumPy has provided a special matrix type, `np.matrix`, which
+is a subclass of ndarray which makes binary operations linear algebra
+operations. You may see it used in some existing code instead of `np.array`.
+So, which one to use?
Short answer
------------
(scalar product, matrix vector multiplication etc.). Since Python 3.5 you
can use the matrix multiplication ``@`` operator.
+Given the above, we intend to deprecate ``matrix`` eventually.
+
Long answer
-----------
facilitate linear algebra computations specifically. In practice there
are only a handful of key differences between the two.
-- Operator ``*``, ``dot()``, and ``multiply()``:
+- Operators ``*`` and ``@``, and functions ``dot()`` and ``multiply()``:
- - For ``array``, **'``*``\ ' means element-wise multiplication**,
- and the ``dot()`` function is used for matrix multiplication.
- - For ``matrix``, **'``*``\ ' means matrix multiplication**, and the
- ``multiply()`` function is used for element-wise multiplication.
+ - For ``array``, ``*`` means **element-wise multiplication**, while
+ ``@`` means **matrix multiplication**; they have associated functions
+ ``multiply()`` and ``dot()``. (Before Python 3.5, ``@`` did not exist
+ and one had to use ``dot()`` for matrix multiplication).
+ - For ``matrix``, ``*`` means **matrix multiplication**, and for
+ element-wise multiplication one has to use the ``multiply()`` function.
- Handling of vectors (one-dimensional arrays)
- ``array``
+ - ``:)`` Element-wise multiplication is easy: ``A*B``.
+ - ``:(`` You have to remember that matrix multiplication has its own
+ operator, ``@``.
- ``:)`` You can treat one-dimensional arrays as *either* row or column
- vectors. ``dot(A,v)`` treats ``v`` as a column vector, while
- ``dot(v,A)`` treats ``v`` as a row vector. This can save you having to
+ vectors. ``A @ v`` treats ``v`` as a column vector, while
+ ``v @ A`` treats ``v`` as a row vector. This can save you having to
type a lot of transposes.
- - ``<:(`` Having to use the ``dot()`` function for matrix-multiply is
- messy -- ``dot(dot(A,B),C)`` vs. ``A*B*C``. This isn't an issue with
- Python >= 3.5 because the ``@`` operator allows it to be written as
- ``A @ B @ C``.
- - ``:)`` Element-wise multiplication is easy: ``A*B``.
- ``:)`` ``array`` is the "default" NumPy type, so it gets the most
testing, and is the type most likely to be returned by 3rd party
code that uses NumPy.
with that.
- ``:)`` *All* operations (``*``, ``/``, ``+``, ``-`` etc.) are
element-wise.
+ - ``:(`` Sparse matrices from ``scipy.sparse`` do not interact as well
+ with arrays.
- ``matrix``
argument. This shouldn't happen with NumPy functions (if it does
it's a bug), but 3rd party code based on NumPy may not honor type
preservation like NumPy does.
- - ``:)`` ``A*B`` is matrix multiplication, so more convenient for
- linear algebra (For Python >= 3.5 plain arrays have the same convenience
- with the ``@`` operator).
+ - ``:)`` ``A*B`` is matrix multiplication, so it looks just like you write
+ it in linear algebra (For Python >= 3.5 plain arrays have the same
+ convenience with the ``@`` operator).
- ``<:(`` Element-wise multiplication requires calling a function,
``multiply(A,B)``.
- ``<:(`` The use of operator overloading is a bit illogical: ``*``
does not work element-wise but ``/`` does.
+ - Interaction with ``scipy.sparse`` is a bit cleaner.
-The ``array`` is thus much more advisable to use.
-
-Facilities for Matrix Users
-===========================
-
-NumPy has some features that facilitate the use of the ``matrix`` type,
-which hopefully make things easier for Matlab converts.
-
-- A ``matlib`` module has been added that contains matrix versions of
- common array constructors like ``ones()``, ``zeros()``, ``empty()``,
- ``eye()``, ``rand()``, ``repmat()``, etc. Normally these functions
- return ``array``\ s, but the ``matlib`` versions return ``matrix``
- objects.
-- ``mat`` has been changed to be a synonym for ``asmatrix``, rather
- than ``matrix``, thus making it a concise way to convert an ``array``
- to a ``matrix`` without copying the data.
-- Some top-level functions have been removed. For example
- ``numpy.rand()`` now needs to be accessed as ``numpy.random.rand()``.
- Or use the ``rand()`` from the ``matlib`` module. But the
- "numpythonic" way is to use ``numpy.random.random()``, which takes a
- tuple for the shape, like other numpy functions.
+The ``array`` is thus much more advisable to use. Indeed, we intend to
+deprecate ``matrix`` eventually.
Table of Rough MATLAB-NumPy Equivalents
=======================================
taken as hints to get you going in the right direction. For more detail
read the built-in documentation on the NumPy functions.
-Some care is necessary when writing functions that take arrays or
-matrices as arguments --- if you are expecting an ``array`` and are
-given a ``matrix``, or vice versa, then '\*' (multiplication) will give
-you unexpected results. You can convert back and forth between arrays
-and matrices using
-
-- ``asarray``: always returns an object of type ``array``
-- ``asmatrix`` or ``mat``: always return an object of type
- ``matrix``
-- ``asanyarray``: always returns an ``array`` object or a subclass
- derived from it, depending on the input. For instance if you pass in
- a ``matrix`` it returns a ``matrix``.
-
-These functions all accept both arrays and matrices (among other things
-like Python lists), and thus are useful when writing functions that
-should accept any array-like object.
-
In the table below, it is assumed that you have executed the following
commands in Python:
- 2x3 matrix literal
* - ``[ a b; c d ]``
- - ``vstack([hstack([a,b]), hstack([c,d])])`` or
- ``block([[a, b], [c, d])``
+ - ``block([[a,b], [c,d]])``
- construct a matrix from blocks ``a``, ``b``, ``c``, and ``d``
* - ``a(end)``
- conjugate transpose of ``a``
* - ``a * b``
- - ``a.dot(b)`` or ``a@b`` (Python 3.5 or newer)
+ - ``a @ b``
- matrix multiply
* - ``a .* b``
from each pair
* - ``norm(v)``
- - ``sqrt(dot(v,v))`` or ``np.linalg.norm(v)``
+ - ``sqrt(v @ v)`` or ``np.linalg.norm(v)``
- L2 norm of vector ``v``
* - ``a & b``
Unlike in many matrix languages, the product operator ``*`` operates
elementwise in NumPy arrays. The matrix product can be performed using
-the ``dot`` function or method::
+the ``@`` operator (in python >=3.5) or the ``dot`` function or method::
>>> A = np.array( [[1,1],
... [0,1]] )
>>> B = np.array( [[2,0],
... [3,4]] )
- >>> A*B # elementwise product
+ >>> A * B # elementwise product
array([[2, 0],
[0, 4]])
- >>> A.dot(B) # matrix product
+ >>> A @ B # matrix product
array([[5, 4],
[3, 4]])
- >>> np.dot(A, B) # another matrix product
+ >>> A.dot(B) # another matrix product
array([[5, 4],
[3, 4]])
When operating and manipulating arrays, their data is sometimes copied
into a new array and sometimes not. This is often a source of confusion
-for beginners. There are three cases::
+for beginners. There are three cases:
No Copy at All
--------------
[ 0., 1.]])
>>> j = np.array([[0.0, -1.0], [1.0, 0.0]])
- >>> np.dot (j, j) # matrix product
+ >>> j @ j # matrix product
array([[-1., 0.],
[ 0., -1.]])
>>> mu, sigma = 2, 0.5
>>> v = np.random.normal(mu,sigma,10000)
>>> # Plot a normalized histogram with 50 bins
- >>> plt.hist(v, bins=50, normed=1) # matplotlib version (plot)
+ >>> plt.hist(v, bins=50, density=1) # matplotlib version (plot)
>>> plt.show()
>>> # Compute the histogram with numpy and then plot it
- >>> (n, bins) = np.histogram(v, bins=50, normed=True) # NumPy version (no plot)
+ >>> (n, bins) = np.histogram(v, bins=50, density=True) # NumPy version (no plot)
>>> plt.plot(.5*(bins[1:]+bins[:-1]), n)
>>> plt.show()
import os, glob, re, sys, inspect, optparse
try:
- # Accessing collections abstact classes from collections
+ # Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
pkgload.__doc__ = PackageLoader.__call__.__doc__
- # We don't actually use this ourselves anymore, but I'm not 100% sure that
- # no-one else in the world is using it (though I hope not)
- from .testing import Tester, _numpy_tester
- test = _numpy_tester().test
- bench = _numpy_tester().bench
-
# Allow distributors to run custom init code
from . import _distributor_init
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
-
- # Filter annoying Cython warnings that serve no good purpose.
+ # Filter out harmless Cython warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
# but do not use them, we define them here for backward compatibility.
oldnumeric = 'removed'
numarray = 'removed'
+
+ # We don't actually use this ourselves anymore, but I'm not 100% sure that
+ # no-one else in the world is using it (though I hope not)
+ from .testing import Tester
+
+ # Pytest testing
+ from numpy.testing._private.pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+
+
+ def _sanity_check():
+ """
+ Quick sanity checks for common bugs caused by environment.
+ There are some cases e.g. with wrong BLAS ABI that cause wrong
+ results under specific runtime conditions that are not necessarily
+ achieved during test suite runs, and it is useful to catch those early.
+
+ See https://github.com/numpy/numpy/issues/8577 and other
+ similar bug reports.
+
+ """
+ try:
+ x = ones(2, dtype=float32)
+ if not abs(x.dot(x) - 2.0) < 1e-5:
+ raise AssertionError()
+ except AssertionError:
+ msg = ("The current Numpy installation ({!r}) fails to "
+ "pass simple sanity checks. This can be caused for example "
+ "by incorrect BLAS library being linked in.")
+ raise RuntimeError(msg.format(__file__))
+
+ _sanity_check()
+ del _sanity_check
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
+ Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
- and optimized iterator access pattern.
+ and optimized iterator access pattern. Valid only before the iterator
+ is closed.
multi_index
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
nop : int
The number of iterator operands.
operands : tuple of operand(s)
- The array(s) to be iterated over.
+ The array(s) to be iterated over. Valid only before the iterator is
+ closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
- for (a, b, c) in it:
- addop(a, b, out=c)
+ with it:
+ for (a, b, c) in it:
+ addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ while not it.finished:
+ addop(it[0], it[1], out=it[2])
+ it.iternext()
- while not it.finished:
- addop(it[0], it[1], out=it[2])
- it.iternext()
-
- return it.operands[2]
+ return it.operands[2]
Here is an example outer product function::
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
- op_axes=[range(x.ndim)+[-1]*y.ndim,
- [-1]*x.ndim+range(y.ndim),
+ op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
+ [-1] * x.ndim + list(range(y.ndim)),
None])
-
- for (a, b, c) in it:
- mulop(a, b, out=c)
-
- return it.operands[2]
+ with it:
+ for (a, b, c) in it:
+ mulop(a, b, out=c)
+ return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
while not it.finished:
it[0] = lamdaexpr(*it[1:])
it.iternext()
- return it.operands[0]
+ return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
+ If the operand flags `"writeonly"` or `"readwrite"` are used, the operands may
+ be views into the original data with the `WRITEBACKIFCOPY` flag. In this case
+ nditer must be used as a context manager or the nditer.close
+ method must be called before using the result. The temporary
+ data will be written back to the original data when the `__exit__`
+ function is called but not before:
+
+ >>> a = np.arange(6, dtype='i4')[::-2]
+ >>> with np.nditer(a, [],
+ ... [['writeonly', 'updateifcopy']],
+ ... casting='unsafe',
+ ... op_dtypes=[np.dtype('f4')]) as i:
+ ... x = i.operands[0]
+ ... x[:] = [-1, -2, -3]
+ ... # a still unchanged here
+ >>> a, x
+ (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
+
+ It is important to note that once the iterator is exited, dangling
+ references (like `x` in the example) may or may not share data with
+ the original data `a`. If writeback semantics were active, i.e. if
+ `x.base.flags.writebackifcopy` is `True`, then exiting the iterator
+ will sever the connection between `x` and `a`; writing to `x` will
+ no longer write to `a`. If writeback semantics are not active, then
+ `x.data` will still point at some part of `a.data`, and writing to
+ one will affect the other.
+
""")
# nditer methods
"""))
+add_newdoc('numpy.core', 'nditer', ('operands',
+ """
+ operands[`Slice`]
+
+ The array(s) to be iterated over. Valid only before the iterator is closed.
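+
+ A sketch of the failure mode (the exact message may differ):
+
+ >>> it = np.nditer(np.arange(2))
+ >>> it.close()
+ >>> it.operands
+ Traceback (most recent call last):
+ ...
+ ValueError: Iterator is closed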
+ """))
+
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
""")
+add_newdoc('numpy.core', 'nditer', ('close',
+ """
+ close()
+
+ Resolve all writeback semantics in writeable operands.
+
+ See Also
+ --------
+
+ :ref:`nditer-context-manager`
+
+ """))
###############################################################################
See Also
--------
- empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like
+ empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
Notes
-----
See Also
--------
- empty_like, zeros, ones
+ empty_like : Return an empty array with shape and type of input.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
Notes
-----
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
- ones : Return a new array setting values to one.
- zeros : Return a new array setting values to zero.
Notes
-----
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
- ones_like : Return an array of ones with shape and type of input.
- empty_like : Return an empty array with shape and type of input.
- ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ full : Return a new array of given shape filled with value.
Examples
--------
"""
lexsort(keys, axis=-1)
- Perform an indirect sort using a sequence of keys.
+ Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
- kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
+
+ When fid is a file object, array contents are directly written to the
+ file, bypassing the file object's ``write`` method. As a result, tofile
+ cannot be used with file objects supporting compression (e.g., GzipFile)
+ or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
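+
+ For such objects, one workaround (a sketch, not part of ``tofile``
+ itself) is to serialize through ``tobytes`` and the object's own
+ ``write`` method:
+
+ >>> import io
+ >>> buf = io.BytesIO()
+ >>> a = np.arange(3, dtype=np.int64)
+ >>> _ = buf.write(a.tobytes()) # instead of a.tofile(buf)
+ >>> np.frombuffer(buf.getvalue(), dtype=np.int64)
+ array([0, 1, 2])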
"""))
""")
+add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
+ """
+ format_float_OSprintf_g(val, precision)
+
+ Print a floating point scalar using the system's printf function,
+ equivalent to:
+
+ printf("%.*g", precision, val);
+
+ for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
+ method is designed to help cross-validate the format_float_* methods.
+
+ Parameters
+ ----------
+ val : python float or numpy floating scalar
+ Value to format.
+
+ precision : non-negative integer, optional
+ Precision given to printf.
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_scientific
+ format_float_positional
+ """)
+
##############################################################################
#
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
- number of outputs; use `None` for outputs to be allocated by the ufunc.
+ number of outputs; use `None` for uninitialized outputs to be
+ allocated by the ufunc.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
- of False indicate to leave the value in the output alone.
+ of False indicate to leave the value in the output alone. Note that if
+ an uninitialized return array is created via the default ``out=None``,
+ then the elements where the values are False will remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
- provided, `r` will be equal to `out`. If the function has more than one
+ provided, it will be returned. If not, `r` will be allocated and
+ may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
""")
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
- reduce(a, axis=0, dtype=None, out=None, keepdims=False)
+ reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
+ initial : scalar, optional
+ The value with which to start the reduction.
+ If the ufunc has no identity or the dtype is object, this defaults
+ to None; otherwise it defaults to ``ufunc.identity``.
+ If ``None`` is given, the first element of the reduction is used,
+ and an error is thrown if the reduction is empty.
+
+ .. versionadded:: 1.15.0
Returns
-------
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
-
+
+ You can use the ``initial`` keyword argument to initialize the reduction with a
+ different value.
+
+ >>> np.add.reduce([10], initial=5)
+ 15
+ >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
+ array([14., 14.])
+
+ It also allows reductions of empty arrays that would otherwise fail, i.e.
+ for ufuncs without an identity.
+
+ >>> np.minimum.reduce([], initial=np.inf)
+ inf
+ >>> np.minimum.reduce([])
+ Traceback (most recent call last):
+ ...
+ ValueError: zero-size array to reduction operation minimum which has no identity
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
- `a[indices] += b`, except that results are accumulated for elements that
- are indexed more than once. For example, `a[[0,0]] += 1` will only
+ ``a[indices] += b``, except that results are accumulated for elements that
+ are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
- `add.at(a, [0,0], 1)` will increment the first element twice.
+ ``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
>>> print(a)
[-1 -2  3  4]
- ::
-
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> print(a)
[2 3 5 4]
- ::
-
Add items 0 and 1 in first array to second array,
and store results in first array:
arr : array_like of datetime64
The array of UTC timestamps to format.
unit : str
- One of None, 'auto', or a datetime unit.
+ One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
timezone : {'naive', 'UTC', 'local'} or tzinfo
Timezone information to use when displaying the datetime. If 'UTC', end
with a Z to indicate UTC time. If 'local', convert to the local timezone
'2002-10-27T07:30Z'], dtype='<U35')
Note that we picked datetimes that cross a DST boundary. Passing in a
- ``pytz`` timezone object will print the appropriate offset::
+ ``pytz`` timezone object will print the appropriate offset
>>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
'2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
- Passing in a unit will change the precision::
+ Passing in a unit will change the precision
>>> np.datetime_as_string(d, unit='h')
array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
'2002-10-27T07:30:00'], dtype='<U38')
- But can be made to not lose precision::
+ ``casting`` can be used to specify whether precision can be changed
>>> np.datetime_as_string(d, unit='h', casting='safe')
TypeError: Cannot create a datetime string as units 'h' from a NumPy
from os.path import join
from numpy.compat import isfileobj
-from numpy.testing import assert_, run_module_suite
+from numpy.testing import assert_
from numpy.testing import tempdir
with open(filename, 'rb') as f:
assert_(isfileobj(f))
-
-
-if __name__ == "__main__":
- run_module_suite()
_collect_results = {}
+# FIXME: remove this hook when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
"""
if collect_result is not None:
old_mode, new_mode = collect_result
raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
- " when collecting the test".format(old_mode,
+ " when collecting the test".format(old_mode,
new_mode))
-def pytest_addoption(parser):
- parser.addoption("--runslow", action="store_true",
- default=False, help="run slow tests")
-
-
-def pytest_collection_modifyitems(config, items):
- if config.getoption("--runslow"):
- # --runslow given in cli: do not skip slow tests
- return
- skip_slow = pytest.mark.skip(reason="need --runslow option to run")
- for item in items:
- if "slow" in item.keywords:
- item.add_marker(skip_slow)
-
-
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace['np'] = numpy
-
-
-for module, replacement in {
- 'numpy.testing.decorators': 'numpy.testing.pytest_tools.decorators',
- 'numpy.testing.utils': 'numpy.testing.pytest_tools.utils',
-}.items():
- module = importlib.import_module(module)
- replacement = importlib.import_module(replacement)
- module.__dict__.clear()
- module.__dict__.update(replacement.__dict__)
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__
-
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
-
# Make it possible so that ufuncs can be pickled
# Here are the loading and unloading functions
# The name numpy.core._ufunc_reconstruct must be
del copyreg
del sys
del _ufunc_reduce
+
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
import re
import sys
-from numpy.compat import basestring
+from numpy.compat import basestring, unicode
from .multiarray import dtype, array, ndarray
try:
import ctypes
"""
oldnames = datatype.names
nameslist = list(oldnames)
- if isinstance(order, str):
+ if isinstance(order, (str, unicode)):
order = [order]
seen = set()
if isinstance(order, (list, tuple)):
out_args=out_args,
kwargs=kwargs
)
+
+
+def _is_from_ctypes(obj):
+ # determine if an object comes from ctypes, in order to work around
+ # a bug in the buffer protocol for those objects, bpo-10746
+ try:
+ # ctypes classes are new-style, so they have an __mro__. This probably fails
+ # for ctypes classes with multiple inheritance.
+ ctype_base = type(obj).__mro__[-2]
+ # right now, they're part of the _ctypes module
+ return 'ctypes' in ctype_base.__module__
+ except Exception:
+ return False
from numpy.core import umath as um
from numpy.core.numeric import asanyarray
from numpy.core import numerictypes as nt
+from numpy._globals import _NoValue
# save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
-def _amax(a, axis=None, out=None, keepdims=False):
- return umr_maximum(a, axis, None, out, keepdims)
+def _amax(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_maximum(a, axis, None, out, keepdims, initial)
-def _amin(a, axis=None, out=None, keepdims=False):
- return umr_minimum(a, axis, None, out, keepdims)
+def _amin(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_minimum(a, axis, None, out, keepdims, initial)
-def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
- return umr_sum(a, axis, dtype, out, keepdims)
+def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_sum(a, axis, dtype, out, keepdims, initial)
-def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
- return umr_prod(a, axis, dtype, out, keepdims)
+def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_prod(a, axis, dtype, out, keepdims, initial)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_any(a, axis, dtype, out, keepdims)
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
- can be set are::
-
- - 'bool'
- - 'int'
- - 'timedelta' : a `numpy.timedelta64`
- - 'datetime' : a `numpy.datetime64`
- - 'float'
- - 'longfloat' : 128-bit floats
- - 'complexfloat'
- - 'longcomplexfloat' : composed of two 128-bit floats
- - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- - 'object' : `np.object_` arrays
- - 'str' : all other strings
-
- Other keys that can be used to set a group of types at once are::
-
- - 'all' : sets all types
- - 'int_kind' : sets 'int'
- - 'float_kind' : sets 'float' and 'longfloat'
- - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- - 'str_kind' : sets 'str' and 'numpystr'
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
+ - 'object' : `np.object_` arrays
+ - 'str' : all other strings
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'str' and 'numpystr'
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values:
- - 'fixed' : Always print exactly `precision` fractional digits,
- even if this would print more or fewer digits than
- necessary to specify the value uniquely.
- - 'unique : Print the minimum number of fractional digits necessary
- to represent each value uniquely. Different elements may
- have a different number of digits. The value of the
- `precision` option is ignored.
- - 'maxprec' : Print at most `precision` fractional digits, but if
- an element can be uniquely represented with fewer digits
- only print it with that many.
- - 'maxprec_equal' : Print at most `precision` fractional digits,
- but if every element in the array can be uniquely
- represented with an equal number of fewer digits, use that
- many digits for all elements.
+
+ * 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ * 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ * 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ * 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
- can be set are::
-
- - 'bool'
- - 'int'
- - 'timedelta' : a `numpy.timedelta64`
- - 'datetime' : a `numpy.datetime64`
- - 'float'
- - 'longfloat' : 128-bit floats
- - 'complexfloat'
- - 'longcomplexfloat' : composed of two 128-bit floats
- - 'void' : type `numpy.void`
- - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- - 'str' : all other strings
-
- Other keys that can be used to set a group of types at once are::
-
- - 'all' : sets all types
- - 'int_kind' : sets 'int'
- - 'float_kind' : sets 'float' and 'longfloat'
- - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- - 'str_kind' : sets 'str' and 'numpystr'
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'void' : type `numpy.void`
+ - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
+ - 'str' : all other strings
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'str' and 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values:
- - 'fixed' : Always print exactly `precision` fractional digits,
- even if this would print more or fewer digits than
- necessary to specify the value uniquely.
- - 'unique : Print the minimum number of fractional digits necessary
- to represent each value uniquely. Different elements may
- have a different number of digits. The value of the
- `precision` option is ignored.
- - 'maxprec' : Print at most `precision` fractional digits, but if
- an element can be uniquely represented with fewer digits
- only print it with that many.
- - 'maxprec_equal' : Print at most `precision` fractional digits,
- but if every element in the array can be uniquely
- represented with an equal number of fewer digits, use that
- many digits for all elements.
+
+ - 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ - 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ - 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ - 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
options.update(overrides)
if options['legacy'] == '1.13':
+ if style is np._NoValue:
+ style = repr
+
if a.shape == () and not a.dtype.names:
return style(a.item())
elif style is not np._NoValue:
value.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
- k : keep trailing zeros, keep decimal point (no trimming)
- . : trim all trailing zeros, leave decimal point
- 0 : trim all but the zero before the decimal point. Insert the
- zero if it is missing.
- - : trim trailing zeros and any trailing decimal point
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
digits, before or after the decimal point, ignoring leading zeros.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
- k : keep trailing zeros, keep decimal point (no trimming)
- . : trim all trailing zeros, leave decimal point
- 0 : trim all but the zero before the decimal point. Insert the
- zero if it is missing.
- - : trim trailing zeros and any trailing decimal point
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
Examples
--------
- >>> np.format_float_scientific(np.float32(np.pi))
+ >>> np.format_float_positional(np.float32(np.pi))
'3.1415927'
>>> np.format_float_positional(np.float16(np.pi))
'3.14'
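To make the ``floatmode`` options documented above concrete, here is a
minimal doctest-style sketch (values are illustrative; output assumes the
default line width):

>>> import numpy as np
>>> x = np.array([0.1, 0.123456789])
>>> np.set_printoptions(floatmode='fixed', precision=4)
>>> x
array([0.1000, 0.1235])
>>> np.set_printoptions(floatmode='maxprec', precision=4)
>>> x
array([0.1   , 0.1235])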
0x0000000b = edb1ba83730c650fd9bc5772a919cda7
# Version 12 (NumPy 1.14) Added PyArray_ResolveWritebackIfCopy,
# PyArray_SetWritebackIfCopyBase and deprecated PyArray_SetUpdateIfCopyBase.
+# Version 12 (NumPy 1.15) No change.
0x0000000c = a1bc756c5782853ec2e3616cf66869d8
+
docstrings.get('numpy.core.umath.greater'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'greater_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'less':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'less_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'not_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.not_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
+ [TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'logical_and':
Ufunc(2, 1, One,
del defdict['divide']
def indent(st, spaces):
- indention = ' '*spaces
- indented = indention + st.replace('\n', '\n'+indention)
+ indentation = ' '*spaces
+ indented = indentation + st.replace('\n', '\n'+indentation)
# trim off any trailing spaces
indented = re.sub(r' +$', r'', indented)
return indented
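For reference, the renamed helper behaves as follows if exercised directly
(a minimal sketch):

>>> indent('a\nb', 4)
'    a\n    b'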
for vt in t.simd:
code2list.append(textwrap.dedent("""\
#ifdef HAVE_ATTRIBUTE_TARGET_{ISA}
- if (NPY_CPU_SUPPORTS_{ISA}) {{
+ if (npy_cpu_supports("{isa}")) {{
{fname}_functions[{idx}] = {type}_{fname}_{isa};
}}
#endif
Please make changes to the code generator program (%s)
**/
-
+ #include "cpuid.h"
%s
static int
Whenever you change one index, you break the ABI (and the ABI version number
should be incremented). Whenever you add an item to one of the dict, the API
-needs to be updated.
+needs to be updated in both setup_common.py and by adding an appropriate
+entry to cversions.txt (generate the hash via "python cversions.py").
When adding a function, make sure to use the next integer not used as an index
(in case you use an existing index or jump, the build will stop and raise an
return docdict.get(name)
# common parameter text to all ufuncs
-_params_text = textwrap.dedent("""
- out : ndarray, None, or tuple of ndarray and None, optional
- A location into which the result is stored. If provided, it must have
- a shape that the inputs broadcast to. If not provided or `None`,
- a freshly-allocated array is returned. A tuple (possible only as a
- keyword argument) must have length equal to the number of outputs.
- where : array_like, optional
- Values of True indicate to calculate the ufunc at that position, values
- of False indicate to leave the value in the output alone.
- **kwargs
- For other keyword-only arguments, see the
- :ref:`ufunc docs <ufuncs.kwargs>`.
-""").strip()
+subst = {
+ 'PARAMS': textwrap.dedent("""
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If provided, it must have
+ a shape that the inputs broadcast to. If not provided or `None`,
+ a freshly-allocated array is returned. A tuple (possible only as a
+ keyword argument) must have length equal to the number of outputs.
+ where : array_like, optional
+ Values of True indicate to calculate the ufunc at that position, values
+ of False indicate to leave the value in the output alone.
+ **kwargs
+ For other keyword-only arguments, see the
+ :ref:`ufunc docs <ufuncs.kwargs>`.
+ """).strip(),
+ 'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.",
+ 'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.",
+}
def add_newdoc(place, name, doc):
doc = textwrap.dedent(doc).strip()
- doc = doc.replace('$PARAMS', _params_text)
+
+ if name[0] != '_':
+ if '\nx :' in doc:
+ assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
+ elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
+ assert '$OUT_SCALAR_2' in doc, "in {}".format(name)
+ else:
+ assert False, "Could not detect number of inputs in {}".format(name)
+ for k, v in subst.items():
+ doc = doc.replace('$' + k, v)
docdict['.'.join((place, name))] = doc
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
+ $OUT_SCALAR_1
Examples
--------
Returns
-------
add : ndarray or scalar
- The sum of `x1` and `x2`, element-wise. Returns a scalar if
- both `x1` and `x2` are scalars.
+ The sum of `x1` and `x2`, element-wise.
+ $OUT_SCALAR_2
Notes
-----
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
- `x`-coordinate in radians [0, pi]. If `x` is a scalar then a
- scalar is returned, otherwise an array of the same shape as `x`
- is returned.
+ `x`-coordinate in radians [0, pi].
+ $OUT_SCALAR_1
See Also
--------
-------
arccosh : ndarray
Array of the same shape as `x`.
+ $OUT_SCALAR_1
See Also
--------
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
- closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
- is returned, otherwise an array.
+ closed interval ``[-pi/2, pi/2]``.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
- out : ndarray
+ out : ndarray or scalar
Array of the same shape as `x`.
+ $OUT_SCALAR_1
Notes
-----
Returns
-------
- out : ndarray
+ out : ndarray or scalar
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
- It is a scalar if `x` is a scalar.
+ $OUT_SCALAR_1
See Also
--------
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
- out : ndarray
+ out : ndarray or scalar
Array of the same shape as `x`.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
- out : array_like
+ out : ndarray or scalar
Result.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
- out : array_like
+ out : ndarray or scalar
Result.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
- out : array_like
+ out : ndarray or scalar
Result.
+ $OUT_SCALAR_2
See Also
--------
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray or scalar
The truncated value of each element in `x`.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `y`.
+ $OUT_SCALAR_1
Examples
--------
-------
y : ndarray
The corresponding cosine values.
+ $OUT_SCALAR_1
Notes
-----
Returns
-------
- out : ndarray
+ out : ndarray or scalar
Output array of same shape as `x`.
+ $OUT_SCALAR_1
Examples
--------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray
The corresponding angle in degrees.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
- out : ndarray
+ out : ndarray or scalar
The output array, element-wise Heaviside step function of `x1`.
+ $OUT_SCALAR_2
Notes
-----
Returns
-------
y : ndarray or scalar
- The quotient ``x1/x2``, element-wise. Returns a scalar if
- both ``x1`` and ``x2`` are scalars.
+ The quotient ``x1/x2``, element-wise.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
- out : ndarray or bool
- Output array of bools, or a single bool if x1 and x2 are scalars.
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
- out : ndarray
+ out : ndarray or scalar
Output array, element-wise exponential of `x`.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
- out : ndarray
+ out : ndarray or scalar
Element-wise 2 to the power `x`.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
- out : ndarray
+ out : ndarray or scalar
Element-wise exponential minus one: ``out = exp(x) - 1``.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray or scalar
The floor of each element in `x`.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray
y = floor(`x1`/`x2`)
-
+ $OUT_SCALAR_2
See Also
--------
Parameters
----------
x1 : array_like
- Dividend.
+ Dividend.
x2 : array_like
- Divisor.
+ Divisor.
$PARAMS
Returns
-------
y : array_like
- The remainder of the division of `x1` by `x2`.
+ The remainder of the division of `x1` by `x2`.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
- out : bool or ndarray of bool
- Array of bools, or a single bool if `x1` and `x2` are scalars.
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
+ $OUT_SCALAR_2
See Also
Returns
-------
out : bool or ndarray of bool
- Array of bools, or a single bool if `x1` and `x2` are scalars.
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
+ $OUT_SCALAR_2
See Also
--------
-------
z : ndarray
The hypotenuse of the triangle(s).
+ $OUT_SCALAR_2
Examples
--------
Returns
-------
- out : array_like
+ out : ndarray or scalar
Result.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
y : ndarray, bool
- For scalar input, the result is a new boolean with value True
- if the input is finite; otherwise the value is False (input is
- either positive infinity, negative infinity or Not a Number).
-
- For array input, the result is a boolean array with the same
- dimensions as the input and the values are True if the
- corresponding element of the input is finite; otherwise the values
- are False (element is either positive infinity, negative infinity
- or Not a Number).
+ True where ``x`` is not positive infinity, negative infinity,
+ or NaN; false otherwise.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
y : bool (scalar) or boolean ndarray
- For scalar input, the result is a new boolean with value True if
- the input is positive or negative infinity; otherwise the value is
- False.
-
- For array input, the result is a boolean array with the same shape
- as the input and the values are True where the corresponding
- element of the input is positive or negative infinity; elsewhere
- the values are False. If a second argument was supplied the result
- is stored there. If the type of that array is a numeric type the
- result is represented as zeros and ones, if the type is boolean
- then as False and True, respectively. The return value `y` is then
- a reference to that array.
+ True where ``x`` is positive or negative infinity, false otherwise.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
y : ndarray or bool
- For scalar input, the result is a new boolean with value True if
- the input is NaN; otherwise the value is False.
-
- For array input, the result is a boolean array of the same
- dimensions as the input and the values are True if the
- corresponding element of the input is NaN; otherwise the values are
- False.
+ True where ``x`` is NaN, false otherwise.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
y : ndarray or bool
- For scalar input, the result is a new boolean with value True if
- the input is NaT; otherwise the value is False.
-
- For array input, the result is a boolean array of the same
- dimensions as the input and the values are True if the
- corresponding element of the input is NaT; otherwise the values are
- False.
+ True where ``x`` is NaT, false otherwise.
+ $OUT_SCALAR_1
See Also
--------
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
- out : bool or ndarray of bool
- Array of bools, or a single bool if `x1` and `x2` are scalars.
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
- out : bool or ndarray of bool
- Array of bools, or a single bool if `x1` and `x2` are scalars.
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
+ $OUT_SCALAR_2
See Also
--------
-------
y : ndarray
The natural logarithm of `x`, element-wise.
+ $OUT_SCALAR_1
See Also
--------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray
Base-2 logarithm of `x`.
+ $OUT_SCALAR_1
See Also
--------
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
+ $OUT_SCALAR_2
See Also
--------
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
+ $OUT_SCALAR_2
See Also
--------
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
+ $OUT_SCALAR_1
See Also
--------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
+ $OUT_SCALAR_2
See Also
--------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
+ $OUT_SCALAR_1
See Also
--------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
+ $OUT_SCALAR_2
See Also
--------
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
y : ndarray or scalar
- The maximum of `x1` and `x2`, element-wise. Returns scalar if
- both `x1` and `x2` are scalars.
+ The maximum of `x1` and `x2`, element-wise.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
y : ndarray or scalar
- The minimum of `x1` and `x2`, element-wise. Returns scalar if
- both `x1` and `x2` are scalars.
+ The minimum of `x1` and `x2`, element-wise.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
y : ndarray or scalar
- The maximum of `x1` and `x2`, element-wise. Returns scalar if
- both `x1` and `x2` are scalars.
+ The maximum of `x1` and `x2`, element-wise.
+ $OUT_SCALAR_2
See Also
--------
Returns
-------
y : ndarray or scalar
- The minimum of `x1` and `x2`, element-wise. Returns scalar if
- both `x1` and `x2` are scalars.
+ The minimum of `x1` and `x2`, element-wise.
+ $OUT_SCALAR_2
See Also
--------
-------
y1 : ndarray
Fractional part of `x`.
+ $OUT_SCALAR_1
y2 : ndarray
Integral part of `x`.
+ $OUT_SCALAR_1
Notes
-----
-------
y : ndarray
-        The product of `x1` and `x2`, element-wise. Returns a scalar if
-        both `x1` and `x2` are scalars.
+        The product of `x1` and `x2`, element-wise.
+        $OUT_SCALAR_2
Notes
-----
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
+ $OUT_SCALAR_1
Examples
--------
-------
y : ndarray or scalar
Returned array or scalar: `y = +x`.
+ $OUT_SCALAR_1
Notes
-----
Parameters
----------
x1, x2 : array_like
- Input arrays.
+ Input arrays.
$PARAMS
Returns
-------
- not_equal : ndarray bool, scalar bool
- For each element in `x1, x2`, return True if `x1` is not equal
- to `x2` and False otherwise.
-
+ out : ndarray or scalar
+ Output array, element-wise comparison of `x1` and `x2`.
+ Typically of type bool, unless ``dtype=object`` is passed.
+ $OUT_SCALAR_2
See Also
--------
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
+ $OUT_SCALAR_2
See Also
--------
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
+ $OUT_SCALAR_2
See Also
--------
-------
y : ndarray
The corresponding radian values.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray
The corresponding angle in radians.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray
Return array.
+ $OUT_SCALAR_1
Notes
-----
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
- Returns a scalar if both `x1` and `x2` are scalars.
+ $OUT_SCALAR_2
See Also
--------
-------
out1 : ndarray
Element-wise quotient resulting from floor division.
+ $OUT_SCALAR_2
out2 : ndarray
Element-wise remainder from floor division.
+ $OUT_SCALAR_2
See Also
--------
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
+ $OUT_SCALAR_2
See Also
--------
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
+ $OUT_SCALAR_1
See Also
--------
Parameters
----------
x : array_like
- Input values.
+ Input values.
$PARAMS
Returns
-------
y : ndarray
- The sign of `x`.
+ The sign of `x`.
+ $OUT_SCALAR_1
Notes
-----
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
+ $OUT_SCALAR_1
Examples
--------
Returns
-------
- out : array_like
+ out : ndarray or scalar
The values of `x1` with the sign of `x2`.
+ $OUT_SCALAR_2
Examples
--------
Returns
-------
- out : array_like
+ out : ndarray or scalar
The next representable values of `x1` in the direction of `x2`.
+ $OUT_SCALAR_2
Examples
--------
Returns
-------
- out : array_like
- The spacing of values of `x1`.
+ out : ndarray or scalar
+ The spacing of values of `x`.
+ $OUT_SCALAR_1
Notes
-----
-------
y : array_like
The sine of each element of x.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray
The corresponding hyperbolic sine values.
+ $OUT_SCALAR_1
Notes
-----
add_newdoc('numpy.core.umath', 'sqrt',
"""
- Return the positive square-root of an array, element-wise.
+ Return the non-negative square-root of an array, element-wise.
Parameters
----------
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
+ $OUT_SCALAR_1
See Also
--------
        An array of the same shape as `x`, containing the
        cube root of each element in `x`.
If `out` was provided, `y` is a reference to it.
+ $OUT_SCALAR_1
Examples
Returns
-------
- out : ndarray
+ out : ndarray or scalar
Element-wise `x*x`, of the same shape and dtype as `x`.
- Returns scalar if `x` is a scalar.
+ $OUT_SCALAR_1
See Also
--------
Returns
-------
y : ndarray
- The difference of `x1` and `x2`, element-wise. Returns a scalar if
- both `x1` and `x2` are scalars.
+ The difference of `x1` and `x2`, element-wise.
+ $OUT_SCALAR_2
Notes
-----
Parameters
----------
x : array_like
- Input array.
+ Input array.
$PARAMS
Returns
-------
y : ndarray
- The corresponding tangent values.
+ The corresponding tangent values.
+ $OUT_SCALAR_1
Notes
-----
-------
y : ndarray
The corresponding hyperbolic tangent values.
+ $OUT_SCALAR_1
Notes
-----
Returns
-------
- out : ndarray
- Result is scalar if both inputs are scalar, ndarray otherwise.
+ out : ndarray or scalar
+ $OUT_SCALAR_2
Notes
-----
Returns
-------
- (mantissa, exponent) : tuple of ndarrays, (float, int)
- `mantissa` is a float array with values between -1 and 1.
- `exponent` is an int array which represents the exponent of 2.
+ mantissa : ndarray
+ Floating values between -1 and 1.
+ $OUT_SCALAR_1
+ exponent : ndarray
+ Integer exponents of 2.
+ $OUT_SCALAR_1
See Also
--------
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
+ $OUT_SCALAR_2
See Also
--------
add_newdoc('numpy.core.umath', 'gcd',
"""
- Returns the greatest common divisor of |x1| and |x2|
+ Returns the greatest common divisor of ``|x1|`` and ``|x2|``
Parameters
----------
-------
y : ndarray or scalar
The greatest common divisor of the absolute value of the inputs
+ $OUT_SCALAR_2
See Also
--------
add_newdoc('numpy.core.umath', 'lcm',
"""
- Returns the lowest common multiple of |x1| and |x2|
+ Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
+ $OUT_SCALAR_2
See Also
--------
"""
from __future__ import division, absolute_import, print_function
+import itertools
+
from numpy.compat import basestring
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asarray, asanyarray, result_type, tensordot, dot
einsum_symbols_set = set(einsum_symbols)
+def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
+ """
+ Computes the number of FLOPS in the contraction.
+
+ Parameters
+ ----------
+ idx_contraction : iterable
+ The indices involved in the contraction
+ inner : bool
+ Does this contraction require an inner product?
+ num_terms : int
+ The number of terms in a contraction
+ size_dictionary : dict
+ The size of each of the indices in idx_contraction
+
+ Returns
+ -------
+ flop_count : int
+ The total number of FLOPS required for the contraction.
+
+ Examples
+ --------
+
+ >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
+ 90
+
+ >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
+ 270
+
+ """
+
+ overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
+ op_factor = max(1, num_terms - 1)
+ if inner:
+ op_factor += 1
+
+ return overall_size * op_factor
+
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
iter_results = []
# Compute all unique pairs
- comb_iter = []
- for x in range(len(input_sets) - iteration):
- for y in range(x + 1, len(input_sets) - iteration):
- comb_iter.append((x, y))
-
for curr in full_results:
cost, positions, remaining = curr
- for con in comb_iter:
+ for con in itertools.combinations(range(len(input_sets) - iteration), 2):
# Find the contraction
cont = _find_contraction(con, remaining, output_set)
if new_size > memory_limit:
continue
- # Find cost
- new_cost = _compute_size_by_dict(idx_contract, idx_dict)
- if idx_removed:
- new_cost *= 2
-
# Build (total_cost, positions, indices_remaining)
- new_cost += cost
+ total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
new_pos = positions + [con]
- iter_results.append((new_cost, new_pos, new_input_sets))
+ iter_results.append((total_cost, new_pos, new_input_sets))
# Update combinatorial list, if we did not find anything return best
# path + remaining contractions
path = min(full_results, key=lambda x: x[0])[1]
return path
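The optimal search above is reachable through the public API; a sketch with
arbitrarily chosen operand shapes:

>>> import numpy as np
>>> a, b, c = np.empty((8, 8)), np.empty((8, 4)), np.empty((4, 8))
>>> path, desc = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='optimal')
>>> path[0]
'einsum_path'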
+def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
+ """Compute the cost (removed size + flops) and resultant indices for
+ performing the contraction specified by ``positions``.
+
+ Parameters
+ ----------
+ positions : tuple of int
+ The locations of the proposed tensors to contract.
+ input_sets : list of sets
+        The indices found on each tensor.
+ output_set : set
+ The output indices of the expression.
+ idx_dict : dict
+ Mapping of each index to its size.
+ memory_limit : int
+ The total allowed size for an intermediary tensor.
+ path_cost : int
+ The contraction cost so far.
+ naive_cost : int
+ The cost of the unoptimized expression.
+
+ Returns
+ -------
+ cost : (int, int)
+ A tuple containing the size of any indices removed, and the flop cost.
+ positions : tuple of int
+ The locations of the proposed tensors to contract.
+ new_input_sets : list of sets
+ The resulting new list of indices if this proposed contraction is performed.
+
+ """
+
+ # Find the contraction
+ contract = _find_contraction(positions, input_sets, output_set)
+ idx_result, new_input_sets, idx_removed, idx_contract = contract
+
+ # Sieve the results based on memory_limit
+ new_size = _compute_size_by_dict(idx_result, idx_dict)
+ if new_size > memory_limit:
+ return None
+
+ # Build sort tuple
+ old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
+ removed_size = sum(old_sizes) - new_size
+
+ # NB: removed_size used to be just the size of any removed indices i.e.:
+ # helpers.compute_size_by_dict(idx_removed, idx_dict)
+ cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
+ sort = (-removed_size, cost)
+
+ # Sieve based on total cost as well
+ if (path_cost + cost) > naive_cost:
+ return None
+
+ # Add contraction to possible choices
+ return [sort, positions, new_input_sets]
+
+
+def _update_other_results(results, best):
+ """Update the positions and provisional input_sets of ``results`` based on
+ performing the contraction result ``best``. Remove any involving the tensors
+ contracted.
+
+ Parameters
+ ----------
+ results : list
+ List of contraction results produced by ``_parse_possible_contraction``.
+ best : list
+ The best contraction of ``results`` i.e. the one that will be performed.
+
+ Returns
+ -------
+ mod_results : list
+        The list of modified results, updated with the outcome of the ``best`` contraction.
+ """
+
+ best_con = best[1]
+ bx, by = best_con
+ mod_results = []
+
+ for cost, (x, y), con_sets in results:
+
+ # Ignore results involving tensors just contracted
+ if x in best_con or y in best_con:
+ continue
+
+ # Update the input_sets
+ del con_sets[by - int(by > x) - int(by > y)]
+ del con_sets[bx - int(bx > x) - int(bx > y)]
+ con_sets.insert(-1, best[2][-1])
+
+ # Update the position indices
+ mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
+ mod_results.append((cost, mod_con, con_sets))
+
+ return mod_results
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
"""
[(0, 2), (0, 1)]
"""
+ # Handle trivial cases that leaked through
if len(input_sets) == 1:
return [(0,)]
+ elif len(input_sets) == 2:
+ return [(0, 1)]
+
+ # Build up a naive cost
+ contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
+ idx_result, new_input_sets, idx_removed, idx_contract = contract
+ naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
+ # Initially iterate over all pairs
+ comb_iter = itertools.combinations(range(len(input_sets)), 2)
+ known_contractions = []
+
+ path_cost = 0
path = []
- for iteration in range(len(input_sets) - 1):
- iteration_results = []
- comb_iter = []
- # Compute all unique pairs
- for x in range(len(input_sets)):
- for y in range(x + 1, len(input_sets)):
- comb_iter.append((x, y))
+ for iteration in range(len(input_sets) - 1):
+ # Iterate over all pairs on first step, only previously found pairs on subsequent steps
for positions in comb_iter:
- # Find the contraction
- contract = _find_contraction(positions, input_sets, output_set)
- idx_result, new_input_sets, idx_removed, idx_contract = contract
-
- # Sieve the results based on memory_limit
- if _compute_size_by_dict(idx_result, idx_dict) > memory_limit:
+ # Always initially ignore outer products
+ if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
continue
- # Build sort tuple
- removed_size = _compute_size_by_dict(idx_removed, idx_dict)
- cost = _compute_size_by_dict(idx_contract, idx_dict)
- sort = (-removed_size, cost)
+ result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
+ naive_cost)
+ if result is not None:
+ known_contractions.append(result)
- # Add contraction to possible choices
- iteration_results.append([sort, positions, new_input_sets])
+            # If we do not have an inner contraction, rescan pairs including outer products
+ if len(known_contractions) == 0:
- # If we did not find a new contraction contract remaining
- if len(iteration_results) == 0:
- path.append(tuple(range(len(input_sets))))
- break
+ # Then check the outer products
+ for positions in itertools.combinations(range(len(input_sets)), 2):
+ result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
+ path_cost, naive_cost)
+ if result is not None:
+ known_contractions.append(result)
+
+ # If we still did not find any remaining contractions, default back to einsum like behavior
+ if len(known_contractions) == 0:
+ path.append(tuple(range(len(input_sets))))
+ break
# Sort based on first index
- best = min(iteration_results, key=lambda x: x[0])
- path.append(best[1])
+ best = min(known_contractions, key=lambda x: x[0])
+
+ # Now propagate as many unused contractions as possible to next iteration
+ known_contractions = _update_other_results(known_contractions, best)
+
+ # Next iteration only compute contractions with the new tensor
+ # All other contractions have been accounted for
input_sets = best[2]
+ new_tensor_pos = len(input_sets) - 1
+ comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
+
+ # Update path and total cost
+ path.append(best[1])
+ path_cost += best[0][1]
return path
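Likewise, the greedy strategy is selected with ``optimize='greedy'``; for a
simple matrix chain the pairwise path comes out as below (a sketch, operands
arbitrary):

>>> import numpy as np
>>> ops = [np.empty((4, 4)) for _ in range(3)]
>>> path, desc = np.einsum_path('ij,jk,kl->il', *ops, optimize='greedy')
>>> path
['einsum_path', (0, 1), (0, 1)]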
if len(inputs) != 2:
return False
- # Build a few temporaries
input_left, input_right = inputs
+
+ for c in set(input_left + input_right):
+ # can't deal with repeated indices on same input or more than 2 total
+ nl, nr = input_left.count(c), input_right.count(c)
+ if (nl > 1) or (nr > 1) or (nl + nr > 2):
+ return False
+
+ # can't do implicit summation or dimension collapse e.g.
+ # "ab,bc->c" (implicitly sum over 'a')
+ # "ab,ca->ca" (take diagonal of 'a')
+ if nl + nr - 1 == int(c in result):
+ return False
+
+ # Build a few temporaries
set_left = set(input_left)
set_right = set(input_right)
keep_left = set_left - idx_removed
keep_right = set_right - idx_removed
rs = len(idx_removed)
- # Indices must overlap between the two operands
- if not len(set_left & set_right):
- return False
-
- # We cannot have duplicate indices ("ijj, jk -> ik")
- if (len(set_left) != len(input_left)) or (len(set_right) != len(input_right)):
- return False
-
- # Cannot handle partial inner ("ij, ji -> i")
- if len(keep_left & keep_right):
- return False
-
# At this point we are a DOT, GEMV, or GEMM operation
# Handle inner products
# Get length of each unique dimension and ensure all dimensions are correct
dimension_dict = {}
+ broadcast_indices = [[] for x in range(len(input_list))]
for tnum, term in enumerate(input_list):
sh = operands[tnum].shape
if len(sh) != len(term):
% (input_subscripts[tnum], tnum))
for cnum, char in enumerate(term):
dim = sh[cnum]
+
+ # Build out broadcast indices
+ if dim == 1:
+ broadcast_indices[tnum].append(char)
+
if char in dimension_dict.keys():
# For broadcasting cases we always want the largest dim size
if dimension_dict[char] == 1:
else:
dimension_dict[char] = dim
+ # Convert broadcast inds to sets
+ broadcast_indices = [set(x) for x in broadcast_indices]
+
# Compute size of each input array plus the output array
size_list = []
for term in input_list + [output_subscript]:
# Compute naive cost
# This isn't quite right, need to look into exactly how einsum does this
- naive_cost = _compute_size_by_dict(indices, dimension_dict)
- indices_in_input = input_subscripts.replace(',', '')
- mult = max(len(input_list) - 1, 1)
- if (len(indices_in_input) - len(set(indices_in_input))):
- mult *= 2
- naive_cost *= mult
+ inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
+ naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
# Compute the path
if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
- # Maximum memory should be at most out_size for this algorithm
- memory_arg = min(memory_arg, max_size)
path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
contract = _find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
- cost = _compute_size_by_dict(idx_contract, dimension_dict)
- if idx_removed:
- cost *= 2
+ cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
+ bcast = set()
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
+ bcast |= broadcast_indices.pop(x)
- do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+ new_bcast_inds = bcast - idx_removed
+
+ # If we're broadcasting, nix blas
+ if not len(idx_removed & bcast):
+ do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+ else:
+ do_blas = False
# Last contraction
if (cnum - len(path)) == -1:
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
+ broadcast_indices.append(new_bcast_inds)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
"""
- # Grab non-einsum kwargs; never optimize 2-argument case.
- optimize_arg = kwargs.pop('optimize', len(operands) > 3)
+ # Grab non-einsum kwargs; do not optimize by default.
+ optimize_arg = kwargs.pop('optimize', False)
# If no optimization, run pure einsum
if optimize_arg is False:
tmp_operands.append(operands.pop(x))
# Do we need to deal with the output?
- if specified_out and ((num + 1) == len(contraction_list)):
- handle_out = True
+ handle_out = specified_out and ((num + 1) == len(contraction_list))
- # Handle broadcasting vs BLAS cases
+ # Call tensordot if still possible
if blas:
# Checks have already been handled
input_str, results_index = einsum_str.split('->')
input_left, input_right = input_str.split(',')
- if 1 in tmp_operands[0] or 1 in tmp_operands[1]:
- left_dims = {dim: size for dim, size in
- zip(input_left, tmp_operands[0].shape)}
- right_dims = {dim: size for dim, size in
- zip(input_right, tmp_operands[1].shape)}
- # If dims do not match we are broadcasting, BLAS off
- if any(left_dims[ind] != right_dims[ind] for ind in idx_rm):
- blas = False
- # Call tensordot if still possible
- if blas:
tensor_result = input_left + input_right
for s in idx_rm:
tensor_result = tensor_result.replace(s, "")
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
- # Append new items and derefernce what we can
+ # Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
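Since the default is now ``optimize=False``, path optimization has to be
requested explicitly (a minimal sketch):

>>> import numpy as np
>>> a = np.ones((2, 2))
>>> np.einsum('ij,jk->ik', a, a, optimize='greedy')
array([[2., 2.],
       [2., 2.]])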
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
+ take_along_axis : Take elements by matching the array and the index arrays
Notes
-----
See Also
--------
putmask, place
+ put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
- In other words, ``a[index_array]`` yields a partitioned `a`.
+ If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
+    More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
+ yields the partitioned `a`, irrespective of dimensionality.
See Also
--------
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
- kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
order. The three available algorithms have the following
properties:
- =========== ======= ============= ============ =======
- kind speed worst case work space stable
- =========== ======= ============= ============ =======
+ =========== ======= ============= ============ ========
+ kind speed worst case work space stable
+ =========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
- =========== ======= ============= ============ =======
+ =========== ======= ============= ============ ========
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
heapsort when it does not make enough progress. This makes its
worst case O(n*log(n)).
+    'stable' automatically chooses the best stable sorting algorithm
+ for the data type being sorted. It is currently mapped to
+ merge sort.
+
Examples
--------
>>> a = np.array([[1,4],[3,1]])
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
- kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+    More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
+ yields the sorted `a`, irrespective of dimensionality.
See Also
--------
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
- try:
- # First try to use the new axis= parameter
- return squeeze(axis=axis)
- except TypeError:
- # For backwards compatibility
+ if axis is None:
return squeeze()
-
+ else:
+ return squeeze(axis=axis)
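With the rewrite above, ``axis`` is forwarded as-is; basic usage for
reference (a sketch):

>>> import numpy as np
>>> np.squeeze(np.zeros((1, 3, 1)), axis=0).shape
(3, 1)
>>> np.squeeze(np.zeros((1, 3, 1))).shape
(3,)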
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Returns
-------
array_of_diagonals : ndarray
- If `a` is 2-D and not a `matrix`, a 1-D array of the same type as `a`
- containing the diagonal is returned. If `a` is a `matrix`, a 1-D
- array containing the diagonal is returned in order to maintain
- backward compatibility.
+ If `a` is 2-D, then a 1-D array containing the diagonal and of the
+ same type as `a` is returned unless `a` is a `matrix`, in which case
+ a 1-D array rather than a (2-D) `matrix` is returned in order to
+ maintain backward compatibility.
+
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
Returns
-------
y : array_like
- If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of
- the same subtype as `a`. The shape of the returned array is
- ``(a.size,)``. Matrices are special cased for backward
- compatibility.
+    y is an array of the same subtype as `a`, with shape ``(a.size,)``.
+    Note that matrices are special cased for backward compatibility: if `a`
+    is a matrix, then `y` is a 1-D ndarray.
See Also
--------
return _wrapfunc(a, 'clip', a_min, a_max, out=out)
-def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Sum of array elements over a given axis.
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
Returns
-------
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
+ You can also start the sum with a value other than zero:
+
+ >>> np.sum([10], initial=5)
+ 15
"""
if isinstance(a, _gentype):
+ # 2018-02-25, 1.15.0
+ warnings.warn(
+ "Calling np.sum(generator) is deprecated, and in the future will give a different result. "
+ "Use np.sum(np.from_iter(generator)) or the python sum builtin instead.",
+ DeprecationWarning, stacklevel=2)
+
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
- return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims)
+ return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
+ initial=initial)
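A sketch of the replacement the deprecation warning suggests (``dtype`` is
required by ``np.fromiter`` and chosen here for illustration):

>>> import numpy as np
>>> np.sum(np.fromiter((i * i for i in range(4)), dtype=np.intp))
14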
def any(a, axis=None, out=None, keepdims=np._NoValue):
return _methods._ptp(a, axis=axis, out=out, **kwargs)
-def amax(a, axis=None, out=None, keepdims=np._NoValue):
+def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ The minimum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+
Returns
-------
amax : ndarray or scalar
>>> np.nanmax(b)
4.0
+ You can use an initial value to compute the maximum of an empty slice, or
+ to initialize it to a different value:
+
+ >>> np.max([[-50], [10]], axis=-1, initial=0)
+ array([ 0, 10])
+
+ Notice that the initial value is used as one of the elements for which the
+    maximum is determined, unlike for the default argument of Python's ``max``
+ function, which is only used for empty iterables.
+
+ >>> np.max([5], initial=6)
+ 6
+ >>> max([5], default=6)
+ 5
"""
- return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims)
+ return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims,
+ initial=initial)
-def amin(a, axis=None, out=None, keepdims=np._NoValue):
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ The maximum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
Returns
-------
amin : ndarray or scalar
>>> np.nanmin(b)
0.0
+ >>> np.min([[-50], [10]], axis=-1, initial=0)
+ array([-50, 0])
+
+ Notice that the initial value is used as one of the elements for which the
+    minimum is determined, unlike for the default argument of Python's ``min``
+    function, which is only used for empty iterables. Note that this is not
+    the same as Python's ``default`` argument.
+
+ >>> np.min([6], initial=5)
+ 5
+ >>> min([6], default=5)
+ 6
"""
- return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims)
+ return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims,
+ initial=initial)
def alen(a):
return len(array(a, ndmin=1))
-def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the product of array elements over a given axis.
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
+ initial : scalar, optional
+ The starting value for this product. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
Returns
-------
>>> np.prod(x).dtype == int
True
+ You can also start the product with a value other than one:
+
+ >>> np.prod([1, 2], initial=5)
+ 10
"""
- return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims)
+ return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims,
+ initial=initial)
def cumprod(a, axis=None, dtype=None, out=None):
(k)*PyArray_STRIDES(obj)[2] + \
(l)*PyArray_STRIDES(obj)[3]))
+/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */
static NPY_INLINE void
PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
{
- if (arr != NULL) {
- if ((PyArray_FLAGS(arr) & NPY_ARRAY_WRITEBACKIFCOPY) ||
- (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY)) {
- PyArrayObject *base = (PyArrayObject *)PyArray_BASE(arr);
- PyArray_ENABLEFLAGS(base, NPY_ARRAY_WRITEABLE);
+ PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
+ if (fa && fa->base) {
+ if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) ||
+ (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) {
+ PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
+ Py_DECREF(fa->base);
+ fa->base = NULL;
PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
}
dict.
*/
-#define NPY_TITLE_KEY(key, value) ((PyTuple_GET_SIZE((value))==3) && \
- (PyTuple_GET_ITEM((value), 2) == (key)))
+static NPY_INLINE int
+NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
+{
+ PyObject *title;
+ if (PyTuple_GET_SIZE(value) != 3) {
+ return 0;
+ }
+ title = PyTuple_GET_ITEM(value, 2);
+ if (key == title) {
+ return 1;
+ }
+#ifdef PYPY_VERSION
+ /*
+ * On PyPy, dictionary keys do not always preserve object identity.
+ * Fall back to comparison by value.
+ */
+ if (PyUnicode_Check(title) && PyUnicode_Check(key)) {
+ return PyUnicode_Compare(title, key) == 0 ? 1 : 0;
+ }
+#if PY_VERSION_HEX < 0x03000000
+ if (PyString_Check(title) && PyString_Check(key)) {
+ return PyObject_Compare(title, key) == 0 ? 1 : 0;
+ }
+#endif
+#endif
+ return 0;
+}
+/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */
+#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value)))
#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength)
#endif
+/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */
+#if (PY_VERSION_HEX < 0x02070B00) || \
+ ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400))
+ #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x))
+#else
+ #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
+#endif
+
/*
* PyString -> PyBytes
*/
#endif
#endif
-#ifdef HAVE___BUILTIN_CPU_SUPPORTS
- #ifdef HAVE_ATTRIBUTE_TARGET_AVX2
- #define NPY_CPU_SUPPORTS_AVX2 __builtin_cpu_supports("avx2")
- #else
- #define NPY_CPU_SUPPORTS_AVX2 0
- #endif
- #ifdef HAVE_ATTRIBUTE_TARGET_AVX
- #define NPY_CPU_SUPPORTS_AVX __builtin_cpu_supports("avx")
- #else
- #define NPY_CPU_SUPPORTS_AVX 0
- #endif
-#else
- #define NPY_CPU_SUPPORTS_AVX 0
- #define NPY_CPU_SUPPORTS_AVX2 0
-#endif
-
#if defined(_MSC_VER)
#define NPY_INLINE __inline
#elif defined(__GNUC__)
* NPY_CPU_SH_BE
* NPY_CPU_ARCEL
* NPY_CPU_ARCEB
+ * NPY_CPU_RISCV64
*/
#ifndef _NPY_CPUARCH_H_
#define _NPY_CPUARCH_H_
* _M_AMD64 defined by MS compiler
*/
#define NPY_CPU_AMD64
+#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_PPC64LE
+#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_PPC64
#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
/*
* __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
* but can't find it ATM
* _ARCH_PPC is used by at least gcc on AIX
+ * As __powerpc__ and _ARCH_PPC are also defined by PPC64, check
+ * for those specifically first before defaulting to ppc
*/
#define NPY_CPU_PPC
-#elif defined(__ppc64le__)
- #define NPY_CPU_PPC64LE
-#elif defined(__ppc64__)
- #define NPY_CPU_PPC64
#elif defined(__sparc__) || defined(__sparc)
/* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
#define NPY_CPU_SPARC
#define NPY_CPU_HPPA
#elif defined(__alpha__)
#define NPY_CPU_ALPHA
-#elif defined(__arm__) && defined(__ARMEL__)
- #define NPY_CPU_ARMEL
-#elif defined(__arm__) && defined(__ARMEB__)
- #define NPY_CPU_ARMEB
+#elif defined(__arm__) || defined(__aarch64__)
+ #if defined(__ARMEB__) || defined(__AARCH64EB__)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH32
+ #elif defined(__ARM_64BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH64
+ #else
+ #define NPY_CPU_ARMEB
+ #endif
+ #elif defined(__ARMEL__) || defined(__AARCH64EL__)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEL_AARCH32
+ #elif defined(__ARM_64BIT_STATE)
+ #define NPY_CPU_ARMEL_AARCH64
+ #else
+ #define NPY_CPU_ARMEL
+ #endif
+ #else
+ # error Unknown ARM CPU, please report this to numpy maintainers with \
+ information about your platform (OS, CPU and compiler)
+ #endif
#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
#define NPY_CPU_SH_LE
#elif defined(__sh__) && defined(__BIG_ENDIAN__)
#define NPY_CPU_MIPSEB
#elif defined(__or1k__)
#define NPY_CPU_OR1K
-#elif defined(__aarch64__)
- #define NPY_CPU_AARCH64
#elif defined(__mc68000__)
#define NPY_CPU_M68K
#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
#define NPY_CPU_ARCEL
#elif defined(__arc__) && defined(__BIG_ENDIAN__)
#define NPY_CPU_ARCEB
+#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
+ #define NPY_CPU_RISCV64
#else
#error Unknown CPU, please report this to numpy maintainers with \
information about your platform (OS, CPU and compiler)
|| defined(NPY_CPU_SH_LE) \
|| defined(NPY_CPU_MIPSEL) \
|| defined(NPY_CPU_PPC64LE) \
- || defined(NPY_CPU_ARCEL)
+ || defined(NPY_CPU_ARCEL) \
+ || defined(NPY_CPU_RISCV64)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
#elif defined(NPY_CPU_PPC) \
|| defined(NPY_CPU_SPARC) \
Simple Interface:
-In your C-extension: around a block of code you want to be interruptable
+In your C-extension: around a block of code you want to be interruptible
with a SIGINT
NPY_SIGINT_ON
#define NPY_FPE_UNDERFLOW 4
#define NPY_FPE_INVALID 8
-int npy_get_floatstatus(void);
+int npy_clear_floatstatus_barrier(char*);
+int npy_get_floatstatus_barrier(char*);
+/*
+ * Use caution with these - clang and gcc 8.1 are known to reorder calls
+ * to this form of the function, which can defeat the check. The _barrier
+ * form of the call is preferable, where the argument is
+ * (char*)&local_variable
+ */
int npy_clear_floatstatus(void);
+int npy_get_floatstatus(void);
+
void npy_set_floatstatus_divbyzero(void);
void npy_set_floatstatus_overflow(void);
void npy_set_floatstatus_underflow(void);
#define NPY_1_12_API_VERSION 0x00000008
#define NPY_1_13_API_VERSION 0x00000008
#define NPY_1_14_API_VERSION 0x00000008
+#define NPY_1_15_API_VERSION 0x00000008
#endif
int *core_dim_ixs;
/*
* positions of 1st core dimensions of each
- * argument in core_dim_ixs
+ * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
*/
int *core_offsets;
/* signature string for printing purpose */
else:
if not isinstance(shape, tuple):
shape = (shape,)
- size = 1
+ size = np.intp(1) # avoid default choice of np.int_, which might overflow
for k in shape:
size *= k
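Why ``np.intp`` rather than the default integer: on platforms where
``np.int_`` is 32-bit (e.g. 64-bit Windows) the product below would wrap,
while ``np.intp`` always matches the pointer size (shape values are
hypothetical):

>>> import numpy as np
>>> size = np.intp(1)
>>> for k in (1 << 20, 1 << 20):
...     size *= k
...
>>> int(size)
1099511627776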
from __future__ import division, absolute_import, print_function
try:
- # Accessing collections abstact classes from collections
+ # Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
See Also
--------
- ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
zeros : Return a new array setting values to zero.
- ones : Return a new array setting values to one.
- empty : Return a new uninitialized array.
Examples
--------
See Also
--------
- zeros, ones_like
+ ones_like : Return an array of ones with shape and type of input.
+ empty : Return a new uninitialized array.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
Examples
--------
See Also
--------
- zeros_like : Return an array of zeros with shape and type of input.
empty_like : Return an empty array with shape and type of input.
- zeros : Return a new array setting values to zero.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
- empty : Return a new uninitialized array.
Examples
--------
See Also
--------
- zeros_like : Return an array of zeros with shape and type of input.
- ones_like : Return an array of ones with shape and type of input.
- empty_like : Return an empty array with shape and type of input.
- full_like : Fill an array with shape and type of input.
- zeros : Return a new array setting values to zero.
- ones : Return a new array setting values to one.
+ full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
Examples
--------
See Also
--------
- zeros_like : Return an array of zeros with shape and type of input.
- ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
- zeros : Return a new array setting values to zero.
- ones : Return a new array setting values to one.
- empty : Return a new uninitialized array.
- full : Fill a new array.
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full : Return a new array of given shape filled with value.
Examples
--------
Contrary to `asanyarray`, ndarray subclasses are not passed through:
- >>> issubclass(np.matrix, np.ndarray)
+ >>> issubclass(np.recarray, np.ndarray)
True
- >>> a = np.matrix([[1, 2]])
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
>>> np.asarray(a) is a
False
>>> np.asanyarray(a) is a
Instances of `ndarray` subclasses are passed through as-is:
- >>> a = np.matrix([1, 2])
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
>>> np.asanyarray(a) is a
True
'11101'
"""
- def warn_if_insufficient(width, binwdith):
+ def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
-
+
.. warning:: The default `atol` is not appropriate for comparing numbers
that are much smaller than one (see Notes).
Notes
-----
- The floating-point exceptions are defined in the IEEE 754 standard [1]:
+ The floating-point exceptions are defined in the IEEE 754 standard [1]_:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
generic
+-> bool_ (kind=b)
- +-> number (kind=i)
- | integer
- | signedinteger (intxx)
- | byte
- | short
- | intc
- | intp int0
- | int_
- | longlong
- +-> unsignedinteger (uintxx) (kind=u)
- | ubyte
- | ushort
- | uintc
- | uintp uint0
- | uint_
- | ulonglong
- +-> inexact
- | +-> floating (floatxx) (kind=f)
- | | half
- | | single
- | | float_ (double)
- | | longfloat
- | \\-> complexfloating (complexxx) (kind=c)
- | csingle (singlecomplex)
- | complex_ (cfloat, cdouble)
- | clongfloat (longcomplex)
+ +-> number
+ | +-> integer
+ | | +-> signedinteger (intxx) (kind=i)
+ | | | byte
+ | | | short
+ | | | intc
+ | | | intp int0
+ | | | int_
+ | | | longlong
+ | | \\-> unsignedinteger (uintxx) (kind=u)
+ | | ubyte
+ | | ushort
+ | | uintc
+ | | uintp uint0
+ | | uint_
+ | | ulonglong
+ | +-> inexact
+ | +-> floating (floatxx) (kind=f)
+ | | half
+ | | single
+ | | float_ (double)
+ | | longfloat
+ | \\-> complexfloating (complexxx) (kind=c)
+ | csingle (singlecomplex)
+ | complex_ (cfloat, cdouble)
+ | clongfloat (longcomplex)
+-> flexible
- | character
- | void (kind=V)
- |
- | str_ (string_, bytes_) (kind=S) [Python 2]
- | unicode_ (kind=U) [Python 2]
- |
- | bytes_ (string_) (kind=S) [Python 3]
- | str_ (unicode_) (kind=U) [Python 3]
- |
- \\-> object_ (not used much) (kind=O)
+ | +-> character
+ | | str_ (string_, bytes_) (kind=S) [Python 2]
+ | | unicode_ (kind=U) [Python 2]
+ | |
+ | | bytes_ (string_) (kind=S) [Python 3]
+ | | str_ (unicode_) (kind=U) [Python 3]
+ | |
+ | \\-> void (kind=V)
+ \\-> object_ (not used much) (kind=O)
"""
from __future__ import division, absolute_import, print_function
from . import numeric as sb
from . import numerictypes as nt
-from numpy.compat import isfileobj, bytes, long
+from numpy.compat import isfileobj, bytes, long, unicode
from .arrayprint import get_printoptions
# All of the functions allow formats to be a dtype
if (names):
if (type(names) in [list, tuple]):
pass
- elif isinstance(names, str):
+ elif isinstance(names, (str, unicode)):
names = names.split(',')
else:
raise NameError("illegal input names %s" % repr(names))
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
# Get long double representation
- if sys.platform != 'darwin':
- rep = check_long_double_representation(config_cmd)
- if rep in ['INTEL_EXTENDED_12_BYTES_LE',
- 'INTEL_EXTENDED_16_BYTES_LE',
- 'MOTOROLA_EXTENDED_12_BYTES_BE',
- 'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
- 'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
- 'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
- moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
- else:
- raise ValueError("Unrecognized long double format: %s" % rep)
+ rep = check_long_double_representation(config_cmd)
+ moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
# Py3K check
if sys.version_info[0] == 3:
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
- # update the substition dictionary during npymath build
+ # update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'extobj.c'),
+ join('src', 'umath', 'cpuid.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c'),
join('src', 'umath', 'override.c'),
# 0x0000000a - 1.12.x
# 0x0000000b - 1.13.x
# 0x0000000c - 1.14.x
+# 0x0000000c - 1.15.x
C_API_VERSION = 0x0000000c
class MismatchCAPIWarning(Warning):
"stdio.h", "LINK_AVX"),
("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"',
"stdio.h", "LINK_AVX2"),
+ ("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"),
]
# function attributes
_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
'000', '000', '000', '000', '000', '000', '000', '000']
_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
-_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
+_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
['000'] * 8)
-_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
+_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
['000'] * 8)
def long_double_representation(lines):
# the long double
if read[-8:] == _AFTER_SEQ:
saw = copy.copy(read)
+ # if the content was 12 bytes, we only have 32 - 8 - 12 = 12
+ # "before" bytes. In other words the first 4 "before" bytes went
+ # past the sliding window.
if read[:12] == _BEFORE_SEQ[4:]:
if read[12:-8] == _INTEL_EXTENDED_12B:
return 'INTEL_EXTENDED_12_BYTES_LE'
if read[12:-8] == _MOTOROLA_EXTENDED_12B:
return 'MOTOROLA_EXTENDED_12_BYTES_BE'
+            # if the content was 16 bytes, we are left with 32-8-16 = 8
+ # "before" bytes, so 8 went past the sliding window.
elif read[:8] == _BEFORE_SEQ[8:]:
if read[8:-8] == _INTEL_EXTENDED_16B:
return 'INTEL_EXTENDED_16_BYTES_LE'
return 'IEEE_QUAD_BE'
elif read[8:-8] == _IEEE_QUAD_PREC_LE:
return 'IEEE_QUAD_LE'
- elif read[8:-8] == _DOUBLE_DOUBLE_BE:
- return 'DOUBLE_DOUBLE_BE'
- elif read[8:-8] == _DOUBLE_DOUBLE_LE:
- return 'DOUBLE_DOUBLE_LE'
+ elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE:
+ return 'IBM_DOUBLE_DOUBLE_LE'
+ elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE:
+ return 'IBM_DOUBLE_DOUBLE_BE'
+ # if the content was 8 bytes, left with 32-8-8 = 16 bytes
elif read[:16] == _BEFORE_SEQ:
if read[16:-8] == _IEEE_DOUBLE_LE:
return 'IEEE_DOUBLE_LE'
#include <Python.h>
#define _NPY_NO_DEPRECATIONS /* for NPY_CHAR */
#include "numpy/arrayobject.h"
+#include "numpy/arrayscalars.h"
#include "numpy/npy_math.h"
+#include "numpy/halffloat.h"
#include "mem_overlap.h"
#include "npy_extint128.h"
#include "common.h"
return array;
}
+/* used to test that WRITEBACKIFCOPY without resolution emits a runtime warning */
+static PyObject*
+npy_abuse_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args)
+{
+ int flags;
+ PyObject* array;
+ if (!PyArray_Check(args)) {
+ PyErr_SetString(PyExc_TypeError, "test needs ndarray input");
+ return NULL;
+ }
+ flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY;
+ array = PyArray_FromArray((PyArrayObject*)args, NULL, flags);
+ if (array == NULL)
+ return NULL;
+ Py_DECREF(array); /* calls array_dealloc even on PyPy */
+ Py_RETURN_NONE;
+}
+
/* resolve WRITEBACKIFCOPY */
static PyObject*
npy_resolve(PyObject* NPY_UNUSED(self), PyObject* args)
Py_RETURN_NONE;
}
+/* discard WRITEBACKIFCOPY */
+static PyObject*
+npy_discard(PyObject* NPY_UNUSED(self), PyObject* args)
+{
+ if (!PyArray_Check(args)) {
+ PyErr_SetString(PyExc_TypeError, "test needs ndarray input");
+ return NULL;
+ }
+ PyArray_DiscardWritebackIfCopy((PyArrayObject*)args);
+ Py_RETURN_NONE;
+}
+
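For context, the lifecycle these test helpers exercise, sketched for a numpy
C-API extension (orig is some existing PyArrayObject*; error handling
trimmed to the essentials):

    /* request a behaved copy that writes back into `orig` when resolved */
    PyObject *copy = PyArray_FromArray(orig, NULL,
                             NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY);
    if (copy == NULL) {
        return NULL;
    }
    /* ... modify the data of `copy` ... */
    PyArray_ResolveWritebackIfCopy((PyArrayObject *)copy); /* write back */
    Py_DECREF(copy);  /* skipping the resolve is what npy_abuse_writebackifcopy tests */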
#if !defined(NPY_PY3K)
static PyObject *
int_subclass(PyObject *dummy, PyObject *args)
return NULL;
}
-
static PyObject *
array_solve_diophantine(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
/**end repeat**/
+/*
+ * For development/testing purposes, it's convenient to have access to the
+ * system printf for floats. This is a very simple printf interface.
+ */
+PyObject *
+PrintFloat_Printf_g(PyObject *obj, int precision)
+{
+ char str[1024];
+
+ if (PyArray_IsScalar(obj, Half)) {
+ npy_half x = ((PyHalfScalarObject *)obj)->obval;
+ PyOS_snprintf(str, sizeof(str), "%.*g", precision,
+ npy_half_to_double(x));
+ }
+ else if (PyArray_IsScalar(obj, Float)) {
+ npy_float x = ((PyFloatScalarObject *)obj)->obval;
+ PyOS_snprintf(str, sizeof(str), "%.*g", precision, x);
+ }
+ else if (PyArray_IsScalar(obj, Double)) {
+ npy_double x = ((PyDoubleScalarObject *)obj)->obval;
+ PyOS_snprintf(str, sizeof(str), "%.*g", precision, x);
+ /* would be better to use lg, but not available in C90 */
+ }
+ else if (PyArray_IsScalar(obj, LongDouble)) {
+ npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval;
+ PyOS_snprintf(str, sizeof(str), "%.*Lg", precision, x);
+ }
+    else {
+ double val = PyFloat_AsDouble(obj);
+ if (error_converting(val)) {
+ return NULL;
+ }
+ PyOS_snprintf(str, sizeof(str), "%.*g", precision, val);
+ }
+
+ return PyUString_FromString(str);
+}
+
+
+static PyObject *
+printf_float_g(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
+{
+ PyObject *obj;
+ int precision;
+
+ if (!PyArg_ParseTuple(args,"Oi:format_float_OSprintf_g", &obj,
+ &precision)) {
+ return NULL;
+ }
+
+ if (precision < 0) {
+ PyErr_SetString(PyExc_TypeError, "precision must be non-negative");
+ return NULL;
+ }
+
+ return PrintFloat_Printf_g(obj, precision);
+}
static PyMethodDef Multiarray_TestsMethods[] = {
{"IsPythonScalar",
{"npy_create_writebackifcopy",
npy_create_writebackifcopy,
METH_O, NULL},
+ {"npy_abuse_writebackifcopy",
+ npy_abuse_writebackifcopy,
+ METH_O, NULL},
{"npy_resolve",
npy_resolve,
METH_O, NULL},
+ {"npy_discard",
+ npy_discard,
+ METH_O, NULL},
#if !defined(NPY_PY3K)
{"test_int_subclass",
int_subclass,
/**end repeat1**/
/**end repeat**/
-
+ {"format_float_OSprintf_g",
+ (PyCFunction)printf_float_g,
+     METH_VARARGS, NULL},
{NULL, NULL, 0, NULL} /* Sentinel */
};
allocated_src_data = 1;
}
+ if (PyDataType_FLAGCHK(PyArray_DESCR(dst), NPY_NEEDS_INIT)) {
+ memset(tmp_src_data, 0, PyArray_DESCR(dst)->elsize);
+ }
+
if (PyArray_CastRawArrays(1, src_data, tmp_src_data, 0, 0,
src_dtype, PyArray_DESCR(dst), 0) != NPY_SUCCEED) {
src_data = tmp_src_data;
PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
{
int ret;
-#ifdef PYPY_VERSION
- #ifndef DEPRECATE_UPDATEIFCOPY
- #define DEPRECATE_UPDATEIFCOPY
- #endif
-#endif
-
-#ifdef DEPRECATE_UPDATEIFCOPY
- /* TODO: enable this once a solution for UPDATEIFCOPY
- * and nditer are resolved, also pending the fix for GH7054
- */
- /* 2017-Nov-10 1.14 */
+    /* 2017-Nov-10 1.14 (for PyPy only) */
+ /* 2018-April-21 1.15 (all Python implementations) */
if (DEPRECATE("PyArray_SetUpdateIfCopyBase is deprecated, use "
"PyArray_SetWritebackIfCopyBase instead, and be sure to call "
"PyArray_ResolveWritebackIfCopy before the array is deallocated, "
"error, PyArray_DiscardWritebackIfCopy may be called instead to "
"throw away the scratch buffer.") < 0)
return -1;
-#endif
ret = PyArray_SetWritebackIfCopyBase(arr, base);
if (ret >=0) {
PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
/*********************** end C-API functions **********************/
+
+/* dealloc must not raise an error; as a best effort, write the
+ * message to stderr and clear the error
+ */
+
+static NPY_INLINE void
+WARN_IN_DEALLOC(PyObject* warning, const char * msg) {
+ if (PyErr_WarnEx(warning, msg, 1) < 0) {
+ PyObject * s;
+
+ s = PyUString_FromString("array_dealloc");
+ if (s) {
+ PyErr_WriteUnraisable(s);
+ Py_DECREF(s);
+ }
+ else {
+ PyErr_WriteUnraisable(Py_None);
+ }
+ }
+}
+
/* array object functions */
static void
int retval;
if (PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY)
{
- char * msg = "WRITEBACKIFCOPY requires a call to "
- "PyArray_ResolveWritebackIfCopy or "
- "PyArray_DiscardWritebackIfCopy before array_dealloc is "
- "called.";
- /* 2017-Nov-10 1.14 */
- if (DEPRECATE(msg) < 0) {
- /* dealloc cannot raise an error, best effort try to write
- to stderr and clear the error
- */
- PyErr_WriteUnraisable((PyObject *)&PyArray_Type);
- }
+ char const * msg = "WRITEBACKIFCOPY detected in array_dealloc. "
+ " Required call to PyArray_ResolveWritebackIfCopy or "
+ "PyArray_DiscardWritebackIfCopy is missing.";
+ Py_INCREF(self); /* hold on to self in next call since if
+ * refcount == 0 it will recurse back into
+ *array_dealloc
+ */
+ WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
if (retval < 0)
{
}
if (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY) {
/* DEPRECATED, remove once the flag is removed */
+ char const * msg = "UPDATEIFCOPY detected in array_dealloc. "
+ " Required call to PyArray_ResolveWritebackIfCopy or "
+ "PyArray_DiscardWritebackIfCopy is missing";
Py_INCREF(self); /* hold on to self in next call since if
- * refcount == 0 it will recurse back into
+ * refcount == 0 it will recurse back into
*array_dealloc
*/
+ /* 2017-Nov-10 1.14 */
+ WARN_IN_DEALLOC(PyExc_DeprecationWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
if (retval < 0)
{
}
}
/*
- * In any case base is pointing to something that we need
+ * If fa->base is non-NULL, it is something
* to DECREF -- either a view or a buffer object
*/
Py_XDECREF(fa->base);
PyArray_NewFromDescr_int(subtype, descr,
(int)dims.len,
dims.ptr,
- strides.ptr, NULL, is_f_order, NULL,
+ strides.ptr, NULL, is_f_order, NULL, NULL,
0, 1);
if (ret == NULL) {
descr = NULL;
if (is_f_order) {
buffer.flags |= NPY_ARRAY_F_CONTIGUOUS;
}
- ret = (PyArrayObject *)\
- PyArray_NewFromDescr_int(subtype, descr,
- dims.len, dims.ptr,
- strides.ptr,
- offset + (char *)buffer.ptr,
- buffer.flags, NULL, 0, 1);
+ ret = (PyArrayObject *)PyArray_NewFromDescr_int(
+ subtype, descr,
+ dims.len, dims.ptr, strides.ptr, offset + (char *)buffer.ptr,
+ buffer.flags, NULL, buffer.base,
+ 0, 1);
if (ret == NULL) {
descr = NULL;
goto fail;
}
- PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL);
- Py_INCREF(buffer.base);
- if (PyArray_SetBaseObject(ret, buffer.base) < 0) {
- Py_DECREF(ret);
- ret = NULL;
- goto fail;
- }
}
npy_free_cache_dim_obj(dims);
*****************************************************************************
*/
+#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
+/*
+ * Disable harmless compiler warning "4116: unnamed type definition in
+ * parentheses" which is caused by the _ALIGN macro.
+ */
+#if defined(_MSC_VER)
+#pragma warning(disable:4116)
+#endif
+
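The macro computes a type's required alignment with the classic offsetof
trick: the compiler must pad the struct so that `v` lands on a properly
aligned offset. A self-contained illustration (ALIGN_OF is a stand-in name;
gcc and clang accept the unnamed struct, MSVC merely warns, hence the pragma
above):

    #include <stdio.h>
    #include <stddef.h>

    #define ALIGN_OF(type) offsetof(struct {char c; type v;}, v)

    int main(void)
    {
        printf("short:  %zu\n", ALIGN_OF(short));   /* typically 2 */
        printf("double: %zu\n", ALIGN_OF(double));  /* typically 8 */
        return 0;
    }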
/**begin repeat
*
}
return -1;
}
- if (ap == NULL || PyArray_ISBEHAVED(ap))
+ if (ap == NULL || PyArray_ISBEHAVED(ap)) {
+ assert(npy_is_aligned(ov, _ALIGN(@type@)));
*((@type@ *)ov)=temp;
+ }
else {
PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap),
ap);
{
PyArrayObject *ap = vap;
char *ip = input;
- PyArrayObject *u = NULL;
PyArray_Descr* descr;
- int itemsize;
descr = PyArray_DESCR(ap);
if (PyDataType_HASFIELDS(descr)) {
return NULL;
}
Py_INCREF(descr->subarray->base);
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- descr->subarray->base, shape.len, shape.ptr,
- NULL, ip, PyArray_FLAGS(ap)&(~NPY_ARRAY_F_CONTIGUOUS), NULL);
+ ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ &PyArray_Type, descr->subarray->base,
+ shape.len, shape.ptr, NULL, ip,
+ PyArray_FLAGS(ap) & ~NPY_ARRAY_F_CONTIGUOUS,
+ NULL, (PyObject *)ap);
npy_free_cache_dim_obj(shape);
- if (!ret) {
- return NULL;
- }
- Py_INCREF(ap);
- if (PyArray_SetBaseObject(ret, (PyObject *)ap) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- PyArray_UpdateFlags((PyArrayObject *)ret, NPY_ARRAY_UPDATE_ALL);
return (PyObject *)ret;
}
- /* 2017-11-26, 1.14 */
- if (DEPRECATE_FUTUREWARNING(
- "the `.item()` method of unstructured void types will return an "
- "immutable `bytes` object in the near future, the same as "
- "returned by `bytes(void_obj)`, instead of the mutable memoryview "
- "or integer array returned in numpy 1.13.") < 0) {
- return NULL;
- }
- /*
- * In the future all the code below will be replaced by
- *
- * For unstructured void types like V4, return a bytes object (copy).
- * return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize);
- */
-
- if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)
- || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) {
- PyErr_SetString(PyExc_ValueError,
- "tried to get void-array with object members as buffer.");
- return NULL;
- }
- itemsize = PyArray_DESCR(ap)->elsize;
-
-#if defined(NPY_PY3K)
- /*
- * Return a byte array; there are no plain buffer objects on Py3
- */
- {
- npy_intp dims[1], strides[1];
- dims[0] = itemsize;
- strides[0] = 1;
- descr = PyArray_DescrNewFromType(NPY_BYTE);
- u = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- descr, 1, dims, strides, ip,
- PyArray_ISWRITEABLE(ap) ? NPY_ARRAY_WRITEABLE : 0,
- NULL);
- Py_INCREF(ap);
- if (PyArray_SetBaseObject(u, (PyObject *)ap) < 0) {
- Py_DECREF(u);
- return NULL;
- }
- }
-#else
- /*
- * default is to return buffer object pointing to
- * current item a view of it
- */
- if (PyArray_ISWRITEABLE(ap)) {
- if (array_might_be_written(ap) < 0) {
- return NULL;
- }
- u = (PyArrayObject *)PyBuffer_FromReadWriteMemory(ip, itemsize);
- }
- else {
- u = (PyArrayObject *)PyBuffer_FromMemory(ip, itemsize);
- }
-#endif
-
- if (u == NULL) {
- return NULL;
- }
- return (PyObject *)u;
+ return PyBytes_FromStringAndSize(ip, descr->elsize);
}
*/
NPY_NO_EXPORT int
_setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr,
- npy_intp *offset_p)
+ npy_intp *offset_p, char *dstdata)
{
PyObject *key;
PyObject *tup;
}
((PyArrayObject_fields *)(arr))->descr = new;
- if ((new->alignment > 1) && ((offset % new->alignment) != 0)) {
+ if ((new->alignment > 1) &&
+ ((((uintptr_t)dstdata + offset) % new->alignment) != 0)) {
PyArray_CLEARFLAGS(arr, NPY_ARRAY_ALIGNED);
}
else {
if (PyArray_EquivTypes(srcdescr, dstdescr)) {
for (i = 0; i < names_size; i++) {
/* neither line can ever fail, in principle */
- if (_setup_field(i, dstdescr, dummy, &offset)) {
+ if (_setup_field(i, dstdescr, dummy, &offset, dstdata)) {
return -1;
}
PyArray_DESCR(dummy)->f->copyswap(dstdata + offset,
PyObject *item;
/* temporarily make ap have only this field */
- if (_setup_field(i, descr, ap, &offset) == -1) {
+ if (_setup_field(i, descr, ap, &offset, ip) == -1) {
failed = 1;
break;
}
for (i = 0; i < names_size; i++) {
/* temporarily make ap have only this field */
- if (_setup_field(i, descr, ap, &offset) == -1) {
+ if (_setup_field(i, descr, ap, &offset, ip) == -1) {
failed = 1;
break;
}
return -1;
}
Py_INCREF(descr->subarray->base);
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- descr->subarray->base, shape.len, shape.ptr,
- NULL, ip, PyArray_FLAGS(ap), NULL);
+ ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ &PyArray_Type, descr->subarray->base,
+ shape.len, shape.ptr, NULL, ip,
+ PyArray_FLAGS(ap), NULL, (PyObject *)ap);
npy_free_cache_dim_obj(shape);
if (!ret) {
return -1;
}
- Py_INCREF(ap);
- if (PyArray_SetBaseObject(ret, (PyObject *)ap) < 0) {
- Py_DECREF(ret);
- return -1;
- }
- PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL);
res = PyArray_CopyObject(ret, op);
Py_DECREF(ret);
return res;
*****************************************************************************
*/
-
-#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
-/*
- * Disable harmless compiler warning "4116: unnamed type definition in
- * parentheses" which is caused by the _ALIGN macro.
- */
-#if defined(_MSC_VER)
-#pragma warning(disable:4116)
-#endif
-
-
/**begin repeat
*
* #from = VOID, STRING, UNICODE#
#include "common.h"
-extern NPY_NO_EXPORT PyArray_Descr LONGLONG_Descr;
-extern NPY_NO_EXPORT PyArray_Descr LONG_Descr;
-extern NPY_NO_EXPORT PyArray_Descr INT_Descr;
-
NPY_NO_EXPORT int
set_typeinfo(PyObject *dict);
return 1;
}
+/*
+ * Fill in str with an appropriate PEP 3118 format string, based on
+ * descr. For structured dtypes, calls itself recursively. Each call extends
+ * str at offset then updates offset, and uses descr->byteorder, (and
+ * possibly the byte order in obj) to determine the byte-order char.
+ *
+ * Returns 0 for success, -1 for failure
+ */
static int
_buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
PyObject* obj, Py_ssize_t *offset,
PyObject *item, *subarray_tuple;
Py_ssize_t total_count = 1;
Py_ssize_t dim_size;
+ Py_ssize_t old_offset;
char buf[128];
- int old_offset;
int ret;
if (PyTuple_Check(descr->subarray->shape)) {
return ret;
}
else if (PyDataType_HASFIELDS(descr)) {
- int base_offset = *offset;
+ Py_ssize_t base_offset = *offset;
_append_str(str, "T{");
for (k = 0; k < PyTuple_GET_SIZE(descr->names); ++k) {
PyObject *name, *item, *offset_obj, *tmp;
PyArray_Descr *child;
char *p;
- Py_ssize_t len;
- int new_offset;
+ Py_ssize_t len, new_offset;
+ int ret;
name = PyTuple_GET_ITEM(descr->names, k);
item = PyDict_GetItem(descr->fields, name);
}
/* Insert child item */
- _buffer_format_string(child, str, obj, offset,
+ ret = _buffer_format_string(child, str, obj, offset,
active_byteorder);
+ if (ret < 0) {
+ return -1;
+ }
/* Insert field name */
#if defined(NPY_PY3K)
case NPY_CFLOAT: if (_append_str(str, "Zf")) return -1; break;
case NPY_CDOUBLE: if (_append_str(str, "Zd")) return -1; break;
case NPY_CLONGDOUBLE: if (_append_str(str, "Zg")) return -1; break;
- /* XXX: datetime */
- /* XXX: timedelta */
+ /* XXX NPY_DATETIME */
+ /* XXX NPY_TIMEDELTA */
case NPY_OBJECT: if (_append_char(str, 'O')) return -1; break;
case NPY_STRING: {
char buf[128];
info = malloc(sizeof(_buffer_info_t));
if (info == NULL) {
+ PyErr_NoMemory();
goto fail;
}
- if (PyArray_IsScalar(obj, Generic)) {
+ if (PyArray_IsScalar(obj, Datetime) || PyArray_IsScalar(obj, Timedelta)) {
+ /*
+ * Special case datetime64 scalars to remain backward compatible.
+ * This will change in a future version.
+     * Note arrays of datetime64 and structured arrays with datetime64
+ * fields will not hit this code path and are currently unsupported
+ * in _buffer_format_string.
+ */
+ _append_char(&fmt, 'B');
+ _append_char(&fmt, '\0');
+ info->ndim = 1;
+ info->shape = malloc(sizeof(Py_ssize_t) * 2);
+ if (info->shape == NULL) {
+ PyErr_NoMemory();
+ goto fail;
+ }
+ info->strides = info->shape + info->ndim;
+ info->shape[0] = 8;
+ info->strides[0] = 1;
+ info->format = fmt.s;
+ return info;
+ }
+ else if (PyArray_IsScalar(obj, Generic)) {
descr = PyArray_DescrFromScalar(obj);
if (descr == NULL) {
goto fail;
else {
info->shape = malloc(sizeof(Py_ssize_t) * PyArray_NDIM(arr) * 2 + 1);
if (info->shape == NULL) {
+ PyErr_NoMemory();
goto fail;
}
info->strides = info->shape + PyArray_NDIM(arr);
/* Fill in information */
info = _buffer_get_info(self);
if (info == NULL) {
- PyErr_SetString(PyExc_BufferError,
- "could not get scalar buffer information");
goto fail;
}
}
#endif
view->len = elsize;
+ if (PyArray_IsScalar(self, Datetime) || PyArray_IsScalar(self, Timedelta)) {
+        elsize = 1; /* descr gives (elsize, char) == (8, 'M'), but we return (1, 'B') */
+ }
view->itemsize = elsize;
Py_DECREF(descr);
}
if (!out) {
- rp = (PyArrayObject *)PyArray_New(Py_TYPE(ap), PyArray_NDIM(ap)-1,
- PyArray_DIMS(ap), NPY_INTP,
- NULL, NULL, 0, 0,
- (PyObject *)ap);
+ rp = (PyArrayObject *)PyArray_NewFromDescr(
+ Py_TYPE(ap), PyArray_DescrFromType(NPY_INTP),
+ PyArray_NDIM(ap) - 1, PyArray_DIMS(ap), NULL, NULL,
+ 0, (PyObject *)ap);
if (rp == NULL) {
goto fail;
}
}
if (!out) {
- rp = (PyArrayObject *)PyArray_New(Py_TYPE(ap), PyArray_NDIM(ap)-1,
- PyArray_DIMS(ap), NPY_INTP,
- NULL, NULL, 0, 0,
- (PyObject *)ap);
+ rp = (PyArrayObject *)PyArray_NewFromDescr(
+ Py_TYPE(ap), PyArray_DescrFromType(NPY_INTP),
+ PyArray_NDIM(ap) - 1, PyArray_DIMS(ap), NULL, NULL,
+ 0, (PyObject *)ap);
if (rp == NULL) {
goto fail;
}
* __len__ is not defined.
*/
if (maxdims == 0 || !PySequence_Check(obj) || PySequence_Size(obj) < 0) {
- // clear any PySequence_Size error, which corrupts further calls to it
+ /* clear any PySequence_Size error which corrupts further calls */
PyErr_Clear();
if (*out_dtype == NULL || (*out_dtype)->type_num != NPY_OBJECT) {
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "lowlevel_strided_loops.h" /* for npy_bswap8 */
#include "alloc.h"
+#include "ctors.h"
#include "common.h"
npy_intp stride = -PyArray_STRIDE(arr_bins, 0);
void *data = (void *)(PyArray_BYTES(arr_bins) - stride * (shape - 1));
- arr_tmp = (PyArrayObject *)PyArray_New(&PyArray_Type, 1, &shape,
- NPY_DOUBLE, &stride, data, 0,
- PyArray_FLAGS(arr_bins), NULL);
+ arr_tmp = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ &PyArray_Type, PyArray_DescrFromType(NPY_DOUBLE),
+ 1, &shape, &stride, data,
+ PyArray_FLAGS(arr_bins), NULL, (PyObject *)arr_bins);
+ Py_DECREF(arr_bins);
if (!arr_tmp) {
goto fail;
}
-
- if (PyArray_SetBaseObject(arr_tmp, (PyObject *)arr_bins) < 0) {
-
- Py_DECREF(arr_tmp);
- goto fail;
- }
arr_bins = arr_tmp;
}
for (i = 0; i < dimensions.len; ++i) {
PyArrayObject *view;
- view = (PyArrayObject *)PyArray_New(&PyArray_Type, ret_ndim-1,
- ret_dims, NPY_INTP,
- ret_strides,
- PyArray_BYTES(ret_arr) + i*sizeof(npy_intp),
- 0, NPY_ARRAY_WRITEABLE, NULL);
+ view = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ &PyArray_Type, PyArray_DescrFromType(NPY_INTP),
+ ret_ndim - 1, ret_dims, ret_strides,
+ PyArray_BYTES(ret_arr) + i*sizeof(npy_intp),
+ NPY_ARRAY_WRITEABLE, NULL, (PyObject *)ret_arr);
if (view == NULL) {
goto fail;
}
- Py_INCREF(ret_arr);
- if (PyArray_SetBaseObject(view, (PyObject *)ret_arr) < 0) {
- Py_DECREF(view);
- goto fail;
- }
PyTuple_SET_ITEM(ret_tuple, i, PyArray_Return(view));
}
if (PyArray_NDIM(new) == 0) {
char *optr, *iptr;
- out = (PyArrayObject *)PyArray_New(Py_TYPE(new), 0, NULL, NPY_UBYTE,
- NULL, NULL, 0, 0, NULL);
+ out = (PyArrayObject *)PyArray_NewFromDescr(
+ Py_TYPE(new), PyArray_DescrFromType(NPY_UBYTE),
+ 0, NULL, NULL, NULL,
+ 0, NULL);
if (out == NULL) {
goto fail;
}
outdims[axis] = ((outdims[axis] - 1) >> 3) + 1;
/* Create output array */
- out = (PyArrayObject *)PyArray_New(Py_TYPE(new),
- PyArray_NDIM(new), outdims, NPY_UBYTE,
- NULL, NULL, 0, PyArray_ISFORTRAN(new), NULL);
+ out = (PyArrayObject *)PyArray_NewFromDescr(
+ Py_TYPE(new), PyArray_DescrFromType(NPY_UBYTE),
+ PyArray_NDIM(new), outdims, NULL, NULL,
+ PyArray_ISFORTRAN(new), NULL);
if (out == NULL) {
goto fail;
}
outdims[axis] <<= 3;
/* Create output array */
- out = (PyArrayObject *)PyArray_New(Py_TYPE(new),
- PyArray_NDIM(new), outdims, NPY_UBYTE,
- NULL, NULL, 0, PyArray_ISFORTRAN(new), NULL);
+ out = (PyArrayObject *)PyArray_NewFromDescr(
+ Py_TYPE(new), PyArray_DescrFromType(NPY_UBYTE),
+ PyArray_NDIM(new), outdims, NULL, NULL,
+ PyArray_ISFORTRAN(new), NULL);
if (out == NULL) {
goto fail;
}
else if (str[0] == 'm' || str[0] == 'M') {
*sortkind = NPY_MERGESORT;
}
+ else if (str[0] == 's' || str[0] == 'S') {
+ /* mergesort is the only stable sorting method in numpy */
+ *sortkind = NPY_MERGESORT;
+ }
else {
PyErr_Format(PyExc_ValueError,
"%s is an unrecognized kind of sort",
subtype = Py_TYPE(self);
}
- if (type != NULL && (PyArray_FLAGS(self) & NPY_ARRAY_WARN_ON_WRITE)) {
+ dtype = PyArray_DESCR(self);
+
+ if (type != NULL && !PyArray_EquivTypes(dtype, type) &&
+ (PyArray_FLAGS(self) & NPY_ARRAY_WARN_ON_WRITE)) {
const char *msg =
"Numpy has detected that you may be viewing or writing to an array "
"returned by selecting multiple fields in a structured array. \n\n"
- "This code may break in numpy 1.13 because this will return a view "
+ "This code may break in numpy 1.16 because this will return a view "
"instead of a copy -- see release notes for details.";
/* 2016-09-19, 1.12 */
if (DEPRECATE_FUTUREWARNING(msg) < 0) {
flags = PyArray_FLAGS(self);
- dtype = PyArray_DESCR(self);
Py_INCREF(dtype);
- ret = (PyArrayObject *)PyArray_NewFromDescr_int(subtype,
- dtype,
- PyArray_NDIM(self), PyArray_DIMS(self),
- PyArray_STRIDES(self),
- PyArray_DATA(self),
- flags,
- (PyObject *)self, 0, 1);
+ ret = (PyArrayObject *)PyArray_NewFromDescr_int(
+ subtype, dtype,
+ PyArray_NDIM(self), PyArray_DIMS(self), PyArray_STRIDES(self),
+ PyArray_DATA(self),
+ flags, (PyObject *)self, (PyObject *)self,
+ 0, 1);
if (ret == NULL) {
Py_XDECREF(type);
return NULL;
}
- /* Set the base object */
- Py_INCREF(self);
- if (PyArray_SetBaseObject(ret, (PyObject *)self) < 0) {
- Py_DECREF(ret);
- Py_XDECREF(type);
- return NULL;
- }
-
if (type != NULL) {
if (PyObject_SetAttrString((PyObject *)ret, "dtype",
(PyObject *)type) < 0) {
#include "npy_config.h"
+#include "npy_import.h"
#include "npy_pycompat.h"
#include "multiarraymodule.h"
s = start = malloc(strlen(sep)+3);
if (s == NULL) {
+ PyErr_NoMemory();
return NULL;
}
/* add space to front if there isn't one */
NPY_NO_EXPORT PyObject *
PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
npy_intp *dims, npy_intp *strides, void *data,
- int flags, PyObject *obj, int zeroed,
+ int flags, PyObject *obj, PyObject *base, int zeroed,
int allow_emptystring)
{
PyArrayObject_fields *fa;
}
nd =_update_descr_and_dimensions(&descr, newdims,
newstrides, nd);
- ret = PyArray_NewFromDescr_int(subtype, descr, nd, newdims,
- newstrides,
- data, flags, obj, zeroed,
- allow_emptystring);
+ ret = PyArray_NewFromDescr_int(
+ subtype, descr,
+ nd, newdims, newstrides, data,
+ flags, obj, base,
+ zeroed, allow_emptystring);
return ret;
}
}
else {
fa->flags = (flags & ~NPY_ARRAY_WRITEBACKIFCOPY);
- fa->flags = (fa->flags & ~NPY_ARRAY_UPDATEIFCOPY);
+ fa->flags &= ~NPY_ARRAY_UPDATEIFCOPY;
}
fa->descr = descr;
fa->base = (PyObject *)NULL;
*/
PyArray_UpdateFlags((PyArrayObject *)fa, NPY_ARRAY_UPDATE_ALL);
+ /* Set the base object. It's important to do it here so that
+ * __array_finalize__ below receives it
+ */
+ if (base != NULL) {
+ Py_INCREF(base);
+ if (PyArray_SetBaseObject((PyArrayObject *)fa, base) < 0) {
+ goto fail;
+ }
+ }
+
/*
* call the __array_finalize__
* method if a subtype.
* true, dtype will be decrefed.
*/
NPY_NO_EXPORT PyObject *
-PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
- npy_intp *dims, npy_intp *strides, void *data,
+PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr,
+ int nd, npy_intp *dims, npy_intp *strides, void *data,
int flags, PyObject *obj)
+{
+ return PyArray_NewFromDescrAndBase(
+ subtype, descr,
+ nd, dims, strides, data,
+ flags, obj, NULL);
+}
+
+/*
+ * Sets the base object using PyArray_SetBaseObject
+ */
+NPY_NO_EXPORT PyObject *
+PyArray_NewFromDescrAndBase(
+ PyTypeObject *subtype, PyArray_Descr *descr,
+ int nd, npy_intp *dims, npy_intp *strides, void *data,
+ int flags, PyObject *obj, PyObject *base)
{
return PyArray_NewFromDescr_int(subtype, descr, nd,
dims, strides, data,
- flags, obj, 0, 0);
+ flags, obj, base, 0, 0);
}
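Much of the churn below is call sites adopting this helper; it folds the old
four-step pattern (NewFromDescr, Py_INCREF(base), PyArray_SetBaseObject,
PyArray_UpdateFlags) into one call. A sketch of the consolidated form for
numpy-internal code (the symbol is NPY_NO_EXPORT; n, data and owner are
assumed locals; descr is stolen, base is increfed):

    /* wrap existing memory as a 1-d double array that keeps `owner` alive */
    PyArray_Descr *descr = PyArray_DescrFromType(NPY_DOUBLE);
    PyObject *view = PyArray_NewFromDescrAndBase(
            &PyArray_Type, descr,
            1, &n, NULL, data,
            NPY_ARRAY_DEFAULT, NULL, owner);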
/*NUMPY_API
}
+NPY_NO_EXPORT PyArray_Descr *
+_dtype_from_buffer_3118(PyObject *memoryview)
+{
+ PyArray_Descr *descr;
+ Py_buffer *view = PyMemoryView_GET_BUFFER(memoryview);
+ if (view->format != NULL) {
+ descr = _descriptor_from_pep3118_format(view->format);
+ if (descr == NULL) {
+ return NULL;
+ }
+ }
+ else {
+ /* If no format is specified, just assume a byte array
+ * TODO: void would make more sense here, as it wouldn't null
+ * terminate.
+ */
+ descr = PyArray_DescrNewFromType(NPY_STRING);
+ descr->elsize = view->itemsize;
+ }
+ return descr;
+}
+
+
+/*
+ * Call the python _is_from_ctypes
+ */
NPY_NO_EXPORT int
-_array_from_buffer_3118(PyObject *obj, PyObject **out)
+_is_from_ctypes(PyObject *obj) {
+ PyObject *ret_obj;
+ static PyObject *py_func = NULL;
+
+ npy_cache_import("numpy.core._internal", "_is_from_ctypes", &py_func);
+
+ if (py_func == NULL) {
+ return -1;
+ }
+ ret_obj = PyObject_CallFunctionObjArgs(py_func, obj, NULL);
+ if (ret_obj == NULL) {
+ return -1;
+ }
+
+    {
+        int ret = PyObject_IsTrue(ret_obj);
+        Py_DECREF(ret_obj);  /* release the temporary result */
+        return ret;
+    }
+}
+
+
+NPY_NO_EXPORT PyObject *
+_array_from_buffer_3118(PyObject *memoryview)
{
/* PEP 3118 */
- PyObject *memoryview;
Py_buffer *view;
PyArray_Descr *descr = NULL;
- PyObject *r;
- int nd, flags, k;
+ PyObject *r = NULL;
+ int nd, flags;
Py_ssize_t d;
npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
- memoryview = PyMemoryView_FromObject(obj);
- if (memoryview == NULL) {
- PyErr_Clear();
- return -1;
+ view = PyMemoryView_GET_BUFFER(memoryview);
+ nd = view->ndim;
+ descr = _dtype_from_buffer_3118(memoryview);
+
+ if (descr == NULL) {
+ return NULL;
}
- view = PyMemoryView_GET_BUFFER(memoryview);
- if (view->format != NULL) {
- descr = _descriptor_from_pep3118_format(view->format);
- if (descr == NULL) {
- PyObject *msg;
- msg = PyBytes_FromFormat("Invalid PEP 3118 format string: '%s'",
- view->format);
- PyErr_WarnEx(PyExc_RuntimeWarning, PyBytes_AS_STRING(msg), 0);
- Py_DECREF(msg);
- goto fail;
+ /* Sanity check */
+ if (descr->elsize != view->itemsize) {
+ /* Ctypes has bugs in its PEP3118 implementation, which we need to
+ * work around.
+ *
+ * bpo-10746
+ * bpo-32780
+ * bpo-32782
+ *
+     * Note that even once the above are fixed upstream, we still have to
+     * drop support for the early patch versions of python to actually
+     * benefit from the fixes.
+ */
+
+ int is_ctypes = _is_from_ctypes(view->obj);
+ if (is_ctypes < 0) {
+ /* This error is not useful */
+ PyErr_WriteUnraisable(view->obj);
+ is_ctypes = 0;
+ }
+
+ if (!is_ctypes) {
+ /* This object has no excuse for a broken PEP3118 buffer */
+ PyErr_Format(
+ PyExc_RuntimeError,
+ "Item size %zd for PEP 3118 buffer format "
+ "string %s does not match the dtype %c item size %d.",
+ view->itemsize, view->format, descr->type,
+ descr->elsize);
+ Py_DECREF(descr);
+ return NULL;
}
- /* Sanity check */
- if (descr->elsize != view->itemsize) {
- PyErr_WarnEx(PyExc_RuntimeWarning,
- "Item size computed from the PEP 3118 buffer format "
- "string does not match the actual item size.",
- 0);
- goto fail;
+ if (PyErr_Warn(
+ PyExc_RuntimeWarning,
+ "A builtin ctypes object gave a PEP3118 format "
+ "string that does not match its itemsize, so a "
+ "best-guess will be made of the data type. "
+ "Newer versions of python may behave correctly.") < 0) {
+ Py_DECREF(descr);
+ return NULL;
+ }
+
+ /* Thankfully, np.dtype(ctypes_type) works in most cases.
+ * For an array input, this produces a dtype containing all the
+ * dimensions, so the array is now 0d.
+ */
+ nd = 0;
+ descr = (PyArray_Descr *)PyObject_CallFunctionObjArgs(
+ (PyObject *)&PyArrayDescr_Type, Py_TYPE(view->obj), NULL);
+ if (descr == NULL) {
+ return NULL;
+ }
+ if (descr->elsize != view->len) {
+ PyErr_SetString(
+ PyExc_RuntimeError,
+ "For the given ctypes object, neither the item size "
+ "computed from the PEP 3118 buffer format nor from "
+ "converting the type to a np.dtype matched the actual "
+ "size. This is a bug both in python and numpy");
+ Py_DECREF(descr);
+ return NULL;
}
- }
- else {
- descr = PyArray_DescrNewFromType(NPY_STRING);
- descr->elsize = view->itemsize;
}
- nd = view->ndim;
if (view->shape != NULL) {
- if (nd >= NPY_MAXDIMS || nd < 0) {
+ int k;
+ if (nd > NPY_MAXDIMS || nd < 0) {
+ PyErr_Format(PyExc_RuntimeError,
+ "PEP3118 dimensions do not satisfy 0 <= ndim <= NPY_MAXDIMS");
goto fail;
}
for (k = 0; k < nd; ++k) {
- if (k >= NPY_MAXDIMS) {
- goto fail;
- }
shape[k] = view->shape[k];
}
if (view->strides != NULL) {
strides[0] = view->itemsize;
}
else if (nd > 1) {
- PyErr_WarnEx(PyExc_RuntimeWarning,
- "ndim computed from the PEP 3118 buffer format "
- "is greater than 1, but shape is NULL.",
- 0);
+ PyErr_SetString(PyExc_RuntimeError,
+ "ndim computed from the PEP 3118 buffer format "
+ "is greater than 1, but shape is NULL.");
goto fail;
}
}
flags = NPY_ARRAY_BEHAVED & (view->readonly ? ~NPY_ARRAY_WRITEABLE : ~0);
- r = PyArray_NewFromDescr(&PyArray_Type, descr,
- nd, shape, strides, view->buf,
- flags, NULL);
- if (r == NULL ||
- PyArray_SetBaseObject((PyArrayObject *)r, memoryview) < 0) {
- Py_XDECREF(r);
- Py_DECREF(memoryview);
- return -1;
- }
- PyArray_UpdateFlags((PyArrayObject *)r, NPY_ARRAY_UPDATE_ALL);
+ r = PyArray_NewFromDescrAndBase(
+ &PyArray_Type, descr,
+ nd, shape, strides, view->buf,
+ flags, NULL, memoryview);
+ return r;
- *out = r;
- return 0;
fail:
+ Py_XDECREF(r);
Py_XDECREF(descr);
- Py_DECREF(memoryview);
- return -1;
+ return NULL;
}
}
/* If op supports the PEP 3118 buffer interface */
- if (!PyBytes_Check(op) && !PyUnicode_Check(op) &&
- _array_from_buffer_3118(op, (PyObject **)out_arr) == 0) {
- if (writeable
- && PyArray_FailUnlessWriteable(*out_arr, "PEP 3118 buffer") < 0) {
- Py_DECREF(*out_arr);
- return -1;
+ if (!PyBytes_Check(op) && !PyUnicode_Check(op)) {
+
+ PyObject *memoryview = PyMemoryView_FromObject(op);
+ if (memoryview == NULL) {
+ PyErr_Clear();
+ }
+ else {
+ PyObject *arr = _array_from_buffer_3118(memoryview);
+ Py_DECREF(memoryview);
+ if (arr == NULL) {
+ return -1;
+ }
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *)arr, "PEP 3118 buffer") < 0) {
+ Py_DECREF(arr);
+ return -1;
+ }
+ *out_arr = (PyArrayObject *)arr;
+ return 0;
}
- return (*out_arr) == NULL ? -1 : 0;
}
/* If op supports the __array_struct__ or __array_interface__ interface */
}
}
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, thetype,
- inter->nd, inter->shape,
- inter->strides, inter->data,
- inter->flags, NULL);
- Py_INCREF(input);
- if (PyArray_SetBaseObject(ret, input) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
+ ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ &PyArray_Type, thetype,
+ inter->nd, inter->shape, inter->strides, inter->data,
+ inter->flags, NULL, input);
Py_DECREF(attr);
- PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL);
return (PyObject *)ret;
fail:
}
}
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype,
- n, dims,
- NULL, data,
- dataflags, NULL);
+ ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ &PyArray_Type, dtype,
+ n, dims, NULL, data,
+ dataflags, NULL, base);
if (ret == NULL) {
goto fail;
}
goto fail;
}
}
- if (base) {
- Py_INCREF(base);
- if (PyArray_SetBaseObject(ret, base) < 0) {
- Py_DECREF(ret);
- goto fail;
- }
- }
attr = PyDict_GetItemString(iface, "strides");
if (attr != NULL && attr != Py_None) {
if (!PyTuple_Check(attr)) {
type = PyArray_DescrFromType(NPY_DEFAULT_TYPE);
}
- ret = (PyArrayObject *)PyArray_NewFromDescr_int(&PyArray_Type,
- type,
- nd, dims,
- NULL, NULL,
- is_f_order, NULL, 1, 0);
+ ret = (PyArrayObject *)PyArray_NewFromDescr_int(
+ &PyArray_Type, type,
+ nd, dims, NULL, NULL,
+ is_f_order, NULL, NULL,
+ 1, 0);
if (ret == NULL) {
return NULL;
PyArray_ArrFuncs *funcs;
PyObject *obj;
int ret;
+ double delta, tmp_len;
NPY_BEGIN_THREADS_DEF;
- length = _arange_safe_ceil_to_intp((stop - start)/step);
- if (error_converting(length)) {
- return NULL;
+ delta = stop - start;
+ tmp_len = delta/step;
+
+ /* Underflow and divide-by-inf check */
+ if (tmp_len == 0.0 && delta != 0.0) {
+ if (npy_signbit(tmp_len)) {
+ length = 0;
+ }
+ else {
+ length = 1;
+ }
+ }
+ else {
+ length = _arange_safe_ceil_to_intp(tmp_len);
+ if (error_converting(length)) {
+ return NULL;
+ }
}
if (length <= 0) {
_calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, int cmplx)
{
npy_intp len, tmp;
- PyObject *val;
+ PyObject *zero, *val;
+ int next_is_nonzero, val_is_zero;
double value;
*next = PyNumber_Subtract(stop, start);
}
return -1;
}
+
+ zero = PyInt_FromLong(0);
+ if (!zero) {
+ Py_DECREF(*next);
+ *next = NULL;
+ return -1;
+ }
+
+ next_is_nonzero = PyObject_RichCompareBool(*next, zero, Py_NE);
+ if (next_is_nonzero == -1) {
+ Py_DECREF(zero);
+ Py_DECREF(*next);
+ *next = NULL;
+ return -1;
+ }
val = PyNumber_TrueDivide(*next, step);
Py_DECREF(*next);
*next = NULL;
+
if (!val) {
+ Py_DECREF(zero);
return -1;
}
+
+ val_is_zero = PyObject_RichCompareBool(val, zero, Py_EQ);
+ Py_DECREF(zero);
+ if (val_is_zero == -1) {
+ Py_DECREF(val);
+ return -1;
+ }
+
if (cmplx && PyComplex_Check(val)) {
value = PyComplex_RealAsDouble(val);
if (error_converting(value)) {
if (error_converting(value)) {
return -1;
}
- len = _arange_safe_ceil_to_intp(value);
- if (error_converting(len)) {
- return -1;
+
+ /* Underflow and divide-by-inf check */
+ if (val_is_zero && next_is_nonzero) {
+ if (npy_signbit(value)) {
+ len = 0;
+ }
+ else {
+ len = 1;
+ }
+ }
+ else {
+ len = _arange_safe_ceil_to_intp(value);
+ if (error_converting(len)) {
+ return -1;
+ }
}
}
+
if (len > 0) {
*next = PyNumber_Add(start, step);
if (!*next) {
}
if (dtype->elsize == 0) {
/* Nothing to read, just create an empty array of the requested type */
- return PyArray_NewFromDescr_int(&PyArray_Type,
- dtype,
- 1, &num,
- NULL, NULL,
- 0, NULL, 0, 1);
+ return PyArray_NewFromDescr_int(
+ &PyArray_Type, dtype,
+ 1, &num, NULL, NULL,
+ 0, NULL, NULL,
+ 0, 1);
}
if ((sep == NULL) || (strlen(sep) == 0)) {
ret = array_fromfile_binary(fp, dtype, num, &nread);
}
}
- if ((ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- type,
- 1, &n,
- NULL, data,
- NPY_ARRAY_DEFAULT,
- NULL)) == NULL) {
- Py_DECREF(buf);
+ ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ &PyArray_Type, type,
+ 1, &n, NULL, data,
+ NPY_ARRAY_DEFAULT, NULL, buf);
+ Py_DECREF(buf);
+ if (ret == NULL) {
return NULL;
}
if (!writeable) {
PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE);
}
- /* Store a reference for decref on deallocation */
- if (PyArray_SetBaseObject(ret, buf) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- PyArray_UpdateFlags(ret, NPY_ARRAY_ALIGNED);
return (PyObject *)ret;
}
npy_intp *dims, npy_intp *strides, void *data,
int flags, PyObject *obj);
+NPY_NO_EXPORT PyObject *
+PyArray_NewFromDescrAndBase(
+ PyTypeObject *subtype, PyArray_Descr *descr,
+ int nd, npy_intp *dims, npy_intp *strides, void *data,
+ int flags, PyObject *obj, PyObject *base);
+
NPY_NO_EXPORT PyObject *
PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
npy_intp *dims, npy_intp *strides, void *data,
- int flags, PyObject *obj, int zeroed,
+ int flags, PyObject *obj, PyObject *base, int zeroed,
int allow_emptystring);
NPY_NO_EXPORT PyObject *PyArray_New(PyTypeObject *, int nd, npy_intp *,
us_meta.base = NPY_FR_m;
}
else if (td % (24*60*60*1000000LL) != 0) {
- us_meta.base = NPY_FR_D;
+ us_meta.base = NPY_FR_h;
}
else if (td % (7*24*60*60*1000000LL) != 0) {
+ us_meta.base = NPY_FR_D;
+ }
+ else {
us_meta.base = NPY_FR_W;
}
us_meta.num = 1;
return 0;
}
- /* Python date object -> 'D' */
- else if (PyDate_Check(obj)) {
+ /* Python datetime object -> 'us' */
+ else if (PyDateTime_Check(obj)) {
PyArray_DatetimeMetaData tmp_meta;
- tmp_meta.base = NPY_FR_D;
+ tmp_meta.base = NPY_FR_us;
tmp_meta.num = 1;
/* Combine it with 'meta' */
return 0;
}
- /* Python datetime object -> 'us' */
- else if (PyDateTime_Check(obj)) {
+ /* Python date object -> 'D' */
+ else if (PyDate_Check(obj)) {
PyArray_DatetimeMetaData tmp_meta;
- tmp_meta.base = NPY_FR_us;
+ tmp_meta.base = NPY_FR_D;
tmp_meta.num = 1;
/* Combine it with 'meta' */
}
for (i = 0; i < len; ++i) {
+ int ret;
PyObject *f = PySequence_GetItem(obj, i);
if (f == NULL) {
return -1;
}
- if (f == obj) {
- Py_DECREF(f);
- return 0;
- }
- if (recursive_find_object_datetime64_type(f, meta) < 0) {
+ if (Npy_EnterRecursiveCall(" in recursive_find_object_datetime64_type") != 0) {
Py_DECREF(f);
return -1;
}
+ ret = recursive_find_object_datetime64_type(f, meta);
+ Py_LeaveRecursiveCall();
Py_DECREF(f);
+ if (ret < 0) {
+ return ret;
+ }
}
return 0;
}
for (i = 0; i < len; ++i) {
+ int ret;
PyObject *f = PySequence_GetItem(obj, i);
if (f == NULL) {
return -1;
}
- if (f == obj) {
- Py_DECREF(f);
- return 0;
- }
- if (recursive_find_object_timedelta64_type(f, meta) < 0) {
+ if (Npy_EnterRecursiveCall(" in recursive_find_object_timedelta64_type") != 0) {
Py_DECREF(f);
return -1;
}
+ ret = recursive_find_object_timedelta64_type(f, meta);
+ Py_LeaveRecursiveCall();
Py_DECREF(f);
+ if (ret < 0) {
+ return ret;
+ }
}
return 0;
}
/* Leading '-' sign for negative year */
- if (*substr == '-') {
+ if (*substr == '-' || *substr == '+') {
++substr;
--sublen;
}
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "descriptor.h"
#include "alloc.h"
+#include "assert.h"
/*
* offset: A starting offset.
/* derived type */
PyObject *newtup;
PyArray_Descr *derived;
- newtup = Py_BuildValue("NN", newdescr, length);
+ newtup = Py_BuildValue("N(N)", newdescr, length);
ret = PyArray_DescrConverter(newtup, &derived);
Py_DECREF(newtup);
if (ret == NPY_SUCCEED) {
}
static PyArray_Descr *
-_convert_from_tuple(PyObject *obj)
+_convert_from_tuple(PyObject *obj, int align)
{
PyArray_Descr *type, *res;
PyObject *val;
if (PyTuple_GET_SIZE(obj) != 2) {
return NULL;
}
- if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj,0), &type)) {
- return NULL;
+ if (align) {
+ if (!PyArray_DescrAlignConverter(PyTuple_GET_ITEM(obj, 0), &type)) {
+ return NULL;
+ }
}
+ else {
+ if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj, 0), &type)) {
+ return NULL;
+ }
+ }
val = PyTuple_GET_ITEM(obj,1);
/* try to interpret next item as a type */
res = _use_inherit(type, val, &errflag);
}
else if (PyTuple_Check(obj)) {
/* or a tuple */
- *at = _convert_from_tuple(obj);
+ *at = _convert_from_tuple(obj, 0);
if (*at == NULL){
if (PyErr_Occurred()) {
return NPY_FAIL;
if (!PyDataType_HASSUBARRAY(self)) {
return PyTuple_New(0);
}
- /*TODO
- * self->subarray->shape should always be a tuple,
- * so this check should be unnecessary
- */
- if (PyTuple_Check(self->subarray->shape)) {
- Py_INCREF(self->subarray->shape);
- return (PyObject *)(self->subarray->shape);
- }
- return Py_BuildValue("(O)", self->subarray->shape);
+ assert(PyTuple_Check(self->subarray->shape));
+ Py_INCREF(self->subarray->shape);
+ return self->subarray->shape;
}
static PyObject *
arraydescr_ndim_get(PyArray_Descr *self)
{
+ Py_ssize_t ndim;
+
if (!PyDataType_HASSUBARRAY(self)) {
return PyInt_FromLong(0);
}
- /*TODO
- * self->subarray->shape should always be a tuple,
- * so this check should be unnecessary
+
+ /*
+     * PyTuple_Size has a built-in check
+     * for a tuple argument
*/
- if (PyTuple_Check(self->subarray->shape)) {
- Py_ssize_t ndim = PyTuple_Size(self->subarray->shape);
- return PyInt_FromLong(ndim);
- }
- /* consistent with arraydescr_shape_get */
- return PyInt_FromLong(1);
+ ndim = PyTuple_Size(self->subarray->shape);
+ return PyInt_FromLong(ndim);
}
*at = _convert_from_commastring(tmp, 1);
Py_DECREF(tmp);
}
+ else if (PyTuple_Check(obj)) {
+ *at = _convert_from_tuple(obj, 1);
+ }
else if (PyList_Check(obj)) {
*at = _convert_from_array_descr(obj, 1);
}
#define DEBUG_ASSERT(stmnt) do {} while(0)
#endif
+static inline npy_uint64
+bitmask_u64(npy_uint32 n)
+{
+ return ~(~((npy_uint64)0) << n);
+}
+
+static inline npy_uint32
+bitmask_u32(npy_uint32 n)
+{
+ return ~(~((npy_uint32)0) << n);
+}
+
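These helpers replace the bare 0xFFFFFFFF masks used throughout the
arithmetic below: bitmask_u64(32) is the low-32-bit mask and bitmask_u32(3)
selects the 3 low bits used for the small power-of-10 lookups. A standalone
check (definitions copied from the diff, with stdint types):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t bitmask_u64(uint32_t n) { return ~(~(uint64_t)0 << n); }
    static uint32_t bitmask_u32(uint32_t n) { return ~(~(uint32_t)0 << n); }

    int main(void)
    {
        printf("%llx\n", (unsigned long long)bitmask_u64(32));  /* ffffffff */
        printf("%x\n", bitmask_u32(3));                         /* 7 */
        return 0;
    }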
/*
* Get the log base 2 of a 32-bit unsigned integer.
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
return LogBase2_32((npy_uint32)val);
}
+#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || defined(HAVE_LDOUBLE_IEEE_QUAD_BE)
+static npy_uint32
+LogBase2_128(npy_uint64 hi, npy_uint64 lo)
+{
+ if (hi) {
+ return 64 + LogBase2_64(hi);
+ }
+
+ return LogBase2_64(lo);
+}
+#endif /* HAVE_LDOUBLE_IEEE_QUAD_LE || HAVE_LDOUBLE_IEEE_QUAD_BE */
/*
* Maximum number of 32 bit blocks needed in high precision arithmetic to print
npy_uint32 blocks[c_BigInt_MaxBlocks];
} BigInt;
+/*
+ * Dummy implementation of a memory manager for BigInts. Currently, only
+ * supports a single call to Dragon4, but that is OK because Dragon4
+ * does not release the GIL.
+ *
+ * We try to raise an error anyway if dragon4 re-enters, and this code serves
+ * as a placeholder if we want to make it re-entrant in the future.
+ *
+ * Each call to dragon4 uses 7 BigInts.
+ */
+#define BIGINT_DRAGON4_GROUPSIZE 7
+typedef struct {
+ BigInt bigints[BIGINT_DRAGON4_GROUPSIZE];
+ char repr[16384];
+} Dragon4_Scratch;
+
+static int _bigint_static_in_use = 0;
+static Dragon4_Scratch _bigint_static;
+
+static Dragon4_Scratch*
+get_dragon4_bigint_scratch(void) {
+    /* this test-and-set is not threadsafe, but that is fine since we hold the GIL */
+ if (_bigint_static_in_use) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "numpy float printing code is not re-entrant. "
+ "Ping the devs to fix it.");
+ return NULL;
+ }
+ _bigint_static_in_use = 1;
+
+ /* in this dummy implementation we only return the static allocation */
+ return &_bigint_static;
+}
+
+static void
+free_dragon4_bigint_scratch(Dragon4_Scratch *mem) {
+ _bigint_static_in_use = 0;
+}
+
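The intended call pattern for the scratch manager, as a sketch (the real
caller is the Dragon4 entry path elsewhere in this patch):

    Dragon4_Scratch *scratch = get_dragon4_bigint_scratch();
    if (scratch == NULL) {
        return NULL;  /* re-entered; a RuntimeError has already been set */
    }
    /* fill scratch->bigints[0] with the mantissa, run Dragon4, then ... */
    free_dragon4_bigint_scratch(scratch);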
/* Copy integer */
static void
BigInt_Copy(BigInt *dst, const BigInt *src)
static void
BigInt_Set_uint64(BigInt *i, npy_uint64 val)
{
- if (val > 0xFFFFFFFF) {
- i->blocks[0] = val & 0xFFFFFFFF;
- i->blocks[1] = (val >> 32) & 0xFFFFFFFF;
+ if (val > bitmask_u64(32)) {
+ i->blocks[0] = val & bitmask_u64(32);
+ i->blocks[1] = (val >> 32) & bitmask_u64(32);
i->length = 2;
}
else if (val != 0) {
- i->blocks[0] = val & 0xFFFFFFFF;
+ i->blocks[0] = val & bitmask_u64(32);
+ i->length = 1;
+ }
+ else {
+ i->length = 0;
+ }
+}
+
+#if (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
+ defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) || \
+ defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || \
+ defined(HAVE_LDOUBLE_IEEE_QUAD_BE))
+static void
+BigInt_Set_2x_uint64(BigInt *i, npy_uint64 hi, npy_uint64 lo)
+{
+ if (hi > bitmask_u64(32)) {
+ i->length = 4;
+ }
+ else if (hi != 0) {
+ i->length = 3;
+ }
+ else if (lo > bitmask_u64(32)) {
+ i->length = 2;
+ }
+ else if (lo != 0) {
i->length = 1;
}
else {
i->length = 0;
}
+
+ /* Note deliberate fallthrough in this switch */
+ switch (i->length) {
+ case 4:
+ i->blocks[3] = (hi >> 32) & bitmask_u64(32);
+ case 3:
+ i->blocks[2] = hi & bitmask_u64(32);
+ case 2:
+ i->blocks[1] = (lo >> 32) & bitmask_u64(32);
+ case 1:
+ i->blocks[0] = lo & bitmask_u64(32);
+ }
}
+#endif /* DOUBLE_DOUBLE and QUAD */
static void
BigInt_Set_uint32(BigInt *i, npy_uint32 val)
{
if (val != 0) {
i->blocks[0] = val;
- i->length = (val != 0);
+ i->length = 1;
}
else {
i->length = 0;
}
}
+/*
+ * Returns 1 if the value is zero
+ */
+static int
+BigInt_IsZero(const BigInt *i)
+{
+ return i->length == 0;
+}
+
+/*
+ * Returns 1 if the value is even
+ */
+static int
+BigInt_IsEven(const BigInt *i)
+{
+ return (i->length == 0) || ( (i->blocks[0] % 2) == 0);
+}
+
/*
* Returns 0 if (lhs = rhs), negative if (lhs < rhs), positive if (lhs > rhs)
*/
npy_uint64 sum = carry + (npy_uint64)(*largeCur) +
(npy_uint64)(*smallCur);
carry = sum >> 32;
- *resultCur = sum & 0xFFFFFFFF;
+ *resultCur = sum & bitmask_u64(32);
++largeCur;
++smallCur;
++resultCur;
while (largeCur != largeEnd) {
npy_uint64 sum = carry + (npy_uint64)(*largeCur);
carry = sum >> 32;
- (*resultCur) = sum & 0xFFFFFFFF;
+ (*resultCur) = sum & bitmask_u64(32);
++largeCur;
++resultCur;
}
npy_uint64 product = (*resultCur) +
(*largeCur)*(npy_uint64)multiplier + carry;
carry = product >> 32;
- *resultCur = product & 0xFFFFFFFF;
+ *resultCur = product & bitmask_u64(32);
++largeCur;
++resultCur;
} while(largeCur != large->blocks + large->length);
DEBUG_ASSERT(resultCur < result->blocks + maxResultLen);
- *resultCur = (npy_uint32)(carry & 0xFFFFFFFF);
+ *resultCur = (npy_uint32)(carry & bitmask_u64(32));
}
}
const npy_uint32 *pLhsEnd = lhs->blocks + lhs->length;
for ( ; pLhsCur != pLhsEnd; ++pLhsCur, ++resultCur) {
npy_uint64 product = (npy_uint64)(*pLhsCur) * rhs + carry;
- *resultCur = (npy_uint32)(product & 0xFFFFFFFF);
+ *resultCur = (npy_uint32)(product & bitmask_u64(32));
carry = product >> 32;
}
npy_uint32 *end = result->blocks + result->length;
for ( ; cur != end; ++cur) {
npy_uint64 product = (npy_uint64)(*cur) * 10ull + carry;
- (*cur) = (npy_uint32)(product & 0xFFFFFFFF);
+ (*cur) = (npy_uint32)(product & bitmask_u64(32));
carry = product >> 32;
}
/* result = 10^exponent */
static void
-BigInt_Pow10(BigInt *result, npy_uint32 exponent)
+BigInt_Pow10(BigInt *result, npy_uint32 exponent, BigInt *temp)
{
- /* create two temporary values to reduce large integer copy operations */
- BigInt temp1;
- BigInt temp2;
- BigInt *curTemp = &temp1;
- BigInt *pNextTemp = &temp2;
+ /* use two temporary values to reduce large integer copy operations */
+ BigInt *curTemp = result;
+ BigInt *pNextTemp = temp;
npy_uint32 smallExponent;
npy_uint32 tableIdx = 0;
* initialize the result by looking up a 32-bit power of 10 corresponding to
* the first 3 bits
*/
- smallExponent = exponent & 0x7;
+ smallExponent = exponent & bitmask_u32(3);
BigInt_Set_uint32(curTemp, g_PowerOf10_U32[smallExponent]);
/* remove the low bits that we used for the 32-bit lookup table */
}
/* output the result */
- BigInt_Copy(result, curTemp);
+ if (curTemp != result) {
+ BigInt_Copy(result, curTemp);
+ }
}
-/* result = in * 10^exponent */
+/* in = in * 10^exponent */
static void
-BigInt_MultiplyPow10(BigInt *result, const BigInt *in, npy_uint32 exponent)
+BigInt_MultiplyPow10(BigInt *in, npy_uint32 exponent, BigInt *temp)
{
-
- /* create two temporary values to reduce large integer copy operations */
- BigInt temp1;
- BigInt temp2;
- BigInt *curTemp = &temp1;
- BigInt *pNextTemp = &temp2;
+ /* use two temporary values to reduce large integer copy operations */
+ BigInt *curTemp, *pNextTemp;
npy_uint32 smallExponent;
npy_uint32 tableIdx = 0;
* initialize the result by looking up a 32-bit power of 10 corresponding to
* the first 3 bits
*/
- smallExponent = exponent & 0x7;
+ smallExponent = exponent & bitmask_u32(3);
if (smallExponent != 0) {
- BigInt_Multiply_int(curTemp, in, g_PowerOf10_U32[smallExponent]);
+ BigInt_Multiply_int(temp, in, g_PowerOf10_U32[smallExponent]);
+ curTemp = temp;
+ pNextTemp = in;
}
else {
- BigInt_Copy(curTemp, in);
+ curTemp = in;
+ pNextTemp = temp;
}
/* remove the low bits that we used for the 32-bit lookup table */
/* multiply into the next temporary */
BigInt_Multiply(pNextTemp, curTemp, &g_PowerOf10_Big[tableIdx]);
- // swap to the next temporary
+ /* swap to the next temporary */
pSwap = curTemp;
curTemp = pNextTemp;
pNextTemp = pSwap;
}
/* output the result */
- BigInt_Copy(result, curTemp);
+    if (curTemp != in) {
+ BigInt_Copy(in, curTemp);
+ }
}
/* result = 2^exponent */
*/
 DEBUG_ASSERT(divisor->length != 0 &&
divisor->blocks[divisor->length-1] >= 8 &&
- divisor->blocks[divisor->length-1] < 0xFFFFFFFF &&
+ divisor->blocks[divisor->length-1] < bitmask_u64(32) &&
dividend->length <= divisor->length);
/*
carry = product >> 32;
difference = (npy_uint64)*dividendCur
- - (product & 0xFFFFFFFF) - borrow;
+ - (product & bitmask_u64(32)) - borrow;
borrow = (difference >> 32) & 1;
- *dividendCur = difference & 0xFFFFFFFF;
+ *dividendCur = difference & bitmask_u64(32);
++divisorCur;
++dividendCur;
- (npy_uint64)*divisorCur - borrow;
borrow = (difference >> 32) & 1;
- *dividendCur = difference & 0xFFFFFFFF;
+ *dividendCur = difference & bitmask_u64(32);
++divisorCur;
++dividendCur;
* There is some more documentation of these changes on Ryan Juckett's website
* at http://www.ryanjuckett.com/programming/printing-floating-point-numbers/
*
- * Ryan Juckett's implementation did not implement "IEEE unbiased rounding",
- * except in the last digit. This has been added back, following the Burger &
- * Dybvig code, using the isEven variable.
+ * This code also has a few implementation differences from Ryan Juckett's
+ * version:
+ * 1. fixed overflow problems when mantissa was 64 bits (in float128 types),
+ * by replacing multiplication by 2 or 4 by BigInt_ShiftLeft calls.
+ * 2. Increased c_BigInt_MaxBlocks, for 128-bit floats
+ * 3. Added more entries to the g_PowerOf10_Big table, for 128-bit floats.
+ * 4. Added unbiased rounding calculation with isEven. Ryan Juckett's
+ * implementation did not implement "IEEE unbiased rounding", except in the
+ * last digit. This has been added back, following the Burger & Dybvig
+ * code, using the isEven variable.
*
* Arguments:
- * * mantissa - value significand
+ * * bigints - memory to store all bigints needed (7) for dragon4 computation.
+ * The first BigInt should be filled in with the mantissa.
* * exponent - value exponent in base 2
* * mantissaBit - index of the highest set mantissa bit
* * hasUnequalMargins - is the high margin twice as large as the low margin
* * pOutBuffer - buffer to output into
* * bufferSize - maximum characters that can be printed to pOutBuffer
* * pOutExponent - the base 10 exponent of the first digit
+ *
+ * Returns the number of digits written to the output buffer.
*/
static npy_uint32
-Dragon4(const npy_uint64 mantissa, const npy_int32 exponent,
+Dragon4(BigInt *bigints, const npy_int32 exponent,
const npy_uint32 mantissaBit, const npy_bool hasUnequalMargins,
const DigitMode digitMode, const CutoffMode cutoffMode,
npy_int32 cutoffNumber, char *pOutBuffer,
* Here, marginLow and marginHigh represent 1/2 of the distance to the next
* floating point value above/below the mantissa.
*
- * scaledMarginHigh is a pointer so that it can point to scaledMarginLow in
- * the case they must be equal to each other, otherwise it will point to
- * optionalMarginHigh.
+ * scaledMarginHigh will point to scaledMarginLow in the case they must be
+ * equal to each other, otherwise it will point to optionalMarginHigh.
*/
- BigInt scale;
- BigInt scaledValue;
- BigInt scaledMarginLow;
+ BigInt *mantissa = &bigints[0]; /* the only initialized bigint */
+ BigInt *scale = &bigints[1];
+ BigInt *scaledValue = &bigints[2];
+ BigInt *scaledMarginLow = &bigints[3];
BigInt *scaledMarginHigh;
- BigInt optionalMarginHigh;
+ BigInt *optionalMarginHigh = &bigints[4];
+
+ BigInt *temp1 = &bigints[5];
+ BigInt *temp2 = &bigints[6];
const npy_float64 log10_2 = 0.30102999566398119521373889472449;
npy_int32 digitExponent, cutoffExponent, hiBlock;
npy_uint32 outputDigit; /* current digit being output */
npy_uint32 outputLen;
- npy_bool isEven = (mantissa % 2) == 0;
+ npy_bool isEven = BigInt_IsEven(mantissa);
npy_int32 cmp;
/* values used to determine how to round */
DEBUG_ASSERT(bufferSize > 0);
/* if the mantissa is zero, the value is zero regardless of the exponent */
- if (mantissa == 0) {
+ if (BigInt_IsZero(mantissa)) {
*curDigit = '0';
*pOutExponent = 0;
return 1;
}
+ BigInt_Copy(scaledValue, mantissa);
+
if (hasUnequalMargins) {
/* if we have no fractional component */
if (exponent > 0) {
*/
/* scaledValue = 2 * 2 * mantissa*2^exponent */
- BigInt_Set_uint64(&scaledValue, mantissa);
- BigInt_ShiftLeft(&scaledValue, exponent + 2);
-
+ BigInt_ShiftLeft(scaledValue, exponent + 2);
/* scale = 2 * 2 * 1 */
- BigInt_Set_uint32(&scale, 4);
-
+ BigInt_Set_uint32(scale, 4);
/* scaledMarginLow = 2 * 2^(exponent-1) */
- BigInt_Pow2(&scaledMarginLow, exponent);
-
+ BigInt_Pow2(scaledMarginLow, exponent);
/* scaledMarginHigh = 2 * 2 * 2^(exponent-1) */
- BigInt_Pow2(&optionalMarginHigh, exponent + 1);
+ BigInt_Pow2(optionalMarginHigh, exponent + 1);
}
/* else we have a fractional exponent */
else {
*/
/* scaledValue = 2 * 2 * mantissa */
- BigInt_Set_uint64(&scaledValue, mantissa);
- BigInt_ShiftLeft(&scaledValue, 2);
-
+ BigInt_ShiftLeft(scaledValue, 2);
/* scale = 2 * 2 * 2^(-exponent) */
- BigInt_Pow2(&scale, -exponent + 2);
-
+ BigInt_Pow2(scale, -exponent + 2);
/* scaledMarginLow = 2 * 2^(-1) */
- BigInt_Set_uint32(&scaledMarginLow, 1);
-
+ BigInt_Set_uint32(scaledMarginLow, 1);
/* scaledMarginHigh = 2 * 2 * 2^(-1) */
- BigInt_Set_uint32(&optionalMarginHigh, 2);
+ BigInt_Set_uint32(optionalMarginHigh, 2);
}
/* the high and low margins are different */
- scaledMarginHigh = &optionalMarginHigh;
+ scaledMarginHigh = optionalMarginHigh;
}
else {
/* if we have no fractional component */
if (exponent > 0) {
/* scaledValue = 2 * mantissa*2^exponent */
- BigInt_Set_uint64(&scaledValue, mantissa);
- BigInt_ShiftLeft(&scaledValue, exponent + 1);
-
+ BigInt_ShiftLeft(scaledValue, exponent + 1);
/* scale = 2 * 1 */
- BigInt_Set_uint32(&scale, 2);
-
+ BigInt_Set_uint32(scale, 2);
/* scaledMarginLow = 2 * 2^(exponent-1) */
- BigInt_Pow2(&scaledMarginLow, exponent);
+ BigInt_Pow2(scaledMarginLow, exponent);
}
/* else we have a fractional exponent */
else {
*/
/* scaledValue = 2 * mantissa */
- BigInt_Set_uint64(&scaledValue, mantissa);
- BigInt_ShiftLeft(&scaledValue, 1);
-
+ BigInt_ShiftLeft(scaledValue, 1);
/* scale = 2 * 2^(-exponent) */
- BigInt_Pow2(&scale, -exponent + 1);
-
+ BigInt_Pow2(scale, -exponent + 1);
/* scaledMarginLow = 2 * 2^(-1) */
- BigInt_Set_uint32(&scaledMarginLow, 1);
+ BigInt_Set_uint32(scaledMarginLow, 1);
}
/* the high and low margins are equal */
- scaledMarginHigh = &scaledMarginLow;
+ scaledMarginHigh = scaledMarginLow;
}
/*
* <= log10(v) + log10(2)
* floor(log10(v)) < ceil((mantissaBit + exponent) * log10(2))
* <= floor(log10(v)) + 1
+ *
+ * Warning: This calculation assumes npy_float64 is an IEEE-binary64
+ * float. This line may need to be updated if this is not the case.
*/
digitExponent = (npy_int32)(
ceil((npy_float64)((npy_int32)mantissaBit + exponent) * log10_2 - 0.69));
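A worked example of this estimate (assuming an IEEE binary64 value of 1.0, so mantissaBit = 52 and exponent = -52):

    digitExponent = ceil((52 + (-52)) * 0.30103 - 0.69)
                  = ceil(-0.69)
                  = 0

The -0.69 term deliberately biases the estimate low (it may be one less than the true value but never more); for 1.0 the (value >= 1) comparison below then increments it to 1.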
/* Divide value by 10^digitExponent. */
if (digitExponent > 0) {
/* A positive exponent creates a division so we multiply the scale. */
- BigInt temp;
- BigInt_MultiplyPow10(&temp, &scale, digitExponent);
- BigInt_Copy(&scale, &temp);
+ BigInt_MultiplyPow10(scale, digitExponent, temp1);
}
else if (digitExponent < 0) {
/*
* A negative exponent creates a multiplication so we multiply up the
* scaledValue, scaledMarginLow and scaledMarginHigh.
*/
- BigInt pow10, temp;
- BigInt_Pow10(&pow10, -digitExponent);
+ BigInt *temp = temp1, *pow10 = temp2;
+ BigInt_Pow10(pow10, -digitExponent, temp);
- BigInt_Multiply(&temp, &scaledValue, &pow10);
- BigInt_Copy(&scaledValue, &temp);
+ BigInt_Multiply(temp, scaledValue, pow10);
+ BigInt_Copy(scaledValue, temp);
- BigInt_Multiply(&temp, &scaledMarginLow, &pow10);
- BigInt_Copy(&scaledMarginLow, &temp);
+ BigInt_Multiply(temp, scaledMarginLow, pow10);
+ BigInt_Copy(scaledMarginLow, temp);
- if (scaledMarginHigh != &scaledMarginLow) {
- BigInt_Multiply2(scaledMarginHigh, &scaledMarginLow);
+ if (scaledMarginHigh != scaledMarginLow) {
+ BigInt_Multiply2(scaledMarginHigh, scaledMarginLow);
}
}
/* If (value >= 1), our estimate for digitExponent was too low */
- if (BigInt_Compare(&scaledValue, &scale) >= 0) {
+ if (BigInt_Compare(scaledValue, scale) >= 0) {
/*
* The exponent estimate was incorrect.
* Increment the exponent and don't perform the premultiply needed
* Multiply larger by the output base to prepare for the first loop
* iteration.
*/
- BigInt_Multiply10(&scaledValue);
- BigInt_Multiply10(&scaledMarginLow);
- if (scaledMarginHigh != &scaledMarginLow) {
- BigInt_Multiply2(scaledMarginHigh, &scaledMarginLow);
+ BigInt_Multiply10(scaledValue);
+ BigInt_Multiply10(scaledMarginLow);
+ if (scaledMarginHigh != scaledMarginLow) {
+ BigInt_Multiply2(scaledMarginHigh, scaledMarginLow);
}
}
* to be less than or equal to 429496729 which is the highest number that
* can be multiplied by 10 without overflowing to a new block.
*/
- DEBUG_ASSERT(scale.length > 0);
- hiBlock = scale.blocks[scale.length - 1];
+ DEBUG_ASSERT(scale->length > 0);
+ hiBlock = scale->blocks[scale->length - 1];
if (hiBlock < 8 || hiBlock > 429496729) {
npy_uint32 hiBlockLog2, shift;
DEBUG_ASSERT(hiBlockLog2 < 3 || hiBlockLog2 > 27);
shift = (32 + 27 - hiBlockLog2) % 32;
- BigInt_ShiftLeft(&scale, shift);
- BigInt_ShiftLeft(&scaledValue, shift);
- BigInt_ShiftLeft(&scaledMarginLow, shift);
- if (scaledMarginHigh != &scaledMarginLow) {
- BigInt_Multiply2(scaledMarginHigh, &scaledMarginLow);
+ BigInt_ShiftLeft(scale, shift);
+ BigInt_ShiftLeft(scaledValue, shift);
+ BigInt_ShiftLeft(scaledMarginLow, shift);
+ if (scaledMarginHigh != scaledMarginLow) {
+ BigInt_Multiply2(scaledMarginHigh, scaledMarginLow);
}
}
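The bounds checked above are plain overflow arithmetic: 429496729 is the largest block value that can be multiplied by 10 without spilling into a new 32-bit block, since 429496729 * 10 = 4294967290 <= 2^32 - 1 = 4294967295, whereas 429496730 * 10 = 4294967300 would wrap.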
* terminate early.
*/
for (;;) {
- BigInt scaledValueHigh;
+ BigInt *scaledValueHigh = temp1;
digitExponent = digitExponent-1;
/* divide out the scale to extract the digit */
outputDigit =
- BigInt_DivideWithRemainder_MaxQuotient9(&scaledValue, &scale);
+ BigInt_DivideWithRemainder_MaxQuotient9(scaledValue, scale);
DEBUG_ASSERT(outputDigit < 10);
/* update the high end of the value */
- BigInt_Add(&scaledValueHigh, &scaledValue, scaledMarginHigh);
+ BigInt_Add(scaledValueHigh, scaledValue, scaledMarginHigh);
/*
* stop looping if we are far enough away from our neighboring
* values or if we have reached the cutoff digit
*/
- cmp = BigInt_Compare(&scaledValue, &scaledMarginLow);
+ cmp = BigInt_Compare(scaledValue, scaledMarginLow);
low = isEven ? (cmp <= 0) : (cmp < 0);
- cmp = BigInt_Compare(&scaledValueHigh, &scale);
+ cmp = BigInt_Compare(scaledValueHigh, scale);
high = isEven ? (cmp >= 0) : (cmp > 0);
if (low | high | (digitExponent == cutoffExponent))
break;
++curDigit;
/* multiply larger by the output base */
- BigInt_Multiply10(&scaledValue);
- BigInt_Multiply10(&scaledMarginLow);
- if (scaledMarginHigh != &scaledMarginLow) {
- BigInt_Multiply2(scaledMarginHigh, &scaledMarginLow);
+ BigInt_Multiply10(scaledValue);
+ BigInt_Multiply10(scaledMarginLow);
+ if (scaledMarginHigh != scaledMarginLow) {
+ BigInt_Multiply2(scaledMarginHigh, scaledMarginLow);
}
}
}
/* divide out the scale to extract the digit */
outputDigit =
- BigInt_DivideWithRemainder_MaxQuotient9(&scaledValue, &scale);
+ BigInt_DivideWithRemainder_MaxQuotient9(scaledValue, scale);
DEBUG_ASSERT(outputDigit < 10);
- if ((scaledValue.length == 0) | (digitExponent == cutoffExponent)) {
+ if ((scaledValue->length == 0) |
+ (digitExponent == cutoffExponent)) {
break;
}
++curDigit;
/* multiply larger by the output base */
- BigInt_Multiply10(&scaledValue);
+ BigInt_Multiply10(scaledValue);
}
}
* compare( scale * value, scale * 0.5 )
* compare( 2 * scale * value, scale )
*/
- BigInt_Multiply2_inplace(&scaledValue);
- compare = BigInt_Compare(&scaledValue, &scale);
+ BigInt_Multiply2_inplace(scaledValue);
+ compare = BigInt_Compare(scaledValue, scale);
roundDown = compare < 0;
/*
/*
- * Helper union to decompose a 16-bit IEEE float.
- * sign: 1 bit
- * exponent: 5 bits
- * mantissa: 10 bits
- */
-typedef union FloatUnion16
-{
- npy_uint16 integer;
-} FloatUnion16;
-
-npy_bool IsNegative_F16(FloatUnion16 *v) { return (v->integer >> 15) != 0; }
-npy_uint32 GetExponent_F16(FloatUnion16 *v) { return (v->integer >> 10) & 0x1F;}
-npy_uint32 GetMantissa_F16(FloatUnion16 *v) { return v->integer & 0x3FF; }
-
-
-/*
- * Helper union to decompose a 32-bit IEEE float.
- * sign: 1 bit
- * exponent: 8 bits
- * mantissa: 23 bits
- */
-typedef union FloatUnion32
-{
- npy_float32 floatingPoint;
- npy_uint32 integer;
-} FloatUnion32;
-
-npy_bool IsNegative_F32(FloatUnion32 *v) { return (v->integer >> 31) != 0; }
-npy_uint32 GetExponent_F32(FloatUnion32 *v) { return (v->integer >> 23) & 0xFF;}
-npy_uint32 GetMantissa_F32(FloatUnion32 *v) { return v->integer & 0x7FFFFF; }
-
-/*
- * Helper union to decompose a 64-bit IEEE float.
- * sign: 1 bit
- * exponent: 11 bits
- * mantissa: 52 bits
- */
-typedef union FloatUnion64
-{
- npy_float64 floatingPoint;
- npy_uint64 integer;
-} FloatUnion64;
-npy_bool IsNegative_F64(FloatUnion64 *v) { return (v->integer >> 63) != 0; }
-npy_uint32 GetExponent_F64(FloatUnion64 *v) { return (v->integer >> 52) & 0x7FF; }
-npy_uint64 GetMantissa_F64(FloatUnion64 *v) { return v->integer & 0xFFFFFFFFFFFFFull; }
-
-/*
- * Helper unions and datatype to decompose a 80-bit IEEE float
- * sign: 1 bit, second u64
- * exponent: 15 bits, second u64
- * intbit 1 bit, first u64
- * mantissa: 63 bits, first u64
- */
-
-/*
- * Since systems have different types of long doubles, and may not necessarily
- * have a 128-byte format we can use to pass values around, here we create
- * our own 128-bit storage type for convenience.
- */
-typedef struct FloatVal128 {
- npy_uint64 integer[2];
-} FloatVal128;
-npy_bool IsNegative_F128(FloatVal128 *v) {
- return ((v->integer[1] >> 15) & 0x1) != 0;
-}
-npy_uint32 GetExponent_F128(FloatVal128 *v) { return v->integer[1] & 0x7FFF; }
-npy_uint64 GetMantissa_F128(FloatVal128 *v) {
- return v->integer[0] & 0x7FFFFFFFFFFFFFFFull;
-}
-
-/*
- * then for each different definition of long double, we create a union to
- * unpack the float data safely. We can then copy these integers to a
- * FloatVal128.
+ * The FormatPositional and FormatScientific functions have been more
+ * significantly rewritten relative to Ryan Juckett's code.
+ *
+ * The binary16 and the various 128-bit float functions are new, adapted from
+ * the 64-bit version. The python interface functions are also new.
*/
-#ifdef NPY_FLOAT128
-typedef union FloatUnion128
-{
- npy_float128 floatingPoint;
- struct {
- npy_uint64 a;
- npy_uint16 b;
- } integer;
-} FloatUnion128;
-#endif
-
-#ifdef NPY_FLOAT96
-typedef union FloatUnion96
-{
- npy_float96 floatingPoint;
- struct {
- npy_uint64 a;
- npy_uint32 b;
- } integer;
-} FloatUnion96;
-#endif
-
-#ifdef NPY_FLOAT80
-typedef union FloatUnion80
-{
- npy_float80 floatingPoint;
- struct {
- npy_uint64 a;
- npy_uint16 b;
- } integer;
-} FloatUnion80;
-#endif
-/*
- * The main changes above this point, relative to Ryan Juckett's code, are:
- * 1. fixed overflow problems when mantissa was 64 bits (in float128 types),
- * by replacing multiplication by 2 or 4 by BigInt_ShiftLeft calls.
- * 2. Increased c_BigInt_MaxBlocks
- * 3. Added more entries to the g_PowerOf10_Big table
- * 4. Added unbiased rounding calculation with isEven
+/* Options struct for easy passing of Dragon4 options.
*
- * Below this point, the FormatPositional and FormatScientific functions have
- * been more significantly rewritten. The Dragon4_PrintFloat16 and
- * Dragon4_PrintFloat128 functions are new, and were adapted from the 64 and 32
- * bit versions. The python interfacing functions (in the header) are new.
+ * scientific - boolean controlling whether scientific notation is used
+ * digit_mode - whether to use unique or fixed fractional output
+ * cutoff_mode - whether 'precision' refers to total digits, or digits past
+ * the decimal point.
+ * precision - When negative, prints as many digits as needed for a unique
+ * number. When positive, specifies the maximum number of
+ * significant digits to print.
+ * sign - whether to always show sign
+ * trim_mode - how to treat trailing 0s and '.'. See TrimMode comments.
+ * digits_left - pad characters to left of decimal point. -1 for no padding
+ * digits_right - pad characters to right of decimal point. -1 for no padding.
+ * Padding adds whitespace until there are the specified
+ * number of characters on each side of the decimal point.
+ * Applies after trim_mode characters were removed. If
+ * digits_right is positive and the decimal point was
+ * trimmed, the decimal point will be replaced by a
+ * whitespace character.
+ * exp_digits - Only affects scientific output. If positive, pads the
+ * exponent with 0s until there are this many digits. If
+ * negative, only use sufficient digits.
*/
-
+typedef struct Dragon4_Options {
+ npy_bool scientific;
+ DigitMode digit_mode;
+ CutoffMode cutoff_mode;
+ npy_int32 precision;
+ npy_bool sign;
+ TrimMode trim_mode;
+ npy_int32 digits_left;
+ npy_int32 digits_right;
+ npy_int32 exp_digits;
+} Dragon4_Options;
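As a usage illustration, a caller wanting shortest round-trip positional output might fill the struct as follows (a sketch only; the enum members used here are assumed from the header declarations, and the Dragon4_Positional wrapper further below fills the same fields the same way):

    Dragon4_Options opt;
    opt.scientific = 0;                 /* positional notation: ddddd.dddd */
    opt.digit_mode = DigitMode_Unique;  /* shortest digits that round-trip */
    opt.cutoff_mode = CutoffMode_TotalLength;
    opt.precision = -1;                 /* negative: as many digits as needed */
    opt.sign = 0;
    opt.trim_mode = TrimMode_LeaveOneZero;
    opt.digits_left = -1;               /* no padding */
    opt.digits_right = -1;
    opt.exp_digits = -1;                /* unused in positional mode */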
/*
* Outputs the positive number with positional notation: ddddd.dddd
* The output is always NUL terminated and the output length (not including the
* NUL) is returned.
+ *
* Arguments:
* buffer - buffer to output into
* bufferSize - maximum characters that can be printed to buffer
* signbit - value of the sign position. Should be '+', '-' or ''
* mantissaBit - index of the highest set mantissa bit
* hasUnequalMargins - is the high margin twice as large as the low margin
- * precision - Negative prints as many digits as are needed for a unique
- * number. Positive specifies the maximum number of significant
- * digits to print past the decimal point.
- * trim_mode - how to treat trailing 0s and '.'. See TrimMode comments.
- * digits_left - pad characters to left of decimal point. -1 for no padding
- * digits_right - pad characters to right of decimal point. -1 for no padding
- * padding adds whitespace until there are the specified
- * number characters to sides of decimal point. Applies after
- * trim_mode characters were removed. If digits_right is
- * positive and the decimal point was trimmed, decimal point
- * will be replaced by a whitespace character.
+ *
+ * See Dragon4_Options for description of remaining arguments.
*/
static npy_uint32
-FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
+FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
npy_int32 exponent, char signbit, npy_uint32 mantissaBit,
npy_bool hasUnequalMargins, DigitMode digit_mode,
CutoffMode cutoff_mode, npy_int32 precision,
/* always add decimal point, except for DprZeros mode */
if (trim_mode != TrimMode_DptZeros && numFractionDigits == 0 &&
- pos < maxPrintLen){
+ pos < maxPrintLen) {
buffer[pos++] = '.';
}
* when rounding, we may still end up with trailing zeros. Remove them
* depending on trim settings.
*/
- if (precision >= 0 && trim_mode != TrimMode_None && numFractionDigits > 0){
+ if (precision >= 0 && trim_mode != TrimMode_None && numFractionDigits > 0) {
while (buffer[pos-1] == '0') {
pos--;
numFractionDigits--;
npy_int32 shift = digits_left - (numWholeDigits + has_sign);
npy_int32 count = pos;
- if (count + shift > maxPrintLen){
+ if (count + shift > maxPrintLen) {
count = maxPrintLen - shift;
}
* Outputs the positive number with scientific notation: d.dddde[sign]ddd
* The output is always NUL terminated and the output length (not including the
* NUL) is returned.
+ *
* Arguments:
* buffer - buffer to output into
* bufferSize - maximum characters that can be printed to buffer
* signbit - value of the sign position. Should be '+', '-' or ''
* mantissaBit - index of the highest set mantissa bit
* hasUnequalMargins - is the high margin twice as large as the low margin
- * precision - Negative prints as many digits as are needed for a unique
- * number. Positive specifies the maximum number of significant
- * digits to print past the decimal point.
- * trim_mode - how to treat trailing 0s and '.'. See TrimMode comments.
- * digits_left - pad characters to left of decimal point. -1 for no padding
- * exp_digits - pads exponent with zeros until it has this many digits
+ *
+ * See Dragon4_Options for description of remaining arguments.
*/
static npy_uint32
-FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
+FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
npy_int32 exponent, char signbit, npy_uint32 mantissaBit,
npy_bool hasUnequalMargins, DigitMode digit_mode,
npy_int32 precision, TrimMode trim_mode,
leftchars = 1 + (signbit == '-' || signbit == '+');
if (digits_left > leftchars) {
int i;
- for (i = 0; i < digits_left - leftchars && bufferSize > 1; i++){
+ for (i = 0; i < digits_left - leftchars && bufferSize > 1; i++) {
*pCurOut = ' ';
pCurOut++;
--bufferSize;
/* always add decimal point, except for DprZeros mode */
if (trim_mode != TrimMode_DptZeros && numFractionDigits == 0 &&
- bufferSize > 1){
+ bufferSize > 1) {
*pCurOut = '.';
++pCurOut;
--bufferSize;
* when rounding, we may still end up with trailing zeros. Remove them
* depending on trim settings.
*/
- if (precision >= 0 && trim_mode != TrimMode_None && numFractionDigits > 0){
+ if (precision >= 0 && trim_mode != TrimMode_None && numFractionDigits > 0) {
--pCurOut;
while (*pCurOut == '0') {
--pCurOut;
DEBUG_ASSERT(printExponent < 100000);
/* get exp digits */
- for (i = 0; i < 5; i++){
+ for (i = 0; i < 5; i++) {
digits[i] = printExponent % 10;
printExponent /= 10;
}
}
exp_size = i;
/* write remaining digits to tmp buf */
- for (i = exp_size; i > 0; i--){
+ for (i = exp_size; i > 0; i--) {
exponentBuffer[2 + (exp_size-i)] = (char)('0' + digits[i-1]);
}
/* only print sign for inf values (though nan can have a sign set) */
if (signbit == '+') {
- if (pos < maxPrintLen-1){
+ if (pos < maxPrintLen-1) {
buffer[pos++] = '+';
}
}
else if (signbit == '-') {
- if (pos < maxPrintLen-1){
+ if (pos < maxPrintLen-1) {
buffer[pos++] = '-';
}
}
buffer[pos + printLen] = '\0';
/*
- * // XXX: Should we change this for numpy?
+ * For numpy we ignore unusual mantissa values for nan, but keep this
+ * code in case we change our mind later.
+ *
* // append HEX value
* if (maxPrintLen > 3) {
* printLen += PrintHex(buffer+3, bufferSize-3, mantissa,
}
/*
- * These functions print a floating-point number as a decimal string.
- * The output string is always NUL terminated and the string length (not
- * including the NUL) is returned.
+ * The functions below format a floating-point numbers stored in particular
+ * formats, as a decimal string. The output string is always NUL terminated
+ * and the string length (not including the NUL) is returned.
+ *
+ * For 16, 32 and 64 bit floats we assume they are the IEEE 754 type.
+ * For 128 bit floats we account for different definitions.
*
* Arguments are:
* buffer - buffer to output into
* bufferSize - maximum characters that can be printed to buffer
- * value - value significand
- * scientific - boolean controlling whether scientific notation is used
- * precision - If positive, specifies the number of decimals to show after
- * decimal point. If negative, sufficient digits to uniquely
- * specify the float will be output.
- * trim_mode - how to treat trailing zeros and decimal point. See TrimMode.
- * digits_right - pad the result with '' on the right past the decimal point
- * digits_left - pad the result with '' on the right past the decimal point
- * exp_digits - Only affects scientific output. If positive, pads the
- * exponent with 0s until there are this many digits. If
- * negative, only use sufficient digits.
+ * value - value to print
+ * opt - Dragon4 options, see above
+ */
+
+/*
+ * Helper function that takes Dragon4 parameters and options and
+ * calls Dragon4.
*/
static npy_uint32
-Dragon4_PrintFloat16(char *buffer, npy_uint32 bufferSize, npy_uint16 value,
- npy_bool scientific, DigitMode digit_mode,
- CutoffMode cutoff_mode, npy_int32 precision,
- npy_bool sign, TrimMode trim_mode, npy_int32 digits_left,
- npy_int32 digits_right, npy_int32 exp_digits)
+Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
+ npy_int32 exponent, char signbit, npy_uint32 mantissaBit,
+ npy_bool hasUnequalMargins, Dragon4_Options *opt)
{
- FloatUnion16 floatUnion;
- npy_uint32 floatExponent, floatMantissa;
+ /* format the value */
+ if (opt->scientific) {
+ return FormatScientific(buffer, bufferSize, mantissa, exponent,
+ signbit, mantissaBit, hasUnequalMargins,
+ opt->digit_mode, opt->precision,
+ opt->trim_mode, opt->digits_left,
+ opt->exp_digits);
+ }
+ else {
+ return FormatPositional(buffer, bufferSize, mantissa, exponent,
+ signbit, mantissaBit, hasUnequalMargins,
+ opt->digit_mode, opt->cutoff_mode,
+ opt->precision, opt->trim_mode,
+ opt->digits_left, opt->digits_right);
+ }
+}
+
+/*
+ * IEEE binary16 floating-point format
+ *
+ * sign: 1 bit
+ * exponent: 5 bits
+ * mantissa: 10 bits
+ */
+static npy_uint32
+Dragon4_PrintFloat_IEEE_binary16(
+ Dragon4_Scratch *scratch, npy_half *value, Dragon4_Options *opt)
+{
+ char *buffer = scratch->repr;
+ npy_uint32 bufferSize = sizeof(scratch->repr);
+ BigInt *bigints = scratch->bigints;
+
+ npy_uint16 val = *value;
+ npy_uint32 floatExponent, floatMantissa, floatSign;
npy_uint32 mantissa;
npy_int32 exponent;
}
/* deconstruct the floating point value */
- floatUnion.integer = value;
- floatExponent = GetExponent_F16(&floatUnion);
- floatMantissa = GetMantissa_F16(&floatUnion);
+ floatMantissa = val & bitmask_u32(10);
+ floatExponent = (val >> 10) & bitmask_u32(5);
+ floatSign = val >> 15;
/* output the sign */
- if (IsNegative_F16(&floatUnion)) {
+ if (floatSign != 0) {
signbit = '-';
}
- else if (sign) {
+ else if (opt->sign) {
signbit = '+';
}
/* if this is a special value */
- if (floatExponent == 0x1F) {
+ if (floatExponent == bitmask_u32(5)) {
return PrintInfNan(buffer, bufferSize, floatMantissa, 3, signbit);
}
/* else this is a number */
hasUnequalMargins = NPY_FALSE;
}
- /* format the value */
- if (scientific) {
- return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit,
- mantissaBit, hasUnequalMargins, digit_mode,
- precision, trim_mode, digits_left, exp_digits);
- }
- else {
- return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit,
- mantissaBit, hasUnequalMargins, digit_mode,
- cutoff_mode, precision, trim_mode,
- digits_left, digits_right);
- }
+ BigInt_Set_uint32(&bigints[0], mantissa);
+ return Format_floatbits(buffer, bufferSize, bigints, exponent,
+ signbit, mantissaBit, hasUnequalMargins, opt);
}
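As a concrete check of the bit extraction above, a worked example (not part of the patch): the binary16 encoding of 1.0 is 0x3C00, which decomposes as

    npy_uint16 v = 0x3C00;                          /* binary16 1.0 */
    /* v & bitmask_u32(10)         -> mantissa  0                   */
    /* (v >> 10) & bitmask_u32(5)  -> exponent 15 (the binary16 bias) */
    /* v >> 15                     -> sign      0                   */
    /* value = (1 + 0/2^10) * 2^(15 - 15) = 1.0                     */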
+/*
+ * IEEE binary32 floating-point format
+ *
+ * sign: 1 bit
+ * exponent: 8 bits
+ * mantissa: 23 bits
+ */
static npy_uint32
-Dragon4_PrintFloat32(char *buffer, npy_uint32 bufferSize, npy_float32 value,
- npy_bool scientific, DigitMode digit_mode,
- CutoffMode cutoff_mode, npy_int32 precision,
- npy_bool sign, TrimMode trim_mode, npy_int32 digits_left,
- npy_int32 digits_right, npy_int32 exp_digits)
+Dragon4_PrintFloat_IEEE_binary32(
+ Dragon4_Scratch *scratch, npy_float32 *value,
+ Dragon4_Options *opt)
{
- FloatUnion32 floatUnion;
- npy_uint32 floatExponent, floatMantissa;
+ char *buffer = scratch->repr;
+ npy_uint32 bufferSize = sizeof(scratch->repr);
+ BigInt *bigints = scratch->bigints;
+
+ union
+ {
+ npy_float32 floatingPoint;
+ npy_uint32 integer;
+ } floatUnion;
+ npy_uint32 floatExponent, floatMantissa, floatSign;
npy_uint32 mantissa;
npy_int32 exponent;
}
/* deconstruct the floating point value */
- floatUnion.floatingPoint = value;
- floatExponent = GetExponent_F32(&floatUnion);
- floatMantissa = GetMantissa_F32(&floatUnion);
+ floatUnion.floatingPoint = *value;
+ floatMantissa = floatUnion.integer & bitmask_u32(23);
+ floatExponent = (floatUnion.integer >> 23) & bitmask_u32(8);
+ floatSign = floatUnion.integer >> 31;
/* output the sign */
- if (IsNegative_F32(&floatUnion)) {
+ if (floatSign != 0) {
signbit = '-';
}
- else if (sign) {
+ else if (opt->sign) {
signbit = '+';
}
/* if this is a special value */
- if (floatExponent == 0xFF) {
+ if (floatExponent == bitmask_u32(8)) {
return PrintInfNan(buffer, bufferSize, floatMantissa, 6, signbit);
}
/* else this is a number */
hasUnequalMargins = NPY_FALSE;
}
- /* format the value */
- if (scientific) {
- return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit,
- mantissaBit, hasUnequalMargins, digit_mode,
- precision, trim_mode, digits_left, exp_digits);
- }
- else {
- return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit,
- mantissaBit, hasUnequalMargins, digit_mode,
- cutoff_mode, precision, trim_mode,
- digits_left, digits_right);
- }
+ BigInt_Set_uint32(&bigints[0], mantissa);
+ return Format_floatbits(buffer, bufferSize, bigints, exponent,
+ signbit, mantissaBit, hasUnequalMargins, opt);
}
+/*
+ * IEEE binary64 floating-point format
+ *
+ * sign: 1 bit
+ * exponent: 11 bits
+ * mantissa: 52 bits
+ */
static npy_uint32
-Dragon4_PrintFloat64(char *buffer, npy_uint32 bufferSize, npy_float64 value,
- npy_bool scientific, DigitMode digit_mode,
- CutoffMode cutoff_mode, npy_int32 precision,
- npy_bool sign, TrimMode trim_mode, npy_int32 digits_left,
- npy_int32 digits_right, npy_int32 exp_digits)
+Dragon4_PrintFloat_IEEE_binary64(
+ Dragon4_Scratch *scratch, npy_float64 *value, Dragon4_Options *opt)
{
- FloatUnion64 floatUnion;
- npy_uint32 floatExponent;
+ char *buffer = scratch->repr;
+ npy_uint32 bufferSize = sizeof(scratch->repr);
+ BigInt *bigints = scratch->bigints;
+
+ union
+ {
+ npy_float64 floatingPoint;
+ npy_uint64 integer;
+ } floatUnion;
+ npy_uint32 floatExponent, floatSign;
npy_uint64 floatMantissa;
npy_uint64 mantissa;
}
/* deconstruct the floating point value */
- floatUnion.floatingPoint = value;
- floatExponent = GetExponent_F64(&floatUnion);
- floatMantissa = GetMantissa_F64(&floatUnion);
+ floatUnion.floatingPoint = *value;
+ floatMantissa = floatUnion.integer & bitmask_u64(52);
+ floatExponent = (floatUnion.integer >> 52) & bitmask_u32(11);
+ floatSign = floatUnion.integer >> 63;
/* output the sign */
- if (IsNegative_F64(&floatUnion)) {
+ if (floatSign != 0) {
signbit = '-';
}
- else if (sign) {
+ else if (opt->sign) {
signbit = '+';
}
/* if this is a special value */
- if (floatExponent == 0x7FF) {
+ if (floatExponent == bitmask_u32(11)) {
return PrintInfNan(buffer, bufferSize, floatMantissa, 13, signbit);
}
/* else this is a number */
hasUnequalMargins = NPY_FALSE;
}
- /* format the value */
- if (scientific) {
- return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit,
- mantissaBit, hasUnequalMargins, digit_mode,
- precision, trim_mode, digits_left, exp_digits);
- }
- else {
- return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit,
- mantissaBit, hasUnequalMargins, digit_mode,
- cutoff_mode, precision, trim_mode,
- digits_left, digits_right);
- }
+ BigInt_Set_uint64(&bigints[0], mantissa);
+ return Format_floatbits(buffer, bufferSize, bigints, exponent,
+ signbit, mantissaBit, hasUnequalMargins, opt);
}
+
+/*
+ * Since systems have different types of long doubles, and may not necessarily
+ * have a 128-byte format we can use to pass values around, here we create
+ * our own 128-bit storage type for convenience.
+ */
+typedef struct FloatVal128 {
+ npy_uint64 hi, lo;
+} FloatVal128;
+
+#if defined(HAVE_LDOUBLE_INTEL_EXTENDED_10_BYTES_LE) || \
+ defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \
+ defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \
+ defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE)
+/*
+ * Intel's 80-bit IEEE extended precision floating-point format
+ *
+ * "long doubles" with this format are stored as 96 or 128 bits, but
+ * are equivalent to the 80 bit type with some zero padding on the high bits.
+ * This method expects the user to pass in the value using a 128-bit
+ * FloatVal128, so it can support 80-, 96-, or 128-bit storage formats,
+ * and is endian-independent.
+ *
+ * sign: 1 bit, second u64
+ * exponent: 15 bits, second u64
+ * intbit 1 bit, first u64
+ * mantissa: 63 bits, first u64
+ */
static npy_uint32
-Dragon4_PrintFloat128(char *buffer, npy_uint32 bufferSize, FloatVal128 value,
- npy_bool scientific, DigitMode digit_mode,
- CutoffMode cutoff_mode, npy_int32 precision,
- npy_bool sign, TrimMode trim_mode, npy_int32 digits_left,
- npy_int32 digits_right, npy_int32 exp_digits)
+Dragon4_PrintFloat_Intel_extended(
+ Dragon4_Scratch *scratch, FloatVal128 value, Dragon4_Options *opt)
{
- npy_uint32 floatExponent;
+ char *buffer = scratch->repr;
+ npy_uint32 bufferSize = sizeof(scratch->repr);
+ BigInt *bigints = scratch->bigints;
+
+ npy_uint32 floatExponent, floatSign;
npy_uint64 floatMantissa;
npy_uint64 mantissa;
return 0;
}
- /* deconstruct the floating point value */
- floatExponent = GetExponent_F128(&value);
- floatMantissa = GetMantissa_F128(&value);
+ /* deconstruct the floating point value (we ignore the intbit) */
+ floatMantissa = value.lo & bitmask_u64(63);
+ floatExponent = value.hi & bitmask_u32(15);
+ floatSign = (value.hi >> 15) & 0x1;
/* output the sign */
- if (IsNegative_F128(&value)) {
+ if (floatSign != 0) {
signbit = '-';
}
- else if (sign) {
+ else if (opt->sign) {
signbit = '+';
}
/* if this is a special value */
- if (floatExponent == 0x7FFF) {
+ if (floatExponent == bitmask_u32(15)) {
+ /*
+ * Note: Technically there are other special extended values defined if
+ * the intbit is 0, like Pseudo-Infinity, Pseudo-NaN, Quiet-NaN. We
+ * ignore all of these since they are not generated on modern
+ * processors. We treat Quiet-NaN as simply NaN.
+ */
return PrintInfNan(buffer, bufferSize, floatMantissa, 16, signbit);
}
/* else this is a number */
hasUnequalMargins = NPY_FALSE;
}
- /* format the value */
- if (scientific) {
- return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit,
- mantissaBit, hasUnequalMargins, digit_mode,
- precision, trim_mode, digits_left, exp_digits);
+ BigInt_Set_uint64(&bigints[0], mantissa);
+ return Format_floatbits(buffer, bufferSize, bigints, exponent,
+ signbit, mantissaBit, hasUnequalMargins, opt);
+
+}
+
+#endif /* INTEL_EXTENDED group */
+
+
+#ifdef HAVE_LDOUBLE_INTEL_EXTENDED_10_BYTES_LE
+/*
+ * Intel's 80-bit IEEE extended precision format, 80-bit storage
+ *
+ * Note: It is not clear if a long double with 10-byte storage exists on any
+ * system. But numpy defines NPY_FLOAT80, so if we come across it, assume it is
+ * an Intel extended format.
+ */
+static npy_uint32
+Dragon4_PrintFloat_Intel_extended80(
+ Dragon4_Scratch *scratch, npy_float80 *value, Dragon4_Options *opt)
+{
+ FloatVal128 val128;
+ union {
+ npy_float80 floatingPoint;
+ struct {
+ npy_uint64 a;
+ npy_uint16 b;
+ } integer;
+ } buf80;
+
+ buf80.floatingPoint = *value;
+ /* Intel is little-endian */
+ val128.lo = buf80.integer.a;
+ val128.hi = buf80.integer.b;
+
+ return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt);
+}
+#endif /* HAVE_LDOUBLE_INTEL_EXTENDED_10_BYTES_LE */
+
+#ifdef HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE
+/* Intel's 80-bit IEEE extended precision format, 96-bit storage */
+static npy_uint32
+Dragon4_PrintFloat_Intel_extended96(
+ Dragon4_Scratch *scratch, npy_float96 *value, Dragon4_Options *opt)
+{
+ FloatVal128 val128;
+ union {
+ npy_float96 floatingPoint;
+ struct {
+ npy_uint64 a;
+ npy_uint32 b;
+ } integer;
+ } buf96;
+
+ buf96.floatingPoint = *value;
+ /* Intel is little-endian */
+ val128.lo = buf96.integer.a;
+ val128.hi = buf96.integer.b;
+
+ return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt);
+}
+#endif /* HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE */
+
+#ifdef HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE
+/* Motorola Big-endian equivalent of the Intel-extended 96 fp format */
+static npy_uint32
+Dragon4_PrintFloat_Motorola_extended96(
+ Dragon4_Scratch *scratch, npy_float96 *value, Dragon4_Options *opt)
+{
+ FloatVal128 val128;
+ union {
+ npy_float96 floatingPoint;
+ struct {
+ npy_uint64 a;
+ npy_uint32 b;
+ } integer;
+ } buf96;
+
+ buf96.floatingPoint = *value;
+ /* Motorola is big-endian */
+ val128.lo = buf96.integer.b;
+ val128.hi = buf96.integer.a >> 16;
+ /* once again we assume the int has the same endianness as the float */
+
+ return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt);
+}
+#endif /* HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE */
+
+
+#ifdef NPY_FLOAT128
+
+typedef union FloatUnion128
+{
+ npy_float128 floatingPoint;
+ struct {
+ npy_uint64 a;
+ npy_uint64 b;
+ } integer;
+} FloatUnion128;
+
+#ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
+/* Intel's 80-bit IEEE extended precision format, 128-bit storage */
+static npy_uint32
+Dragon4_PrintFloat_Intel_extended128(
+ Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
+{
+ FloatVal128 val128;
+ FloatUnion128 buf128;
+
+ buf128.floatingPoint = *value;
+ /* Intel is little-endian */
+ val128.lo = buf128.integer.a;
+ val128.hi = buf128.integer.b;
+
+ return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt);
+}
+#endif /* HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE */
+
+#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || defined(HAVE_LDOUBLE_IEEE_QUAD_BE)
+/*
+ * IEEE binary128 floating-point format
+ *
+ * sign: 1 bit
+ * exponent: 15 bits
+ * mantissa: 112 bits
+ *
+ * Currently the binary128 format exists on only a few CPUs, such as the
+ * POWER9 arch or aarch64. Because of this, this code has not been
+ * extensively tested. I am not sure if the arch also supports uint128, and
+ * C does not seem to support int128 literals, so we use uint64 values for
+ * the manipulations.
+ */
+static npy_uint32
+Dragon4_PrintFloat_IEEE_binary128(
+ Dragon4_Scratch *scratch, FloatVal128 val128, Dragon4_Options *opt)
+{
+ char *buffer = scratch->repr;
+ npy_uint32 bufferSize = sizeof(scratch->repr);
+ BigInt *bigints = scratch->bigints;
+
+ npy_uint32 floatExponent, floatSign;
+
+ npy_uint64 mantissa_hi, mantissa_lo;
+ npy_int32 exponent;
+ npy_uint32 mantissaBit;
+ npy_bool hasUnequalMargins;
+ char signbit = '\0';
+
+ if (bufferSize == 0) {
+ return 0;
+ }
+
+ if (bufferSize == 1) {
+ buffer[0] = '\0';
+ return 0;
+ }
+
+ mantissa_hi = val128.hi & bitmask_u64(48);
+ mantissa_lo = val128.lo;
+ floatExponent = (val128.hi >> 48) & bitmask_u32(15);
+ floatSign = val128.hi >> 63;
+
+ /* output the sign */
+ if (floatSign != 0) {
+ signbit = '-';
+ }
+ else if (opt->sign) {
+ signbit = '+';
+ }
+
+ /* if this is a special value */
+ if (floatExponent == bitmask_u32(15)) {
+ npy_uint64 mantissa_zero = mantissa_hi == 0 && mantissa_lo == 0;
+ return PrintInfNan(buffer, bufferSize, !mantissa_zero, 16, signbit);
+ }
+ /* else this is a number */
+
+ /* factor the value into its parts */
+ if (floatExponent != 0) {
+ /*
+ * normal
+ * The floating point equation is:
+ * value = (1 + mantissa/2^112) * 2 ^ (exponent-16383)
+ * We convert the integer equation by factoring a 2^112 out of the
+ * exponent
+ * value = (1 + mantissa/2^112) * 2^112 * 2 ^ (exponent-16383-112)
+ * value = (2^112 + mantissa) * 2 ^ (exponent-16383-112)
+ * Because of the implied 1 in front of the mantissa we have 112 bits of
+ * precision.
+ * m = (2^112 + mantissa)
+ * e = (exponent-16383-112)
+ *
+ * Adding 2^112 to the mantissa is the same as adding 2^48 to the hi
+ * 64 bit part.
+ */
+ mantissa_hi = (1ull << 48) | mantissa_hi;
+ /* mantissa_lo is unchanged */
+ exponent = floatExponent - 16383 - 112;
+ mantissaBit = 112;
+ /* margins are unequal iff the stored mantissa bits are all zero, i.e.
+ * the value is a power of two; mantissa_hi already has the implicit
+ * bit OR-ed in, so compare against it rather than against 0 */
+ hasUnequalMargins = (floatExponent != 1) &&
+ (mantissa_hi == (1ull << 48)) && (mantissa_lo == 0);
}
else {
- return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit,
- mantissaBit, hasUnequalMargins, digit_mode,
- cutoff_mode, precision, trim_mode,
- digits_left, digits_right);
+ /*
+ * subnormal
+ * The floating point equation is:
+ * value = (mantissa/2^112) * 2 ^ (1-16383)
+ * We convert the integer equation by factoring a 2^112 out of the
+ * exponent
+ * value = (mantissa/2^112) * 2^112 * 2 ^ (1-16383-112)
+ * value = mantissa * 2 ^ (1-16383-112)
+ * We have up to 112 bits of precision.
+ * m = (mantissa)
+ * e = (1-16383-112)
+ */
+ exponent = 1 - 16383 - 112;
+ mantissaBit = LogBase2_128(mantissa_hi, mantissa_lo);
+ hasUnequalMargins = NPY_FALSE;
}
+
+ BigInt_Set_2x_uint64(&bigints[0], mantissa_hi, mantissa_lo);
+ return Format_floatbits(buffer, bufferSize, bigints, exponent,
+ signbit, mantissaBit, hasUnequalMargins, opt);
}
-PyObject *
-Dragon4_Positional_AnySize(void *val, size_t size, DigitMode digit_mode,
- CutoffMode cutoff_mode, int precision, int sign,
- TrimMode trim, int pad_left, int pad_right)
+#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE)
+static npy_uint32
+Dragon4_PrintFloat_IEEE_binary128_le(
+ Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
{
- /*
- * Use a very large buffer in case anyone tries to output a large numberG.
- * 16384 should be enough to uniquely print any float128, which goes up
- * to about 10^4932 */
- static char repr[16384];
FloatVal128 val128;
-#ifdef NPY_FLOAT80
- FloatUnion80 buf80;;
-#endif
-#ifdef NPY_FLOAT96
- FloatUnion96 buf96;
-#endif
-#ifdef NPY_FLOAT128
FloatUnion128 buf128;
-#endif
- switch (size) {
- case 2:
- Dragon4_PrintFloat16(repr, sizeof(repr), *(npy_float16*)val,
- 0, digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right, -1);
- break;
- case 4:
- Dragon4_PrintFloat32(repr, sizeof(repr), *(npy_float32*)val,
- 0, digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right, -1);
- break;
- case 8:
- Dragon4_PrintFloat64(repr, sizeof(repr), *(npy_float64*)val,
- 0, digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right, -1);
- break;
-#ifdef NPY_FLOAT80
- case 10:
- buf80.floatingPoint = *(npy_float80*)val;
- val128.integer[0] = buf80.integer.a;
- val128.integer[1] = buf80.integer.b;
- Dragon4_PrintFloat128(repr, sizeof(repr), val128,
- 0, digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right, -1);
- break;
-#endif
-#ifdef NPY_FLOAT96
- case 12:
- buf96.floatingPoint = *(npy_float96*)val;
- val128.integer[0] = buf96.integer.a;
- val128.integer[1] = buf96.integer.b;
- Dragon4_PrintFloat128(repr, sizeof(repr), val128,
- 0, digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right, -1);
- break;
-#endif
-#ifdef NPY_FLOAT128
- case 16:
- buf128.floatingPoint = *(npy_float128*)val;
- val128.integer[0] = buf128.integer.a;
- val128.integer[1] = buf128.integer.b;
- Dragon4_PrintFloat128(repr, sizeof(repr), val128,
- 0, digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right, -1);
- break;
-#endif
- default:
- PyErr_Format(PyExc_ValueError, "unexpected itemsize %zu", size);
- return NULL;
+ buf128.floatingPoint = *value;
+ val128.lo = buf128.integer.a;
+ val128.hi = buf128.integer.b;
+
+ return Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt);
+}
+#endif /* HAVE_LDOUBLE_IEEE_QUAD_LE */
+
+#if defined(HAVE_LDOUBLE_IEEE_QUAD_BE)
+/*
+ * This function is untested, very few, if any, architectures implement
+ * big endian IEEE binary128 floating point.
+ */
+static npy_uint32
+Dragon4_PrintFloat_IEEE_binary128_be(
+ Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
+{
+ FloatVal128 val128;
+ FloatUnion128 buf128;
+
+ buf128.floatingPoint = *value;
+ val128.lo = buf128.integer.b;
+ val128.hi = buf128.integer.a;
+
+ return Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt);
+}
+#endif /* HAVE_LDOUBLE_IEEE_QUAD_BE */
+
+#endif /* HAVE_LDOUBLE_IEEE_QUAD_LE | HAVE_LDOUBLE_IEEE_QUAD_BE */
+
+#if (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
+ defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE))
+/*
+ * IBM extended precision 128-bit floating-point format, aka IBM double-double
+ *
+ * IBM's double-double type is a pair of IEEE binary64 values, which you add
+ * together to get a total value. The exponents are arranged so that the lower
+ * double is about 2^52 times smaller than the high one, and the nearest
+ * float64 value is simply the upper double, in which case the pair is
+ * considered "normalized" (not to confuse with "normal" and "subnormal"
+ * binary64 values). We assume normalized values. You can see the glibc's
+ * printf on ppc does so too by constructing un-normalized values to get
+ * strange behavior from the OS printf:
+ *
+ * >>> from numpy.core._multiarray_tests import format_float_OSprintf_g
+ * >>> x = np.array([0.3,0.3], dtype='f8').view('f16')[0]
+ * >>> format_float_OSprintf_g(x, 2)
+ * 0.30
+ * >>> format_float_OSprintf_g(2*x, 2)
+ * 1.20
+ *
+ * If we don't assume normalization, x should really print as 0.6.
+ *
+ * For normalized values gcc assumes that the total mantissa is no
+ * more than 106 bits (53+53), so we can drop bits from the second double which
+ * would be pushed past 106 when left-shifting by its exponent, as happens
+ * sometimes. (There has been debate about this, see
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?format=multiple&id=70117,
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=22752 )
+ *
+ * Note: This function is for the IBM-double-double which is a pair of IEEE
+ * binary64 floats, like on ppc64 systems. This is *not* the hexadecimal
+ * IBM-double-double type, which is a pair of IBM hexadecimal64 floats.
+ *
+ * See also:
+ * https://gcc.gnu.org/wiki/Ieee128PowerPCA
+ * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_71/com.ibm.aix.genprogc/128bit_long_double_floating-point_datatype.htm
+ */
+static npy_uint32
+Dragon4_PrintFloat_IBM_double_double(
+ Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
+{
+ char *buffer = scratch->repr;
+ npy_uint32 bufferSize = sizeof(scratch->repr);
+ BigInt *bigints = scratch->bigints;
+
+ FloatVal128 val128;
+ FloatUnion128 buf128;
+
+ npy_uint32 floatExponent1, floatExponent2;
+ npy_uint64 floatMantissa1, floatMantissa2;
+ npy_uint32 floatSign1, floatSign2;
+
+ npy_uint64 mantissa1, mantissa2;
+ npy_int32 exponent1, exponent2;
+ int shift;
+ npy_uint32 mantissaBit;
+ npy_bool hasUnequalMargins;
+ char signbit = '\0';
+
+ if (bufferSize == 0) {
+ return 0;
+ }
+
+ if (bufferSize == 1) {
+ buffer[0] = '\0';
+ return 0;
+ }
+
+ /* The high part always comes before the low part, regardless of the
+ * endianness of the system. */
+ buf128.floatingPoint = *value;
+ val128.hi = buf128.integer.a;
+ val128.lo = buf128.integer.b;
+
+ /* deconstruct the floating point values */
+ floatMantissa1 = val128.hi & bitmask_u64(52);
+ floatExponent1 = (val128.hi >> 52) & bitmask_u32(11);
+ floatSign1 = (val128.hi >> 63) != 0;
+
+ floatMantissa2 = val128.lo & bitmask_u64(52);
+ floatExponent2 = (val128.lo >> 52) & bitmask_u32(11);
+ floatSign2 = (val128.lo >> 63) != 0;
+
+ /* output the sign using 1st float's sign */
+ if (floatSign1) {
+ signbit = '-';
+ }
+ else if (opt->sign) {
+ signbit = '+';
+ }
+
+ /* we only need to look at the first float for inf/nan */
+ if (floatExponent1 == bitmask_u32(11)) {
+ return PrintInfNan(buffer, bufferSize, floatMantissa1, 13, signbit);
+ }
+
+ /* else this is a number */
+
+ /* Factor the 1st value into its parts, see binary64 for comments. */
+ if (floatExponent1 == 0) {
+ /*
+ * If the first number is a subnormal value, the 2nd has to be 0 for
+ * the float128 to be normalized, so we can ignore it. In this case
+ * the float128 only has the precision of a single binary64 value.
+ */
+ mantissa1 = floatMantissa1;
+ exponent1 = 1 - 1023 - 52;
+ mantissaBit = LogBase2_64(mantissa1);
+ hasUnequalMargins = NPY_FALSE;
+
+ BigInt_Set_uint64(&bigints[0], mantissa1);
+ }
+ else {
+ mantissa1 = (1ull << 52) | floatMantissa1;
+ exponent1 = floatExponent1 - 1023 - 52;
+ mantissaBit = 52 + 53;
+
+ /*
+ * Computing hasUnequalMargins and mantissaBit:
+ * This is a little trickier than for IEEE formats.
+ *
+ * When both doubles are "normal" it is clearer since we can think of
+ * it as an IEEE type with a 106 bit mantissa. This value can never
+ * have "unequal" margins because of the implied 1 bit in the 2nd
+ * value. (unequal margins only happen when the mantissa has a value
+ * like "10000000000...", all zeros except the implied bit at the
+ * start, since the next lowest number has a different exponent).
+ * mantissaBits will always be 52+53 in this case.
+ *
+ * If the 1st number is a very small normal, and the 2nd is subnormal
+ * and not 2^52 times smaller, the number behaves like a subnormal
+ * overall, where the upper number just adds some bits on the left.
+ * Like usual subnormals, it has "equal" margins. The slightly tricky
+ * thing is that the number of mantissaBits varies. It will be 52
+ * (from lower double) plus a variable number depending on the upper
+ * number's exponent. We recompute the number of bits in the shift
+ * calculation below, because the shift will be equal to the number of
+ * lost bits.
+ *
+ * We can get unequal margins only if the first value has all-0
+ * mantissa (except implied bit), and the second value is exactly 0. As
+ * a special exception the smallest normal value (smallest exponent, 0
+ * mantissa) should have equal margins, since it is "next to" a
+ * subnormal value.
+ */
+
+ /* factor the 2nd value into its parts */
+ if (floatExponent2 != 0) {
+ mantissa2 = (1ull << 52) | floatMantissa2;
+ exponent2 = floatExponent2 - 1023 - 52;
+ hasUnequalMargins = NPY_FALSE;
+ }
+ else {
+ /* shift exp by one so that leading mantissa bit is still bit 53 */
+ mantissa2 = floatMantissa2 << 1;
+ exponent2 = - 1023 - 52;
+ hasUnequalMargins = (floatExponent1 != 1) && (floatMantissa1 == 0)
+ && (floatMantissa2 == 0);
+ }
+
+ /*
+ * The 2nd val's exponent might not be exactly 52 smaller than the 1st,
+ * it can vary a little bit. So do some shifting of the low mantissa,
+ * so that the total mantissa is equivalent to bits 53 to 0 of the
+ * first double immediately followed by bits 53 to 0 of the second.
+ */
+ shift = exponent1 - exponent2 - 53;
+ if (shift > 0) {
+ /* shift more than 64 is undefined behavior */
+ mantissa2 = shift < 64 ? mantissa2 >> shift : 0;
+ }
+ else if (shift < 0) {
+ /*
+ * This only happens if the 2nd value is subnormal.
+ * We expect that shift > -64, but check it anyway
+ */
+ mantissa2 = -shift < 64 ? mantissa2 << -shift : 0;
+ }
+
+ /*
+ * If the low double is a different sign from the high double,
+ * rearrange so that the total mantissa is the sum of the two
+ * mantissas, instead of a subtraction.
+ * hi - lo -> (hi-1) + (1-lo), where lo < 1
+ */
+ if (floatSign1 != floatSign2 && mantissa2 != 0) {
+ mantissa1--;
+ mantissa2 = (1ull << 53) - mantissa2;
+ }
+
+ /*
+ * Compute the number of bits if we are in the subnormal range.
+ * The value "shift" happens to be exactly the number of lost bits.
+ * Also, shift the bits so that the least significant bit is at
+ * bit position 0, like a typical subnormal. After this exponent1
+ * should always be -1022, i.e. the scale factor is 2^-1022.
+ */
+ if (shift < 0) {
+ mantissa2 = (mantissa2 >> -shift) | (mantissa1 << (53 + shift));
+ mantissa1 = mantissa1 >> -shift;
+ mantissaBit = mantissaBit - (-shift);
+ exponent1 -= shift;
+ DEBUG_ASSERT(exponent1 == -1022);
+ }
+
+ /*
+ * set up the BigInt mantissa, by shifting the parts as needed
+ * We can use | instead of + since the mantissas should not overlap
+ */
+ BigInt_Set_2x_uint64(&bigints[0], mantissa1 >> 11,
+ (mantissa1 << 53) | (mantissa2));
+ exponent1 = exponent1 - 53;
}
- return PyUString_FromString(repr);
+ return Format_floatbits(buffer, bufferSize, bigints, exponent1,
+ signbit, mantissaBit, hasUnequalMargins, opt);
}
+#endif /* HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE | HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE */
+
+#endif /* NPY_FLOAT128 */
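For intuition about the "normalized" pairs the IBM double-double code assumes, the canonical way to construct one is Knuth's two-sum, which splits a floating-point sum into a rounded head and an exact error tail; a minimal sketch (standard floating-point folklore, not numpy code):

    /* Returns s = fl(a + b) and stores the exact rounding error in *err,
     * so that s + *err == a + b exactly; (s, *err) is a normalized pair. */
    static double
    two_sum(double a, double b, double *err)
    {
        double s = a + b;
        double bv = s - a;
        *err = (a - (s - bv)) + (b - bv);
        return s;
    }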
+
+
+/*
+ * Here we define two Dragon4 entry functions for each type. One of them
+ * accepts the args in a Dragon4_Options struct for convenience, while the
+ * other enumerates only the necessary parameters.
+ *
+ * Use a very large string buffer in case anyone tries to output a large number.
+ * 16384 should be enough to exactly print the integer part of any float128,
+ * which goes up to about 10^4932. The Dragon4_Scratch struct provides a string
+ * buffer of this size.
+ */
+#define make_dragon4_typefuncs_inner(Type, npy_type, format) \
+\
+PyObject *\
+Dragon4_Positional_##Type##_opt(npy_type *val, Dragon4_Options *opt)\
+{\
+ PyObject *ret;\
+ Dragon4_Scratch *scratch = get_dragon4_bigint_scratch();\
+ if (scratch == NULL) {\
+ return NULL;\
+ }\
+ if (Dragon4_PrintFloat_##format(scratch, val, opt) < 0) {\
+ free_dragon4_bigint_scratch(scratch);\
+ return NULL;\
+ }\
+ ret = PyUString_FromString(scratch->repr);\
+ free_dragon4_bigint_scratch(scratch);\
+ return ret;\
+}\
+\
+PyObject *\
+Dragon4_Positional_##Type(npy_type *val, DigitMode digit_mode,\
+ CutoffMode cutoff_mode, int precision,\
+ int sign, TrimMode trim, int pad_left, int pad_right)\
+{\
+ Dragon4_Options opt;\
+ \
+ opt.scientific = 0;\
+ opt.digit_mode = digit_mode;\
+ opt.cutoff_mode = cutoff_mode;\
+ opt.precision = precision;\
+ opt.sign = sign;\
+ opt.trim_mode = trim;\
+ opt.digits_left = pad_left;\
+ opt.digits_right = pad_right;\
+ opt.exp_digits = -1;\
+\
+ return Dragon4_Positional_##Type##_opt(val, &opt);\
+}\
+\
+PyObject *\
+Dragon4_Scientific_##Type##_opt(npy_type *val, Dragon4_Options *opt)\
+{\
+ PyObject *ret;\
+ Dragon4_Scratch *scratch = get_dragon4_bigint_scratch();\
+ if (scratch == NULL) {\
+ return NULL;\
+ }\
+ if (Dragon4_PrintFloat_##format(scratch, val, opt) < 0) {\
+ free_dragon4_bigint_scratch(scratch);\
+ return NULL;\
+ }\
+ ret = PyUString_FromString(scratch->repr);\
+ free_dragon4_bigint_scratch(scratch);\
+ return ret;\
+}\
+\
+PyObject *\
+Dragon4_Scientific_##Type(npy_type *val, DigitMode digit_mode, int precision,\
+ int sign, TrimMode trim, int pad_left, int exp_digits)\
+{\
+ Dragon4_Options opt;\
+\
+ opt.scientific = 1;\
+ opt.digit_mode = digit_mode;\
+ opt.cutoff_mode = CutoffMode_TotalLength;\
+ opt.precision = precision;\
+ opt.sign = sign;\
+ opt.trim_mode = trim;\
+ opt.digits_left = pad_left;\
+ opt.digits_right = -1;\
+ opt.exp_digits = exp_digits;\
+\
+ return Dragon4_Scientific_##Type##_opt(val, &opt);\
+}
+
+#define make_dragon4_typefuncs(Type, npy_type, format) \
+ make_dragon4_typefuncs_inner(Type, npy_type, format)
+
+make_dragon4_typefuncs(Half, npy_half, NPY_HALF_BINFMT_NAME)
+make_dragon4_typefuncs(Float, npy_float, NPY_FLOAT_BINFMT_NAME)
+make_dragon4_typefuncs(Double, npy_double, NPY_DOUBLE_BINFMT_NAME)
+make_dragon4_typefuncs(LongDouble, npy_longdouble, NPY_LONGDOUBLE_BINFMT_NAME)
+
+#undef make_dragon4_typefuncs
+#undef make_dragon4_typefuncs_inner
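To make the token pasting concrete: for the Half instantiation, format is NPY_HALF_BINFMT_NAME, which the header (below) defines as IEEE_binary16, so the macro generates roughly (a sketch of the expansion, abbreviated):

    PyObject *Dragon4_Positional_Half_opt(npy_half *val, Dragon4_Options *opt);
    PyObject *Dragon4_Positional_Half(npy_half *val, DigitMode digit_mode,
                                      CutoffMode cutoff_mode, int precision,
                                      int sign, TrimMode trim, int pad_left,
                                      int pad_right);
    /* ...plus the matching Dragon4_Scientific_Half pair, with the
     * Dragon4_PrintFloat_##format calls resolving to
     * Dragon4_PrintFloat_IEEE_binary16. */

The two-level macro exists so that the format-name macro is expanded before the ## pasting takes place.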
+
PyObject *
Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode,
int precision, int sign, TrimMode trim, int pad_left,
int pad_right)
{
- double val;
+ npy_double val;
+ Dragon4_Options opt;
+
+ opt.scientific = 0;
+ opt.digit_mode = digit_mode;
+ opt.cutoff_mode = cutoff_mode;
+ opt.precision = precision;
+ opt.sign = sign;
+ opt.trim_mode = trim;
+ opt.digits_left = pad_left;
+ opt.digits_right = pad_right;
+ opt.exp_digits = -1;
if (PyArray_IsScalar(obj, Half)) {
npy_half x = ((PyHalfScalarObject *)obj)->obval;
- return Dragon4_Positional_AnySize(&x, sizeof(npy_half),
- digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right);
+ return Dragon4_Positional_Half_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Float)) {
npy_float x = ((PyFloatScalarObject *)obj)->obval;
- return Dragon4_Positional_AnySize(&x, sizeof(npy_float),
- digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right);
+ return Dragon4_Positional_Float_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Double)) {
npy_double x = ((PyDoubleScalarObject *)obj)->obval;
- return Dragon4_Positional_AnySize(&x, sizeof(npy_double),
- digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right);
+ return Dragon4_Positional_Double_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, LongDouble)) {
npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval;
- return Dragon4_Positional_AnySize(&x, sizeof(npy_longdouble),
- digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right);
+ return Dragon4_Positional_LongDouble_opt(&x, &opt);
}
val = PyFloat_AsDouble(obj);
if (PyErr_Occurred()) {
return NULL;
}
- return Dragon4_Positional_AnySize(&val, sizeof(double),
- digit_mode, cutoff_mode, precision,
- sign, trim, pad_left, pad_right);
-}
-
-PyObject *
-Dragon4_Scientific_AnySize(void *val, size_t size, DigitMode digit_mode,
- int precision, int sign, TrimMode trim,
- int pad_left, int exp_digits)
-{
- /* use a very large buffer in case anyone tries to output a large precision */
- static char repr[4096];
- FloatVal128 val128;
-#ifdef NPY_FLOAT80
- FloatUnion80 buf80;;
-#endif
-#ifdef NPY_FLOAT96
- FloatUnion96 buf96;
-#endif
-#ifdef NPY_FLOAT128
- FloatUnion128 buf128;
-#endif
-
- /* dummy, is ignored in scientific mode */
- CutoffMode cutoff_mode = CutoffMode_TotalLength;
-
- switch (size) {
- case 2:
- Dragon4_PrintFloat16(repr, sizeof(repr), *(npy_float16*)val,
- 1, digit_mode, cutoff_mode, precision, sign,
- trim, pad_left, -1, exp_digits);
- break;
- case 4:
- Dragon4_PrintFloat32(repr, sizeof(repr), *(npy_float32*)val,
- 1, digit_mode, cutoff_mode, precision, sign,
- trim, pad_left, -1, exp_digits);
- break;
- case 8:
- Dragon4_PrintFloat64(repr, sizeof(repr), *(npy_float64*)val,
- 1, digit_mode, cutoff_mode, precision, sign,
- trim, pad_left, -1, exp_digits);
- break;
-#ifdef NPY_FLOAT80
- case 10:
- buf80.floatingPoint = *(npy_float80*)val;
- val128.integer[0] = buf80.integer.a;
- val128.integer[1] = buf80.integer.b;
- Dragon4_PrintFloat128(repr, sizeof(repr), val128,
- 1, digit_mode, cutoff_mode, precision, sign,
- trim, pad_left, -1, exp_digits);
- break;
-#endif
-#ifdef NPY_FLOAT96
- case 12:
- buf96.floatingPoint = *(npy_float96*)val;
- val128.integer[0] = buf96.integer.a;
- val128.integer[1] = buf96.integer.b;
- Dragon4_PrintFloat128(repr, sizeof(repr), val128,
- 1, digit_mode, cutoff_mode, precision, sign,
- trim, pad_left, -1, exp_digits);
- break;
-#endif
-#ifdef NPY_FLOAT128
- case 16:
- buf128.floatingPoint = *(npy_float128*)val;
- val128.integer[0] = buf128.integer.a;
- val128.integer[1] = buf128.integer.b;
- Dragon4_PrintFloat128(repr, sizeof(repr), val128,
- 1, digit_mode, cutoff_mode, precision, sign,
- trim, pad_left, -1, exp_digits);
- break;
-#endif
- default:
- PyErr_Format(PyExc_ValueError, "unexpected itemsize %zu", size);
- return NULL;
- }
-
- return PyUString_FromString(repr);
+ return Dragon4_Positional_Double_opt(&val, &opt);
}
PyObject *
Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision,
int sign, TrimMode trim, int pad_left, int exp_digits)
{
- double val;
+ npy_double val;
+ Dragon4_Options opt;
+
+ opt.scientific = 1;
+ opt.digit_mode = digit_mode;
+ opt.cutoff_mode = CutoffMode_TotalLength;
+ opt.precision = precision;
+ opt.sign = sign;
+ opt.trim_mode = trim;
+ opt.digits_left = pad_left;
+ opt.digits_right = -1;
+ opt.exp_digits = exp_digits;
if (PyArray_IsScalar(obj, Half)) {
npy_half x = ((PyHalfScalarObject *)obj)->obval;
- return Dragon4_Scientific_AnySize(&x, sizeof(npy_half),
- digit_mode, precision,
- sign, trim, pad_left, exp_digits);
+ return Dragon4_Scientific_Half_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Float)) {
npy_float x = ((PyFloatScalarObject *)obj)->obval;
- return Dragon4_Scientific_AnySize(&x, sizeof(npy_float),
- digit_mode, precision,
- sign, trim, pad_left, exp_digits);
+ return Dragon4_Scientific_Float_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Double)) {
npy_double x = ((PyDoubleScalarObject *)obj)->obval;
- return Dragon4_Scientific_AnySize(&x, sizeof(npy_double),
- digit_mode, precision,
- sign, trim, pad_left, exp_digits);
+ return Dragon4_Scientific_Double_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, LongDouble)) {
npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval;
- return Dragon4_Scientific_AnySize(&x, sizeof(npy_longdouble),
- digit_mode, precision,
- sign, trim, pad_left, exp_digits);
+ return Dragon4_Scientific_LongDouble_opt(&x, &opt);
}
val = PyFloat_AsDouble(obj);
if (PyErr_Occurred()) {
return NULL;
}
- return Dragon4_Scientific_AnySize(&val, sizeof(double),
- digit_mode, precision,
- sign, trim, pad_left, exp_digits);
+ return Dragon4_Scientific_Double_opt(&val, &opt);
}
+
+#undef DEBUG_ASSERT
#include "npy_pycompat.h"
#include "numpy/arrayscalars.h"
+/* Half binary format */
+#define NPY_HALF_BINFMT_NAME IEEE_binary16
+
+/* Float binary format */
+#if NPY_BITSOF_FLOAT == 32
+ #define NPY_FLOAT_BINFMT_NAME IEEE_binary32
+#elif NPY_BITSOF_FLOAT == 64
+ #define NPY_FLOAT_BINFMT_NAME IEEE_binary64
+#else
+ #error No float representation defined
+#endif
+
+/* Double binary format */
+#if NPY_BITSOF_DOUBLE == 32
+ #define NPY_DOUBLE_BINFMT_NAME IEEE_binary32
+#elif NPY_BITSOF_DOUBLE == 64
+ #define NPY_DOUBLE_BINFMT_NAME IEEE_binary64
+#else
+ #error No double representation defined
+#endif
+
+/* LongDouble binary format */
+#if defined(HAVE_LDOUBLE_IEEE_QUAD_BE)
+ #define NPY_LONGDOUBLE_BINFMT_NAME IEEE_binary128_be
+#elif defined(HAVE_LDOUBLE_IEEE_QUAD_LE)
+ #define NPY_LONGDOUBLE_BINFMT_NAME IEEE_binary128_le
+#elif (defined(HAVE_LDOUBLE_IEEE_DOUBLE_LE) || \
+ defined(HAVE_LDOUBLE_IEEE_DOUBLE_BE))
+ #define NPY_LONGDOUBLE_BINFMT_NAME IEEE_binary64
+#elif defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE)
+ #define NPY_LONGDOUBLE_BINFMT_NAME Intel_extended96
+#elif defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE)
+ #define NPY_LONGDOUBLE_BINFMT_NAME Intel_extended128
+#elif defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE)
+ #define NPY_LONGDOUBLE_BINFMT_NAME Motorola_extended96
+#elif (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
+ defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE))
+ #define NPY_LONGDOUBLE_BINFMT_NAME IBM_double_double
+#else
+ #error No long double representation defined
+#endif
+
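+/*
+ * Illustrative sketch (not part of this patch; the PASTE/DRAGON4_PRINT
+ * names are hypothetical) of how the format-name macros above can be
+ * consumed via token pasting, so that a per-format print routine is
+ * selected at compile time:
+ *
+ *   #define PASTE_(a, b) a##b
+ *   #define PASTE(a, b) PASTE_(a, b)    (expand args, then paste)
+ *   #define DRAGON4_PRINT(binfmt) PASTE(Dragon4_PrintFloat_, binfmt)
+ *
+ * With these, DRAGON4_PRINT(NPY_DOUBLE_BINFMT_NAME) expands to
+ * Dragon4_PrintFloat_IEEE_binary64 on IEEE-double platforms.
+ */
+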
typedef enum DigitMode
{
/* Round digits to print shortest uniquely identifiable number. */
TrimMode_DptZeros, /* trim trailing zeros & trailing decimal point */
} TrimMode;
-PyObject *
-Dragon4_Positional_AnySize(void *val, size_t size, DigitMode digit_mode,
- CutoffMode cutoff_mode, int precision, int sign,
- TrimMode trim, int pad_left, int pad_right);
+#define make_dragon4_typedecl(Type, npy_type) \
+ PyObject *\
+ Dragon4_Positional_##Type(npy_type *val, DigitMode digit_mode,\
+ CutoffMode cutoff_mode, int precision,\
+ int sign, TrimMode trim, int pad_left,\
+ int pad_right);\
+ PyObject *\
+ Dragon4_Scientific_##Type(npy_type *val, DigitMode digit_mode,\
+ int precision, int sign, TrimMode trim,\
+ int pad_left, int exp_digits);
-PyObject *
-Dragon4_Scientific_AnySize(void *val, size_t size, DigitMode digit_mode,
- int precision, int sign, TrimMode trim,
- int pad_left, int pad_right);
+make_dragon4_typedecl(Half, npy_half)
+make_dragon4_typedecl(Float, npy_float)
+make_dragon4_typedecl(Double, npy_double)
+make_dragon4_typedecl(LongDouble, npy_longdouble)
+
+#undef make_dragon4_typedecl
PyObject *
Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode,
* The copyswap functions shouldn't need that.
*/
Py_INCREF(dtype);
- data->arr = (PyArrayObject *)PyArray_NewFromDescr_int(&PyArray_Type, dtype,
- 1, &shape, NULL, NULL, 0, NULL, 0, 1);
+ data->arr = (PyArrayObject *)PyArray_NewFromDescr_int(
+ &PyArray_Type, dtype,
+ 1, &shape, NULL, NULL,
+ 0, NULL, NULL,
+ 0, 1);
if (data->arr == NULL) {
PyArray_free(data);
return NPY_FAIL;
return NPY_FAIL;
}
}
- data->aip = (PyArrayObject *)PyArray_NewFromDescr_int(&PyArray_Type,
- tmp_dtype, 1, &shape, NULL, NULL, 0, NULL, 0, 1);
+ data->aip = (PyArrayObject *)PyArray_NewFromDescr_int(
+ &PyArray_Type, tmp_dtype,
+ 1, &shape, NULL, NULL,
+ 0, NULL, NULL,
+ 0, 1);
if (data->aip == NULL) {
PyArray_free(data);
return NPY_FAIL;
return NPY_FAIL;
}
}
- data->aop = (PyArrayObject *)PyArray_NewFromDescr_int(&PyArray_Type,
- tmp_dtype, 1, &shape, NULL, NULL, 0, NULL, 0, 1);
+ data->aop = (PyArrayObject *)PyArray_NewFromDescr_int(
+ &PyArray_Type, tmp_dtype,
+ 1, &shape, NULL, NULL,
+ 0, NULL, NULL,
+ 0, 1);
if (data->aop == NULL) {
Py_DECREF(data->aip);
PyArray_free(data);
{
npy_intp src_itemsize, dst_itemsize;
int src_type_num, dst_type_num;
+ int is_builtin;
#if NPY_DT_DBG_TRACING
printf("Calculating dtype transfer from ");
dst_itemsize = dst_dtype->elsize;
src_type_num = src_dtype->type_num;
dst_type_num = dst_dtype->type_num;
+ is_builtin = src_type_num < NPY_NTYPES && dst_type_num < NPY_NTYPES;
/* Common special case - number -> number NBO cast */
if (PyTypeNum_ISNUMBER(src_type_num) &&
}
/*
- * If there are no references and the data types are equivalent,
+ * If there are no references and the data types are equivalent and builtin,
* return a simple copy
*/
if (PyArray_EquivTypes(src_dtype, dst_dtype) &&
!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
( !PyDataType_HASFIELDS(dst_dtype) ||
- is_dtype_struct_simple_unaligned_layout(dst_dtype)) ) {
+ is_dtype_struct_simple_unaligned_layout(dst_dtype)) &&
+ is_builtin) {
/*
* We can't pass through the aligned flag because it's not
* appropriate. Consider a size-8 string, it will say it's
!PyDataType_HASSUBARRAY(dst_dtype) &&
src_type_num != NPY_DATETIME && src_type_num != NPY_TIMEDELTA) {
/* A custom data type requires that we use its copy/swap */
- if (src_type_num >= NPY_NTYPES || dst_type_num >= NPY_NTYPES) {
+ if (!is_builtin) {
/*
* If the sizes and kinds are identical, but they're different
* custom types, then get a cast function
#include "convert.h"
#include "common.h"
+#include "ctors.h"
#ifdef NPY_HAVE_SSE_INTRINSICS
#define EINSUM_USE_SSE1 1
accum += @from@(data0[@i@]) * @from@(data1[@i@]);
/**end repeat2**/
case 0:
- *(@type@ *)dataptr[2] += @to@(accum);
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum);
return;
}
accum += @from@(data1[@i@]);
/**end repeat2**/
case 0:
- *(@type@ *)dataptr[2] += @to@(value0 * accum);
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum);
return;
}
accum += @from@(data0[@i@]);
/**end repeat2**/
case 0:
- *(@type@ *)dataptr[2] += @to@(accum * value1);
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum * value1);
return;
}
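/*
 * Note on the change above (explanatory, not code from this patch): for
 * storage types such as npy_half the output pointer holds raw bit
 * patterns, so the old `*(type *)dataptr[2] += to(accum)` form performed
 * integer addition on the bits. Reading the stored value out with from(),
 * accumulating, and converting back with to() gives true floating-point
 * addition.
 */
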
return _unspecialized_table[type_num][nop <= 3 ? nop : 0];
}
+
/*
- * Parses the subscripts for one operand into an output
- * of 'ndim' labels
+ * Parses the subscripts for one operand into an output of 'ndim'
+ * labels. The resulting 'op_labels' array will have:
+ * - the ASCII code of the label for the first occurrence of a label;
+ * - the (negative) offset to the first occurrence of the label for
+ * repeated labels;
+ * - zero for broadcast dimensions, if subscripts has an ellipsis.
+ * For example:
+ * - subscripts="abbcbc", ndim=6 -> op_labels=[97, 98, -1, 99, -3, -2]
+ * - subscripts="ab...bc", ndim=6 -> op_labels=[97, 98, 0, 0, -3, 99]
*/
+
static int
parse_operand_subscripts(char *subscripts, int length,
- int ndim,
- int iop, char *out_labels,
- char *out_label_counts,
- int *out_min_label,
- int *out_max_label,
- int *out_num_labels)
+ int ndim, int iop, char *op_labels,
+ char *label_counts, int *min_label, int *max_label)
{
- int i, idim, ndim_left, label;
- int ellipsis = 0;
+ int i;
+ int idim = 0;
+ int ellipsis = -1;
+
+ /* Process all labels for this operand */
+ for (i = 0; i < length; ++i) {
+ int label = subscripts[i];
- /* Process the labels from the end until the ellipsis */
- idim = ndim-1;
- for (i = length-1; i >= 0; --i) {
- label = subscripts[i];
- /* A label for an axis */
+ /* A proper label for an axis. */
if (label > 0 && isalpha(label)) {
- if (idim >= 0) {
- out_labels[idim--] = label;
- /* Calculate the min and max labels */
- if (label < *out_min_label) {
- *out_min_label = label;
- }
- if (label > *out_max_label) {
- *out_max_label = label;
- }
- /* If it's the first time we see this label, count it */
- if (out_label_counts[label] == 0) {
- (*out_num_labels)++;
- }
- out_label_counts[label]++;
- }
- else {
+ /* Check we don't exceed the operator dimensions. */
+ if (idim >= ndim) {
PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains "
- "too many subscripts for operand %d", iop);
- return 0;
+ "einstein sum subscripts string contains "
+ "too many subscripts for operand %d", iop);
+ return -1;
+ }
+
+ op_labels[idim++] = label;
+ if (label < *min_label) {
+ *min_label = label;
+ }
+ if (label > *max_label) {
+ *max_label = label;
}
+ label_counts[label]++;
}
- /* The end of the ellipsis */
+ /* The beginning of the ellipsis. */
else if (label == '.') {
- /* A valid ellipsis */
- if (i >= 2 && subscripts[i-1] == '.' && subscripts[i-2] == '.') {
- ellipsis = 1;
- length = i-2;
- break;
- }
- else {
+ /* Check it's a proper ellipsis. */
+ if (ellipsis != -1 || i + 2 >= length
+ || subscripts[++i] != '.' || subscripts[++i] != '.') {
PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains a "
- "'.' that is not part of an ellipsis ('...') in "
- "operand %d", iop);
- return 0;
-
+ "einstein sum subscripts string contains a "
+ "'.' that is not part of an ellipsis ('...') "
+ "in operand %d", iop);
+ return -1;
}
+
+ ellipsis = idim;
}
else if (label != ' ') {
PyErr_Format(PyExc_ValueError,
- "invalid subscript '%c' in einstein sum "
- "subscripts string, subscripts must "
- "be letters", (char)label);
- return 0;
+ "invalid subscript '%c' in einstein sum "
+ "subscripts string, subscripts must "
+ "be letters", (char)label);
+ return -1;
}
}
- if (!ellipsis && idim != -1) {
- PyErr_Format(PyExc_ValueError,
- "operand has more dimensions than subscripts "
- "given in einstein sum, but no '...' ellipsis "
- "provided to broadcast the extra dimensions.");
- return 0;
- }
-
- /* Reduce ndim to just the dimensions left to fill at the beginning */
- ndim_left = idim+1;
- idim = 0;
-
- /*
- * If we stopped because of an ellipsis, start again from the beginning.
- * The length was truncated to end at the ellipsis in this case.
- */
- if (i > 0) {
- for (i = 0; i < length; ++i) {
- label = subscripts[i];
- /* A label for an axis */
- if (label > 0 && isalnum(label)) {
- if (idim < ndim_left) {
- out_labels[idim++] = label;
- /* Calculate the min and max labels */
- if (label < *out_min_label) {
- *out_min_label = label;
- }
- if (label > *out_max_label) {
- *out_max_label = label;
- }
- /* If it's the first time we see this label, count it */
- if (out_label_counts[label] == 0) {
- (*out_num_labels)++;
- }
- out_label_counts[label]++;
- }
- else {
- PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains "
- "too many subscripts for operand %d", iop);
- return 0;
- }
- }
- else if (label == '.') {
- PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains a "
- "'.' that is not part of an ellipsis ('...') in "
- "operand %d", iop);
- }
- else if (label != ' ') {
- PyErr_Format(PyExc_ValueError,
- "invalid subscript '%c' in einstein sum "
- "subscripts string, subscripts must "
- "be letters", (char)label);
- return 0;
- }
+ /* No ellipsis found, labels must match dimensions exactly. */
+ if (ellipsis == -1) {
+ if (idim != ndim) {
+ PyErr_Format(PyExc_ValueError,
+ "operand has more dimensions than subscripts "
+ "given in einstein sum, but no '...' ellipsis "
+ "provided to broadcast the extra dimensions.");
+ return -1;
}
}
-
- /* Set the remaining labels to 0 */
- while (idim < ndim_left) {
- out_labels[idim++] = 0;
+ /* Ellipsis found, may have to add broadcast dimensions. */
+ else if (idim < ndim) {
+ /* Move labels after ellipsis to the end. */
+ for (i = 0; i < idim - ellipsis; ++i) {
+ op_labels[ndim - i - 1] = op_labels[idim - i - 1];
+ }
+ /* Set all broadcast dimensions to zero. */
+ for (i = 0; i < ndim - idim; ++i) {
+ op_labels[ellipsis + i] = 0;
+ }
}
/*
 * two's complement arithmetic the char is ok either way here, and
* later where it matters the char is cast to a signed char.
*/
- for (idim = 0; idim < ndim-1; ++idim) {
- char *next;
- /* If this is a proper label, find any duplicates of it */
- label = out_labels[idim];
+ for (idim = 0; idim < ndim - 1; ++idim) {
+ int label = op_labels[idim];
+ /* If it is a proper label, find any duplicates of it. */
if (label > 0) {
- /* Search for the next matching label */
- next = (char *)memchr(out_labels+idim+1, label,
- ndim-idim-1);
+ /* Search for the next matching label. */
+ char *next = memchr(op_labels + idim + 1, label, ndim - idim - 1);
+
while (next != NULL) {
- /* The offset from next to out_labels[idim] (negative) */
- *next = (char)((out_labels+idim)-next);
- /* Search for the next matching label */
- next = (char *)memchr(next+1, label,
- out_labels+ndim-1-next);
+ /* The offset from next to op_labels[idim] (negative). */
+ *next = (char)((op_labels + idim) - next);
+ /* Search for the next matching label. */
+ next = memchr(next + 1, label, op_labels + ndim - 1 - next);
}
}
}
- return 1;
+ return 0;
}
+
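+/*
+ * A minimal sketch (hypothetical consumer, not part of this patch) of
+ * how the negative offsets stored in op_labels are decoded later on:
+ *
+ *   int label = (signed char)op_labels[idim];
+ *   if (label < 0) {
+ *       int first = idim + label;    (axis of the label's first use)
+ *       ... merge axis idim with axis first ...
+ *   }
+ */
+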
/*
- * Parses the subscripts for the output operand into an output
- * that requires 'ndim_broadcast' unlabeled dimensions, returning
- * the number of output dimensions. Returns -1 if there is an error.
+ * Parses the subscripts for the output operand into an output that
+ * includes 'ndim_broadcast' unlabeled dimensions, and returns the total
+ * number of output dimensions, or -1 if there is an error. Similarly
+ * to parse_operand_subscripts, the 'out_labels' array will have, for
+ * each dimension:
+ * - the ASCII code of the corresponding label;
+ * - zero for broadcast dimensions, if subscripts has an ellipsis.
*/
static int
parse_output_subscripts(char *subscripts, int length,
int ndim_broadcast,
- const char *label_counts,
- char *out_labels)
+ const char *label_counts, char *out_labels)
{
- int i, nlabels, label, idim, ndim, ndim_left;
+ int i, bdim;
+ int ndim = 0;
int ellipsis = 0;
- /* Count the labels, making sure they're all unique and valid */
- nlabels = 0;
+ /* Process all the output labels. */
for (i = 0; i < length; ++i) {
- label = subscripts[i];
- if (label > 0 && isalpha(label)) {
- /* Check if it occurs again */
- if (memchr(subscripts+i+1, label, length-i-1) == NULL) {
- /* Check that it was used in the inputs */
- if (label_counts[label] == 0) {
- PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string included "
- "output subscript '%c' which never appeared "
- "in an input", (char)label);
- return -1;
- }
+ int label = subscripts[i];
- nlabels++;
- }
- else {
+ /* A proper label for an axis. */
+ if (label > 0 && isalpha(label)) {
+ /* Check that it doesn't occur again. */
+ if (memchr(subscripts + i + 1, label, length - i - 1) != NULL) {
PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string includes "
- "output subscript '%c' multiple times",
- (char)label);
+ "einstein sum subscripts string includes "
+ "output subscript '%c' multiple times",
+ (char)label);
return -1;
}
- }
- else if (label != '.' && label != ' ') {
- PyErr_Format(PyExc_ValueError,
- "invalid subscript '%c' in einstein sum "
- "subscripts string, subscripts must "
- "be letters", (char)label);
- return -1;
- }
- }
-
- /* The number of output dimensions */
- ndim = ndim_broadcast + nlabels;
-
- /* Process the labels from the end until the ellipsis */
- idim = ndim-1;
- for (i = length-1; i >= 0; --i) {
- label = subscripts[i];
- /* A label for an axis */
- if (label != '.' && label != ' ') {
- if (idim >= 0) {
- out_labels[idim--] = label;
+ /* Check that it was used in the inputs. */
+ if (label_counts[label] == 0) {
+ PyErr_Format(PyExc_ValueError,
+ "einstein sum subscripts string included "
+ "output subscript '%c' which never appeared "
+ "in an input", (char)label);
+ return -1;
}
- else {
+ /* Check that there is room in out_labels for this label. */
+ if (ndim >= NPY_MAXDIMS) {
PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains "
- "too many output subscripts");
+ "einstein sum subscripts string contains "
+ "too many subscripts in the output");
return -1;
}
+
+ out_labels[ndim++] = label;
}
- /* The end of the ellipsis */
+ /* The beginning of the ellipsis. */
else if (label == '.') {
- /* A valid ellipsis */
- if (i >= 2 && subscripts[i-1] == '.' && subscripts[i-2] == '.') {
- ellipsis = 1;
- length = i-2;
- break;
- }
- else {
+ /* Check it is a proper ellipsis. */
+ if (ellipsis || i + 2 >= length
+ || subscripts[++i] != '.' || subscripts[++i] != '.') {
PyErr_SetString(PyExc_ValueError,
- "einstein sum subscripts string contains a "
- "'.' that is not part of an ellipsis ('...') "
- "in the output");
+ "einstein sum subscripts string "
+ "contains a '.' that is not part of "
+ "an ellipsis ('...') in the output");
return -1;
-
}
- }
- }
-
- if (!ellipsis && idim != -1) {
- PyErr_SetString(PyExc_ValueError,
- "output has more dimensions than subscripts "
- "given in einstein sum, but no '...' ellipsis "
- "provided to broadcast the extra dimensions.");
- return 0;
- }
-
- /* Reduce ndim to just the dimensions left to fill at the beginning */
- ndim_left = idim+1;
- idim = 0;
-
- /*
- * If we stopped because of an ellipsis, start again from the beginning.
- * The length was truncated to end at the ellipsis in this case.
- */
- if (i > 0) {
- for (i = 0; i < length; ++i) {
- label = subscripts[i];
- if (label == '.') {
- PyErr_SetString(PyExc_ValueError,
- "einstein sum subscripts string contains a "
- "'.' that is not part of an ellipsis ('...') "
- "in the output");
+ /* Check there is room in out_labels for broadcast dims. */
+ if (ndim + ndim_broadcast > NPY_MAXDIMS) {
+ PyErr_Format(PyExc_ValueError,
+ "einstein sum subscripts string contains "
+ "too many subscripts in the output");
return -1;
}
- /* A label for an axis */
- else if (label != ' ') {
- if (idim < ndim_left) {
- out_labels[idim++] = label;
- }
- else {
- PyErr_Format(PyExc_ValueError,
- "einstein sum subscripts string contains "
- "too many subscripts for the output");
- return -1;
- }
+
+ ellipsis = 1;
+ for (bdim = 0; bdim < ndim_broadcast; ++bdim) {
+ out_labels[ndim++] = 0;
}
}
+ else if (label != ' ') {
+ PyErr_Format(PyExc_ValueError,
+ "invalid subscript '%c' in einstein sum "
+ "subscripts string, subscripts must "
+ "be letters", (char)label);
+ return -1;
+ }
}
- /* Set the remaining output labels to 0 */
- while (idim < ndim_left) {
- out_labels[idim++] = 0;
+ /* If no ellipsis was found there should be no broadcast dimensions. */
+ if (!ellipsis && ndim_broadcast > 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "output has more dimensions than subscripts "
+ "given in einstein sum, but no '...' ellipsis "
+ "provided to broadcast the extra dimensions.");
+ return -1;
}
return ndim;
if (ibroadcast == ndim_output) {
PyErr_SetString(PyExc_ValueError,
"output had too few broadcast dimensions");
- return 0;
+ return -1;
}
new_dims[ibroadcast] = PyArray_DIM(op, idim);
new_strides[ibroadcast] = PyArray_STRIDE(op, idim);
"index '%c' don't match (%d != %d)",
iop, label, (int)new_dims[i],
(int)PyArray_DIM(op, idim));
- return 0;
+ return -1;
}
new_dims[i] = PyArray_DIM(op, idim);
new_strides[i] += PyArray_STRIDE(op, idim);
/* If we processed all the input axes, return a view */
if (idim == ndim) {
Py_INCREF(PyArray_DESCR(op));
- *ret = (PyArrayObject *)PyArray_NewFromDescr(
- Py_TYPE(op),
- PyArray_DESCR(op),
- ndim_output, new_dims, new_strides,
- PyArray_DATA(op),
- PyArray_ISWRITEABLE(op) ? NPY_ARRAY_WRITEABLE : 0,
- (PyObject *)op);
+ *ret = (PyArrayObject *)PyArray_NewFromDescr_int(
+ Py_TYPE(op), PyArray_DESCR(op),
+ ndim_output, new_dims, new_strides, PyArray_DATA(op),
+ PyArray_ISWRITEABLE(op) ? NPY_ARRAY_WRITEABLE : 0,
+ (PyObject *)op, (PyObject *)op,
+ 0, 0);
if (*ret == NULL) {
- return 0;
- }
- if (!PyArray_Check(*ret)) {
- Py_DECREF(*ret);
- *ret = NULL;
- PyErr_SetString(PyExc_RuntimeError,
- "NewFromDescr failed to return an array");
- return 0;
- }
- PyArray_UpdateFlags(*ret,
- NPY_ARRAY_C_CONTIGUOUS|
- NPY_ARRAY_ALIGNED|
- NPY_ARRAY_F_CONTIGUOUS);
- Py_INCREF(op);
- if (PyArray_SetBaseObject(*ret, (PyObject *)op) < 0) {
- Py_DECREF(*ret);
- *ret = NULL;
- return 0;
+ return -1;
}
- return 1;
+ return 0;
}
/* Return success, but that we couldn't make a view */
*ret = NULL;
- return 1;
+ return 0;
}
static PyArrayObject *
ndim = icombine;
Py_INCREF(PyArray_DESCR(op));
- ret = (PyArrayObject *)PyArray_NewFromDescr(
- Py_TYPE(op),
- PyArray_DESCR(op),
- ndim, new_dims, new_strides,
- PyArray_DATA(op),
- PyArray_ISWRITEABLE(op) ? NPY_ARRAY_WRITEABLE : 0,
- (PyObject *)op);
-
- if (ret == NULL) {
- return NULL;
- }
- if (!PyArray_Check(ret)) {
- Py_DECREF(ret);
- PyErr_SetString(PyExc_RuntimeError,
- "NewFromDescr failed to return an array");
- return NULL;
- }
- PyArray_UpdateFlags(ret,
- NPY_ARRAY_C_CONTIGUOUS|
- NPY_ARRAY_ALIGNED|
- NPY_ARRAY_F_CONTIGUOUS);
- Py_INCREF(op);
- if (PyArray_SetBaseObject(ret, (PyObject *)op) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
-
+ ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ Py_TYPE(op), PyArray_DESCR(op),
+ ndim, new_dims, new_strides, PyArray_DATA(op),
+ PyArray_ISWRITEABLE(op) ? NPY_ARRAY_WRITEABLE : 0,
+ (PyObject *)op, (PyObject *)op);
return ret;
}
}
}
- return 1;
+ return 0;
}
static int
NPY_ORDER order, NPY_CASTING casting,
PyArrayObject *out)
{
- int iop, label, min_label = 127, max_label = 0, num_labels;
+ int iop, label, min_label = 127, max_label = 0;
char label_counts[128];
char op_labels[NPY_MAXARGS][NPY_MAXDIMS];
char output_labels[NPY_MAXDIMS], *iter_labels;
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
- npy_uint32 op_flags[NPY_MAXARGS];
+ npy_uint32 iter_flags, op_flags[NPY_MAXARGS];
NpyIter *iter;
sum_of_products_fn sop;
/* Parse the subscripts string into label_counts and op_labels */
memset(label_counts, 0, sizeof(label_counts));
- num_labels = 0;
for (iop = 0; iop < nop; ++iop) {
int length = (int)strcspn(subscripts, ",-");
return NULL;
}
- if (!parse_operand_subscripts(subscripts, length,
+ if (parse_operand_subscripts(subscripts, length,
PyArray_NDIM(op_in[iop]),
iop, op_labels[iop], label_counts,
- &min_label, &max_label, &num_labels)) {
+ &min_label, &max_label) < 0) {
return NULL;
}
}
/*
- * If there is no output signature, create one using each label
- * that appeared once, in alphabetical order
+ * If there is no output signature, fill output_labels and ndim_output
+ * using each label that appeared once, in alphabetical order.
*/
if (subscripts[0] == '\0') {
- char outsubscripts[NPY_MAXDIMS + 3];
- int length;
- /* If no output was specified, always broadcast left (like normal) */
- outsubscripts[0] = '.';
- outsubscripts[1] = '.';
- outsubscripts[2] = '.';
- length = 3;
+ /* If no output was specified, always broadcast left, as usual. */
+ for (ndim_output = 0; ndim_output < ndim_broadcast; ++ndim_output) {
+ output_labels[ndim_output] = 0;
+ }
for (label = min_label; label <= max_label; ++label) {
if (label_counts[label] == 1) {
- if (length < NPY_MAXDIMS-1) {
- outsubscripts[length++] = label;
+ if (ndim_output < NPY_MAXDIMS) {
+ output_labels[ndim_output++] = label;
}
else {
PyErr_SetString(PyExc_ValueError,
}
}
}
- /* Parse the output subscript string */
- ndim_output = parse_output_subscripts(outsubscripts, length,
- ndim_broadcast, label_counts,
- output_labels);
}
else {
if (subscripts[0] != '-' || subscripts[1] != '>') {
}
subscripts += 2;
- /* Parse the output subscript string */
+ /* Parse the output subscript string. */
ndim_output = parse_output_subscripts(subscripts, strlen(subscripts),
ndim_broadcast, label_counts,
output_labels);
- }
- if (ndim_output < 0) {
- return NULL;
+ if (ndim_output < 0) {
+ return NULL;
+ }
}
if (out != NULL && PyArray_NDIM(out) != ndim_output) {
if (iop == 0 && nop == 1 && out == NULL) {
ret = NULL;
- if (!get_single_op_view(op_in[iop], iop, labels,
- ndim_output, output_labels,
- &ret)) {
+ if (get_single_op_view(op_in[iop], iop, labels,
+ ndim_output, output_labels,
+ &ret) < 0) {
return NULL;
}
for (iop = 0; iop < nop; ++iop) {
op_axes[iop] = op_axes_arrays[iop];
- if (!prepare_op_axes(PyArray_NDIM(op[iop]), iop, op_labels[iop],
- op_axes[iop], ndim_iter, iter_labels)) {
+ if (prepare_op_axes(PyArray_NDIM(op[iop]), iop, op_labels[iop],
+ op_axes[iop], ndim_iter, iter_labels) < 0) {
goto fail;
}
}
NPY_ITER_ALIGNED|
NPY_ITER_ALLOCATE|
NPY_ITER_NO_BROADCAST;
+ iter_flags = NPY_ITER_EXTERNAL_LOOP|
+ NPY_ITER_BUFFERED|
+ NPY_ITER_DELAY_BUFALLOC|
+ NPY_ITER_GROWINNER|
+ NPY_ITER_REDUCE_OK|
+ NPY_ITER_REFS_OK|
+ NPY_ITER_ZEROSIZE_OK;
+ if (out != NULL) {
+ iter_flags |= NPY_ITER_COPY_IF_OVERLAP;
+ }
+ if (dtype == NULL) {
+ iter_flags |= NPY_ITER_COMMON_DTYPE;
+ }
/* Allocate the iterator */
- iter = NpyIter_AdvancedNew(nop+1, op, NPY_ITER_EXTERNAL_LOOP|
- ((dtype != NULL) ? 0 : NPY_ITER_COMMON_DTYPE)|
- NPY_ITER_BUFFERED|
- NPY_ITER_DELAY_BUFALLOC|
- NPY_ITER_GROWINNER|
- NPY_ITER_REDUCE_OK|
- NPY_ITER_REFS_OK|
- NPY_ITER_ZEROSIZE_OK,
- order, casting,
- op_flags, op_dtypes,
- ndim_iter, op_axes, NULL, 0);
+ iter = NpyIter_AdvancedNew(nop+1, op, iter_flags, order, casting, op_flags,
+ op_dtypes, ndim_iter, op_axes, NULL, 0);
if (iter == NULL) {
goto fail;
}
- /* Initialize the output to all zeros and reset the iterator */
+ /* Initialize the output to all zeros */
ret = NpyIter_GetOperandArray(iter)[nop];
- Py_INCREF(ret);
- PyArray_AssignZero(ret, NULL);
-
+ if (PyArray_AssignZero(ret, NULL) < 0) {
+ goto fail;
+ }
/***************************/
/*
case 1:
if (ndim == 2) {
if (unbuffered_loop_nop1_ndim2(iter) < 0) {
- Py_DECREF(ret);
- ret = NULL;
goto fail;
}
goto finish;
}
else if (ndim == 3) {
if (unbuffered_loop_nop1_ndim3(iter) < 0) {
- Py_DECREF(ret);
- ret = NULL;
goto fail;
}
goto finish;
case 2:
if (ndim == 2) {
if (unbuffered_loop_nop2_ndim2(iter) < 0) {
- Py_DECREF(ret);
- ret = NULL;
goto fail;
}
goto finish;
}
else if (ndim == 3) {
if (unbuffered_loop_nop2_ndim3(iter) < 0) {
- Py_DECREF(ret);
- ret = NULL;
goto fail;
}
goto finish;
/***************************/
if (NpyIter_Reset(iter, NULL) != NPY_SUCCEED) {
- Py_DECREF(ret);
goto fail;
}
if (sop == NULL) {
PyErr_SetString(PyExc_TypeError,
"invalid data type for einsum");
- Py_DECREF(ret);
- ret = NULL;
}
else if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
iternext = NpyIter_GetIterNext(iter, NULL);
if (iternext == NULL) {
NpyIter_Deallocate(iter);
- Py_DECREF(ret);
goto fail;
}
dataptr = NpyIter_GetDataPtrArray(iter);
/* If the API was needed, it may have thrown an error */
if (NpyIter_IterationNeedsAPI(iter) && PyErr_Occurred()) {
- Py_DECREF(ret);
- ret = NULL;
+ goto fail;
}
}
finish:
+ if (out != NULL) {
+ ret = out;
+ }
+ Py_INCREF(ret);
+
NpyIter_Deallocate(iter);
for (iop = 0; iop < nop; ++iop) {
Py_DECREF(op[iop]);
#include "npy_import.h"
#include "common.h"
+#include "ctors.h"
#include "scalartypes.h"
#include "descriptor.h"
#include "getset.h"
Py_DECREF(type);
type = new;
}
- ret = (PyArrayObject *)
- PyArray_NewFromDescr(Py_TYPE(self),
- type,
- PyArray_NDIM(self),
- PyArray_DIMS(self),
- PyArray_STRIDES(self),
- PyArray_BYTES(self) + offset,
- PyArray_FLAGS(self), (PyObject *)self);
+ ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ Py_TYPE(self),
+ type,
+ PyArray_NDIM(self),
+ PyArray_DIMS(self),
+ PyArray_STRIDES(self),
+ PyArray_BYTES(self) + offset,
+ PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self);
if (ret == NULL) {
return NULL;
}
- Py_INCREF(self);
- if (PyArray_SetBaseObject(ret, (PyObject *)self) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- PyArray_CLEARFLAGS(ret, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS);
return ret;
}
NPY_BEGIN_THREADS_DEF;
- rop = (PyArrayObject *)PyArray_New(Py_TYPE(op), PyArray_NDIM(op),
- PyArray_DIMS(op), NPY_INTP,
- NULL, NULL, 0, 0, (PyObject *)op);
+ rop = (PyArrayObject *)PyArray_NewFromDescr(
+ Py_TYPE(op), PyArray_DescrFromType(NPY_INTP),
+ PyArray_NDIM(op), PyArray_DIMS(op), NULL, NULL,
+ 0, (PyObject *)op);
if (rop == NULL) {
return NULL;
}
nd = PyArray_NDIM(mps[0]);
if ((nd == 0) || (PyArray_SIZE(mps[0]) == 1)) {
/* single element case */
- ret = (PyArrayObject *)PyArray_New(&PyArray_Type, PyArray_NDIM(mps[0]),
- PyArray_DIMS(mps[0]),
- NPY_INTP,
- NULL, NULL, 0, 0, NULL);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DescrFromType(NPY_INTP),
+ PyArray_NDIM(mps[0]), PyArray_DIMS(mps[0]), NULL, NULL,
+ 0, NULL);
if (ret == NULL) {
goto fail;
}
/* Now do the sorting */
- ret = (PyArrayObject *)PyArray_New(&PyArray_Type, PyArray_NDIM(mps[0]),
- PyArray_DIMS(mps[0]), NPY_INTP,
- NULL, NULL, 0, 0, NULL);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DescrFromType(NPY_INTP),
+ PyArray_NDIM(mps[0]), PyArray_DIMS(mps[0]), NULL, NULL,
+ 0, NULL);
if (ret == NULL) {
goto fail;
}
}
/* ret is a contiguous array of intp type to hold returned indexes */
- ret = (PyArrayObject *)PyArray_New(&PyArray_Type, PyArray_NDIM(ap2),
- PyArray_DIMS(ap2), NPY_INTP,
- NULL, NULL, 0, 0, (PyObject *)ap2);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DescrFromType(NPY_INTP),
+ PyArray_NDIM(ap2), PyArray_DIMS(ap2), NULL, NULL,
+ 0, (PyObject *)ap2);
if (ret == NULL) {
goto fail;
}
/* Create the diagonal view */
dtype = PyArray_DTYPE(self);
Py_INCREF(dtype);
- ret = PyArray_NewFromDescr(Py_TYPE(self),
- dtype,
- ndim-1, ret_shape,
- ret_strides,
- data,
- PyArray_FLAGS(self),
- (PyObject *)self);
+ ret = PyArray_NewFromDescrAndBase(
+ Py_TYPE(self), dtype,
+ ndim-1, ret_shape, ret_strides, data,
+ PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self);
if (ret == NULL) {
return NULL;
}
- Py_INCREF(self);
- if (PyArray_SetBaseObject((PyArrayObject *)ret, (PyObject *)self) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
/*
* For numpy 1.9 the diagonal view is not writeable.
/* Allocate the result as a 2D array */
ret_dims[0] = nonzero_count;
ret_dims[1] = (ndim == 0) ? 1 : ndim;
- ret = (PyArrayObject *)PyArray_New(&PyArray_Type, 2, ret_dims,
- NPY_INTP, NULL, NULL, 0, 0,
- NULL);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DescrFromType(NPY_INTP),
+ 2, ret_dims, NULL, NULL,
+ 0, NULL);
if (ret == NULL) {
return NULL;
}
/* the result is an empty array, the view must point to valid memory */
npy_intp data_offset = is_empty ? 0 : i * NPY_SIZEOF_INTP;
- PyArrayObject *view = (PyArrayObject *)PyArray_New(Py_TYPE(ret), 1,
- &nonzero_count, NPY_INTP, &stride,
- PyArray_BYTES(ret) + data_offset,
- 0, PyArray_FLAGS(ret), (PyObject *)ret);
+ PyArrayObject *view = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ Py_TYPE(ret), PyArray_DescrFromType(NPY_INTP),
+ 1, &nonzero_count, &stride, PyArray_BYTES(ret) + data_offset,
+ PyArray_FLAGS(ret), (PyObject *)ret, (PyObject *)ret);
if (view == NULL) {
Py_DECREF(ret);
Py_DECREF(ret_tuple);
return NULL;
}
- Py_INCREF(ret);
- if (PyArray_SetBaseObject(view, (PyObject *)ret) < 0) {
- Py_DECREF(ret);
- Py_DECREF(ret_tuple);
- return NULL;
- }
PyTuple_SET_ITEM(ret_tuple, i, (PyObject *)view);
}
Py_DECREF(ret);
Py_INCREF(PyArray_DESCR(it->ao));
if (PyArray_ISCONTIGUOUS(it->ao)) {
- ret = (PyArrayObject *)PyArray_NewFromDescr(
- &PyArray_Type, PyArray_DESCR(it->ao), 1, &size,
- NULL, PyArray_DATA(it->ao), PyArray_FLAGS(it->ao),
- (PyObject *)it->ao);
+ ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ &PyArray_Type, PyArray_DESCR(it->ao),
+ 1, &size, NULL, PyArray_DATA(it->ao),
+ PyArray_FLAGS(it->ao), (PyObject *)it->ao, (PyObject *)it->ao);
if (ret == NULL) {
return NULL;
}
- Py_INCREF(it->ao);
- if (PyArray_SetBaseObject(ret, (PyObject *)it->ao) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
}
else {
ret = (PyArrayObject *)PyArray_NewFromDescr(
/*
 * Advanced indexing iteration of arrays when there is a single indexing
* array which has the same memory order as the value array and both
- * can be trivally iterated (single stride, aligned, no casting necessary).
+ * can be trivially iterated (single stride, aligned, no casting necessary).
*/
NPY_NO_EXPORT int
mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
}
else {
/*
- * faster resetting if the subspace iteration is trival.
+ * faster resetting if the subspace iteration is trivial.
* reset_offsets are zero for positive strides,
* for negative strides this shifts the pointer to the last
* item.
|| index == Py_None
|| PySlice_Check(index)
|| PyArray_Check(index)
- || !PySequence_Check(index)) {
+ || !PySequence_Check(index)
+ || PyBaseString_Check(index)) {
return unpack_scalar(index, result, result_n);
}
if (commit_to_unpack) {
/* propagate errors */
if (tmp_obj == NULL) {
- multi_DECREF(result, i);
- return -1;
+ goto fail;
}
}
else {
|| PySlice_Check(tmp_obj)
|| tmp_obj == Py_Ellipsis
|| tmp_obj == Py_None) {
+ if (DEPRECATE_FUTUREWARNING(
+ "Using a non-tuple sequence for multidimensional "
+ "indexing is deprecated; use `arr[tuple(seq)]` "
+ "instead of `arr[seq]`. In the future this will be "
+ "interpreted as an array index, `arr[np.array(seq)]`, "
+ "which will result either in an error or a different "
+ "result.") < 0) {
+ i++; /* since loop update doesn't run */
+ goto fail;
+ }
commit_to_unpack = 1;
}
}
multi_DECREF(result, i);
return unpack_scalar(index, result, result_n);
}
+
+fail:
+ multi_DECREF(result, i);
+ return -1;
}
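+
+/*
+ * Python-level effect of the FutureWarning above (illustrative):
+ *
+ *   arr[[0, slice(None)]]    (deprecated: non-tuple sequence index)
+ *   arr[(0, slice(None))]    (supported form, i.e. arr[0, :])
+ */
+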
/**
/* Create the new view and set the base array */
Py_INCREF(PyArray_DESCR(self));
- *view = (PyArrayObject *)PyArray_NewFromDescr(
- ensure_array ? &PyArray_Type : Py_TYPE(self),
- PyArray_DESCR(self),
- new_dim, new_shape,
- new_strides, data_ptr,
- PyArray_FLAGS(self),
- ensure_array ? NULL : (PyObject *)self);
+ *view = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ ensure_array ? &PyArray_Type : Py_TYPE(self),
+ PyArray_DESCR(self),
+ new_dim, new_shape, new_strides, data_ptr,
+ PyArray_FLAGS(self),
+ ensure_array ? NULL : (PyObject *)self,
+ (PyObject *)self);
if (*view == NULL) {
return -1;
}
- Py_INCREF(self);
- if (PyArray_SetBaseObject(*view, (PyObject *)self) < 0) {
- Py_DECREF(*view);
- return -1;
- }
-
return 0;
}
PyArrayObject *tmp = ret;
Py_INCREF(dtype);
- ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self), dtype, 1,
- &size, PyArray_STRIDES(ret), PyArray_BYTES(ret),
- PyArray_FLAGS(self), (PyObject *)self);
+ ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ Py_TYPE(self), dtype,
+ 1, &size, PyArray_STRIDES(ret), PyArray_BYTES(ret),
+ PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self);
if (ret == NULL) {
Py_DECREF(tmp);
return NULL;
}
-
- if (PyArray_SetBaseObject(ret, (PyObject *)tmp) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
}
return ret;
return PyArray_EnsureAnyArray(array_subscript(self, op));
}
+/*
+ * Helper function for _get_field_view which turns a multifield
+ * view into a "packed" copy, as done in numpy 1.15 and before.
+ * In numpy 1.16 this function should be removed.
+ */
+NPY_NO_EXPORT int
+_multifield_view_to_copy(PyArrayObject **view) {
+ static PyObject *copyfunc = NULL;
+ PyObject *viewcopy;
+
+ /* return a repacked copy of the view */
+ npy_cache_import("numpy.lib.recfunctions", "repack_fields", ©func);
+ if (copyfunc == NULL) {
+ goto view_fail;
+ }
+
+ PyArray_CLEARFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
+ viewcopy = PyObject_CallFunction(copyfunc, "O", *view);
+ if (viewcopy == NULL) {
+ goto view_fail;
+ }
+ Py_DECREF(*view);
+ *view = (PyArrayObject*)viewcopy;
+
+ /* warn when writing to the copy */
+ PyArray_ENABLEFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
+ return 0;
+
+view_fail:
+ Py_DECREF(*view);
+ *view = NULL;
+ return 0;
+}
+
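+/*
+ * Python-level sketch of the semantics implemented by the helper above
+ * (illustrative; `a` is assumed to be a structured array with fields
+ * 'x' and 'z'):
+ *
+ *   >>> b = a[['x', 'z']]    (packed copy in 1.15, becomes a view in 1.16)
+ *   >>> b['x'] = 0           (first write to the copy emits a warning)
+ */
+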
/*
* Attempts to subscript an array using a field name or list of field names.
*
* If an error occurred, return 0 and set view to NULL. If the subscript is not
* a string or list of strings, return -1 and set view to NULL. Otherwise
* return 0 and set view to point to a new view into arr for the given fields.
+ *
+ * In numpy 1.15 and before, in the case of a list of field names the returned
+ * view will actually be a copy by default, with fields packed together.
+ * The `force_view` argument causes a view to be returned. This argument can be
+ * removed in 1.16 when we plan to return a view always.
*/
NPY_NO_EXPORT int
-_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
+_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view,
+ int force_view)
{
*view = NULL;
/* view the array at the new offset+dtype */
Py_INCREF(fieldtype);
*view = (PyArrayObject*)PyArray_NewFromDescr_int(
- Py_TYPE(arr),
- fieldtype,
- PyArray_NDIM(arr),
- PyArray_SHAPE(arr),
- PyArray_STRIDES(arr),
- PyArray_BYTES(arr) + offset,
- PyArray_FLAGS(arr),
- (PyObject *)arr, 0, 1);
+ Py_TYPE(arr),
+ fieldtype,
+ PyArray_NDIM(arr),
+ PyArray_SHAPE(arr),
+ PyArray_STRIDES(arr),
+ PyArray_BYTES(arr) + offset,
+ PyArray_FLAGS(arr),
+ (PyObject *)arr, (PyObject *)arr,
+ 0, 1);
if (*view == NULL) {
return 0;
}
- Py_INCREF(arr);
- if (PyArray_SetBaseObject(*view, (PyObject *)arr) < 0) {
- Py_DECREF(*view);
- *view = NULL;
- }
return 0;
}
/* next check for a list of field names */
Py_DECREF(names);
return 0;
}
- // disallow use of titles as index
+ /* disallow use of titles as index */
if (PyTuple_Size(tup) == 3) {
PyObject *title = PyTuple_GET_ITEM(tup, 2);
int titlecmp = PyObject_RichCompareBool(title, name, Py_EQ);
if (titlecmp == 1) {
- // if title == name, we were given a title, not a field name
+ /* if title == name, we got a title, not a field name */
PyErr_SetString(PyExc_KeyError,
"cannot use field titles in multi-field index");
}
if (titlecmp != 0 || PyDict_SetItem(fields, title, tup) < 0) {
- Py_DECREF(title);
Py_DECREF(name);
Py_DECREF(fields);
Py_DECREF(names);
return 0;
}
- Py_DECREF(title);
}
- // disallow duplicate field indices
+ /* disallow duplicate field indices */
if (PyDict_Contains(fields, name)) {
PyObject *errmsg = PyUString_FromString(
"duplicate field of name ");
view_dtype->flags = PyArray_DESCR(arr)->flags;
*view = (PyArrayObject*)PyArray_NewFromDescr_int(
- Py_TYPE(arr),
- view_dtype,
- PyArray_NDIM(arr),
- PyArray_SHAPE(arr),
- PyArray_STRIDES(arr),
- PyArray_DATA(arr),
- PyArray_FLAGS(arr),
- (PyObject *)arr, 0, 1);
+ Py_TYPE(arr),
+ view_dtype,
+ PyArray_NDIM(arr),
+ PyArray_SHAPE(arr),
+ PyArray_STRIDES(arr),
+ PyArray_DATA(arr),
+ PyArray_FLAGS(arr),
+ (PyObject *)arr, (PyObject *)arr,
+ 0, 1);
+
if (*view == NULL) {
return 0;
}
- Py_INCREF(arr);
- if (PyArray_SetBaseObject(*view, (PyObject *)arr) < 0) {
- Py_DECREF(*view);
- *view = NULL;
+
+ /* the code below can be replaced by "return 0" in 1.16 */
+ if (force_view) {
return 0;
}
-
- return 0;
+ return _multifield_view_to_copy(view);
}
return -1;
}
/* return fields if op is a string index */
if (PyDataType_HASFIELDS(PyArray_DESCR(self))) {
PyArrayObject *view;
- int ret = _get_field_view(self, op, &view);
+ int ret = _get_field_view(self, op, &view, 0);
if (ret == 0){
if (view == NULL) {
return NULL;
PyArrayObject *tmp_arr = (PyArrayObject *)result;
Py_INCREF(PyArray_DESCR(tmp_arr));
- result = PyArray_NewFromDescr(Py_TYPE(self),
- PyArray_DESCR(tmp_arr),
- PyArray_NDIM(tmp_arr),
- PyArray_SHAPE(tmp_arr),
- PyArray_STRIDES(tmp_arr),
- PyArray_BYTES(tmp_arr),
- PyArray_FLAGS(self),
- (PyObject *)self);
-
+ result = PyArray_NewFromDescrAndBase(
+ Py_TYPE(self),
+ PyArray_DESCR(tmp_arr),
+ PyArray_NDIM(tmp_arr),
+ PyArray_SHAPE(tmp_arr),
+ PyArray_STRIDES(tmp_arr),
+ PyArray_BYTES(tmp_arr),
+ PyArray_FLAGS(self),
+ (PyObject *)self, (PyObject *)tmp_arr);
+ Py_DECREF(tmp_arr);
if (result == NULL) {
- Py_DECREF(tmp_arr);
- goto finish;
- }
-
- if (PyArray_SetBaseObject((PyArrayObject *)result,
- (PyObject *)tmp_arr) < 0) {
- Py_DECREF(result);
- result = NULL;
goto finish;
}
}
/* field access */
if (PyDataType_HASFIELDS(PyArray_DESCR(self))){
PyArrayObject *view;
- int ret = _get_field_view(self, ind, &view);
+ int ret = _get_field_view(self, ind, &view, 1);
if (ret == 0){
if (view == NULL) {
return -1;
PyArray_TRIVIALLY_ITERABLE_OP_READ,
PyArray_TRIVIALLY_ITERABLE_OP_READ) ||
(PyArray_NDIM(tmp_arr) == 0 &&
- PyArray_TRIVIALLY_ITERABLE(tmp_arr))) &&
+ PyArray_TRIVIALLY_ITERABLE(ind))) &&
/* Check if the type is equivalent to INTP */
PyArray_ITEMSIZE(ind) == sizeof(npy_intp) &&
PyArray_DESCR(ind)->kind == 'i' &&
/* create count-sized index arrays for each dimension */
for (j = 0; j < nd; j++) {
- new = (PyArrayObject *)PyArray_New(&PyArray_Type, 1, &count,
- NPY_INTP, NULL, NULL,
- 0, 0, NULL);
+ new = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DescrFromType(NPY_INTP),
+ 1, &count, NULL, NULL,
+ 0, NULL);
if (new == NULL) {
goto fail;
}
static void
arraymapiter_dealloc(PyArrayMapIterObject *mit)
{
+ PyArray_ResolveWritebackIfCopy(mit->array);
Py_XDECREF(mit->array);
Py_XDECREF(mit->ait);
Py_XDECREF(mit->subspace);
Py_DECREF(safe);
}
- ret = PyArray_NewFromDescr_int(Py_TYPE(self),
- typed,
- PyArray_NDIM(self), PyArray_DIMS(self),
- PyArray_STRIDES(self),
- PyArray_BYTES(self) + offset,
- PyArray_FLAGS(self)&(~NPY_ARRAY_F_CONTIGUOUS),
- (PyObject *)self, 0, 1);
- if (ret == NULL) {
- return NULL;
- }
- Py_INCREF(self);
- if (PyArray_SetBaseObject(((PyArrayObject *)ret), (PyObject *)self) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
-
- PyArray_UpdateFlags((PyArrayObject *)ret, NPY_ARRAY_UPDATE_ALL);
+ ret = PyArray_NewFromDescr_int(
+ Py_TYPE(self), typed,
+ PyArray_NDIM(self), PyArray_DIMS(self), PyArray_STRIDES(self),
+ PyArray_BYTES(self) + offset,
+ PyArray_FLAGS(self) & ~NPY_ARRAY_F_CONTIGUOUS,
+ (PyObject *)self, (PyObject *)self,
+ 0, 1);
return ret;
}
static PyObject *
array_wraparray(PyArrayObject *self, PyObject *args)
{
- PyArrayObject *arr, *ret;
+ PyArrayObject *arr;
PyObject *obj;
if (PyTuple_Size(args) < 1) {
}
arr = (PyArrayObject *)obj;
- if (Py_TYPE(self) != Py_TYPE(arr)){
+ if (Py_TYPE(self) != Py_TYPE(arr)) {
PyArray_Descr *dtype = PyArray_DESCR(arr);
Py_INCREF(dtype);
- ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self),
- dtype,
- PyArray_NDIM(arr),
- PyArray_DIMS(arr),
- PyArray_STRIDES(arr), PyArray_DATA(arr),
- PyArray_FLAGS(arr), (PyObject *)self);
- if (ret == NULL) {
- return NULL;
- }
- Py_INCREF(obj);
- if (PyArray_SetBaseObject(ret, obj) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- return (PyObject *)ret;
+ return PyArray_NewFromDescrAndBase(
+ Py_TYPE(self),
+ dtype,
+ PyArray_NDIM(arr),
+ PyArray_DIMS(arr),
+ PyArray_STRIDES(arr), PyArray_DATA(arr),
+ PyArray_FLAGS(arr), (PyObject *)self, obj);
} else {
/*The type was set in __array_prepare__*/
Py_INCREF(arr);
array_preparearray(PyArrayObject *self, PyObject *args)
{
PyObject *obj;
- PyArrayObject *arr, *ret;
+ PyArrayObject *arr;
PyArray_Descr *dtype;
if (PyTuple_Size(args) < 1) {
dtype = PyArray_DESCR(arr);
Py_INCREF(dtype);
- ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self),
- dtype,
- PyArray_NDIM(arr),
- PyArray_DIMS(arr),
- PyArray_STRIDES(arr), PyArray_DATA(arr),
- PyArray_FLAGS(arr), (PyObject *)self);
- if (ret == NULL) {
- return NULL;
- }
- Py_INCREF(arr);
- if (PyArray_SetBaseObject(ret, (PyObject *)arr) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- return (PyObject *)ret;
+ return PyArray_NewFromDescrAndBase(
+ Py_TYPE(self), dtype,
+ PyArray_NDIM(arr), PyArray_DIMS(arr), PyArray_STRIDES(arr),
+ PyArray_DATA(arr),
+ PyArray_FLAGS(arr), (PyObject *)self, (PyObject *)arr);
}
PyArrayObject *new;
Py_INCREF(PyArray_DESCR(self));
- new = (PyArrayObject *)PyArray_NewFromDescr(
+ new = (PyArrayObject *)PyArray_NewFromDescrAndBase(
&PyArray_Type,
PyArray_DESCR(self),
PyArray_NDIM(self),
PyArray_STRIDES(self),
PyArray_DATA(self),
PyArray_FLAGS(self),
- NULL
+ NULL,
+ (PyObject *)self
);
if (new == NULL) {
return NULL;
}
- Py_INCREF(self);
- PyArray_SetBaseObject(new, (PyObject *)self);
self = new;
}
else {
}
/*
- * simulates a C-style 1-3 dimensional array which can be accesed using
+ * simulates a C-style 1-3 dimensional array which can be accessed using
* ptr[i] or ptr[i][j] or ptr[i][j][k] -- requires pointer allocation
* for 2-d and 3-d.
*
npy_intp newstrides[NPY_MAXDIMS];
npy_intp newstride;
int i, k, num;
- PyArrayObject *ret;
+ PyObject *ret;
PyArray_Descr *dtype;
if (order == NPY_FORTRANORDER || PyArray_ISFORTRAN(arr) || PyArray_NDIM(arr) == 0) {
}
dtype = PyArray_DESCR(arr);
Py_INCREF(dtype);
- ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(arr),
- dtype, ndmin, newdims, newstrides,
- PyArray_DATA(arr),
- PyArray_FLAGS(arr),
- (PyObject *)arr);
- if (ret == NULL) {
- Py_DECREF(arr);
- return NULL;
- }
- /* steals a reference to arr --- so don't increment here */
- if (PyArray_SetBaseObject(ret, (PyObject *)arr) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
+ ret = PyArray_NewFromDescrAndBase(
+ Py_TYPE(arr), dtype,
+ ndmin, newdims, newstrides, PyArray_DATA(arr),
+ PyArray_FLAGS(arr), (PyObject *)arr, (PyObject *)arr);
+ Py_DECREF(arr);
- return (PyObject *)ret;
+ return ret;
}
if (DEPRECATE(
"The binary mode of fromstring is deprecated, as it behaves "
"surprisingly on unicode inputs. Use frombuffer instead") < 0) {
- Py_DECREF(descr);
+ Py_XDECREF(descr);
return NULL;
}
}
/* Subscript */
else if (PyInt_Check(item) || PyLong_Check(item)) {
long s = PyInt_AsLong(item);
- if ( s < 0 || s > 2*26) {
+ npy_bool bad_input = 0;
+
+ if (subindex + 1 >= subsize) {
PyErr_SetString(PyExc_ValueError,
- "subscript is not within the valid range [0, 52]");
+ "subscripts list is too long");
Py_DECREF(obj);
return -1;
}
- if (s < 26) {
- subscripts[subindex++] = 'A' + s;
+
+ if ( s < 0 ) {
+ bad_input = 1;
+ }
+ else if (s < 26) {
+ subscripts[subindex++] = 'A' + (char)s;
+ }
+ else if (s < 2*26) {
+ subscripts[subindex++] = 'a' + (char)s - 26;
}
else {
- subscripts[subindex++] = 'a' + s;
+ bad_input = 1;
}
- if (subindex >= subsize) {
+
+ if (bad_input) {
PyErr_SetString(PyExc_ValueError,
- "subscripts list is too long");
+ "subscript is not within the valid range [0, 52)");
Py_DECREF(obj);
return -1;
}
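/*
 * Illustrative note on the mapping above: integer subscripts 0-25 map to
 * 'A'-'Z' and 26-51 map to 'a'-'z', so, roughly,
 *
 *   np.einsum(a, [0, 1], b, [1, 2])    is equivalent to
 *   np.einsum('AB,BC', a, b)
 */
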
/*
- * Prints floating-point scalars usign the Dragon4 algorithm, scientific mode.
+ * Prints floating-point scalars using the Dragon4 algorithm, scientific mode.
* See docstring of `np.format_float_scientific` for description of arguments.
 * The difference is that a value of -1 is valid for pad_left, exp_digits,
* precision, which is equivalent to `None`.
}
/*
- * Prints floating-point scalars usign the Dragon4 algorithm, positional mode.
+ * Prints floating-point scalars using the Dragon4 algorithm, positional mode.
* See docstring of `np.format_float_positional` for description of arguments.
 * The difference is that a value of -1 is valid for pad_left, pad_right,
* precision, which is equivalent to `None`.
/* Initialization function for the module */
#if defined(NPY_PY3K)
-#define RETVAL m
+#define RETVAL(x) x
PyMODINIT_FUNC PyInit_multiarray(void) {
#else
-#define RETVAL
+#define RETVAL(x)
PyMODINIT_FUNC initmultiarray(void) {
#endif
PyObject *m, *d, *s;
/* Initialize access to the PyDateTime API */
numpy_pydatetime_import();
+ if (PyErr_Occurred()) {
+ goto err;
+ }
+
/* Add some symbolic constants to the module */
d = PyModule_GetDict(m);
if (!d) {
*/
PyArray_Type.tp_hash = PyObject_HashNotImplemented;
if (PyType_Ready(&PyArray_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (setup_scalartypes(d) < 0) {
goto err;
PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter;
PyArrayMultiIter_Type.tp_free = PyArray_free;
if (PyType_Ready(&PyArrayIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (PyType_Ready(&PyArrayMapIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (PyType_Ready(&PyArrayMultiIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew;
if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (PyType_Ready(&NpyIter_Type) < 0) {
- return RETVAL;
+ goto err;
}
PyArrayDescr_Type.tp_hash = PyArray_DescrHash;
if (PyType_Ready(&PyArrayDescr_Type) < 0) {
- return RETVAL;
+ goto err;
}
if (PyType_Ready(&PyArrayFlags_Type) < 0) {
- return RETVAL;
+ goto err;
}
NpyBusDayCalendar_Type.tp_new = PyType_GenericNew;
if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) {
- return RETVAL;
+ goto err;
}
c_api = NpyCapsule_FromVoidPtr((void *)PyArray_API, NULL);
if (set_typeinfo(d) != 0) {
goto err;
}
- return RETVAL;
+
+ return RETVAL(m);
err:
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"cannot load multiarray module.");
}
- return RETVAL;
+ return RETVAL(NULL);
}
#define NPY_ITERATOR_IMPLEMENTATION_CODE
#include "nditer_impl.h"
#include "templ_common.h"
+#include "ctors.h"
/* Internal helper functions private to this file */
static npy_intp
}
Py_INCREF(dtype);
- view = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, ndim,
- shape, strides, dataptr,
- writeable ? NPY_ARRAY_WRITEABLE : 0,
- NULL);
- if (view == NULL) {
- return NULL;
- }
- /* Tell the view who owns the data */
- Py_INCREF(obj);
- if (PyArray_SetBaseObject(view, (PyObject *)obj) < 0) {
- Py_DECREF(view);
- return NULL;
- }
- /* Make sure all the flags are good */
- PyArray_UpdateFlags(view, NPY_ARRAY_UPDATE_ALL);
+ view = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ &PyArray_Type, dtype,
+ ndim, shape, strides, dataptr,
+ writeable ? NPY_ARRAY_WRITEABLE : 0, NULL, (PyObject *)obj);
return view;
}
return count * (*reduce_innersize);
}
+NPY_NO_EXPORT npy_bool
+npyiter_has_writeback(NpyIter *iter)
+{
+ int iop, nop;
+ npyiter_opitflags *op_itflags;
+ if (iter == NULL) {
+ return 0;
+ }
+ nop = NIT_NOP(iter);
+ op_itflags = NIT_OPITFLAGS(iter);
+
+ for (iop=0; iop<nop; iop++) {
+ if (op_itflags[iop] & NPY_OP_ITFLAG_HAS_WRITEBACK) {
+ return NPY_TRUE;
+ }
+ }
+ return NPY_FALSE;
+}
#undef NPY_ITERATOR_IMPLEMENTATION_CODE
int iop, nop;
PyArray_Descr **dtype;
PyArrayObject **object;
+ npyiter_opitflags *op_itflags;
+ npy_bool resolve = 1;
if (iter == NULL) {
return NPY_SUCCEED;
nop = NIT_NOP(iter);
dtype = NIT_DTYPES(iter);
object = NIT_OPERANDS(iter);
+ op_itflags = NIT_OPITFLAGS(iter);
/* Deallocate any buffers and buffering data */
if (itflags & NPY_ITFLAG_BUFFER) {
}
}
- /* Deallocate all the dtypes and objects that were iterated */
+ /*
+ * Deallocate all the dtypes and objects that were iterated and resolve
+ * any writeback buffers created by the iterator
+ */
for(iop = 0; iop < nop; ++iop, ++dtype, ++object) {
+ if (op_itflags[iop] & NPY_OP_ITFLAG_HAS_WRITEBACK) {
+ if (resolve && PyArray_ResolveWritebackIfCopy(*object) < 0) {
+ resolve = 0;
+ }
+ else {
+ PyArray_DiscardWritebackIfCopy(*object);
+ }
+ }
Py_XDECREF(*dtype);
Py_XDECREF(*object);
}
/* Deallocate the iterator memory */
PyObject_Free(iter);
-
+ if (resolve == 0) {
+ return NPY_FAIL;
+ }
return NPY_SUCCEED;
}
return NULL;
}
- /* Make sure all the flags are good */
- PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL);
-
/* Double-check that the subtype didn't mess with the dimensions */
if (subtype != &PyArray_Type) {
if (PyArray_NDIM(ret) != op_ndim ||
*
* If any write operand has memory overlap with any read operand,
* eliminate all overlap by making temporary copies, by enabling
- * NPY_OP_ITFLAG_FORCECOPY for the write operand to force UPDATEIFCOPY.
+ * NPY_OP_ITFLAG_FORCECOPY for the write operand to force WRITEBACKIFCOPY.
*
* Operands with NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE enabled are not
* considered overlapping if the arrays are exactly the same. In this
return 0;
}
}
- /* If the data will be written to, set UPDATEIFCOPY */
+ /* If the data will be written to, set WRITEBACKIFCOPY
+ and require a context manager */
if (op_itflags[iop] & NPY_OP_ITFLAG_WRITE) {
Py_INCREF(op[iop]);
- if (PyArray_SetUpdateIfCopyBase(temp, op[iop]) < 0) {
+ if (PyArray_SetWritebackIfCopyBase(temp, op[iop]) < 0) {
Py_DECREF(temp);
return 0;
}
+ op_itflags[iop] |= NPY_OP_ITFLAG_HAS_WRITEBACK;
}
Py_DECREF(op[iop]);
#define NPY_OP_ITFLAG_USINGBUFFER 0x0100
/* The operand must be copied (with UPDATEIFCOPY if also ITFLAG_WRITE) */
#define NPY_OP_ITFLAG_FORCECOPY 0x0200
+/* The operand has temporary data, write it back at dealloc */
+#define NPY_OP_ITFLAG_HAS_WRITEBACK 0x0400
/*
* The data layout of the iterator is fully specified by
/*
- * This file implements the CPython wrapper of the new NumPy iterator.
+ * This file implements the CPython wrapper of NpyIter
*
* Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com)
* The University of British Columbia
#include "npy_pycompat.h"
#include "alloc.h"
#include "common.h"
+#include "ctors.h"
+
+/* Functions not part of the public NumPy C API */
+npy_bool npyiter_has_writeback(NpyIter *iter);
+
typedef struct NewNpyArrayIterObject_tag NewNpyArrayIterObject;
PyErr_SetString(PyExc_TypeError,
"Iterator operand is flagged as writeable, "
"but is an object which cannot be written "
- "back to via UPDATEIFCOPY");
+ "back to via WRITEBACKIFCOPY");
}
for (iop = 0; iop < nop; ++iop) {
Py_DECREF(op[iop]);
return NULL;
}
+
static void
npyiter_dealloc(NewNpyArrayIterObject *self)
{
if (self->iter) {
+ if (npyiter_has_writeback(self->iter)) {
+ if (PyErr_WarnEx(PyExc_RuntimeWarning,
+ "Temporary data has not been written back to one of the "
+ "operands. Typically nditer is used as a context manager "
+ "otherwise 'close' must be called before reading iteration "
+ "results.", 1) < 0) {
+ PyObject *s;
+
+ s = PyUString_FromString("npyiter_dealloc");
+ if (s) {
+ PyErr_WriteUnraisable(s);
+ Py_DECREF(s);
+ }
+ else {
+ PyErr_WriteUnraisable(Py_None);
+ }
+ }
+ }
NpyIter_Deallocate(self->iter);
self->iter = NULL;
Py_XDECREF(self->nested_child);
"Iterator is invalid");
return NULL;
}
-
nop = NpyIter_GetNOp(self->iter);
operands = self->operands;
"Iterator is invalid");
return NULL;
}
-
nop = NpyIter_GetNOp(self->iter);
ret = PyTuple_New(nop);
static PyObject *
npyiter_next(NewNpyArrayIterObject *self)
{
- if (self->iter == NULL || self->iternext == NULL || self->finished) {
+ if (self->iter == NULL || self->iternext == NULL ||
+ self->finished) {
return NULL;
}
"Iterator is invalid");
return NULL;
}
-
nop = NpyIter_GetNOp(self->iter);
ret = PyTuple_New(nop);
NPY_NO_EXPORT PyObject *
npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i)
{
- PyArrayObject *ret;
-
npy_intp ret_ndim;
npy_intp nop, innerloopsize, innerstride;
char *dataptr;
"and no reset has been done yet");
return NULL;
}
-
nop = NpyIter_GetNOp(self->iter);
/* Negative indexing */
}
Py_INCREF(dtype);
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype,
- ret_ndim, &innerloopsize,
- &innerstride, dataptr,
- self->writeflags[i] ? NPY_ARRAY_WRITEABLE : 0, NULL);
- if (ret == NULL) {
- return NULL;
- }
- Py_INCREF(self);
- if (PyArray_SetBaseObject(ret, (PyObject *)self) < 0) {
- Py_XDECREF(ret);
- return NULL;
- }
-
- PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL);
-
- return (PyObject *)ret;
+ return PyArray_NewFromDescrAndBase(
+ &PyArray_Type, dtype,
+ ret_ndim, &innerloopsize, &innerstride, dataptr,
+ self->writeflags[i] ? NPY_ARRAY_WRITEABLE : 0,
+ NULL, (PyObject *)self);
}
NPY_NO_EXPORT PyObject *
"and no reset has been done yet");
return NULL;
}
-
nop = NpyIter_GetNOp(self->iter);
if (ilow < 0) {
ilow = 0;
"and no reset has been done yet");
return -1;
}
-
nop = NpyIter_GetNOp(self->iter);
/* Negative indexing */
return -1;
}
- PyArray_UpdateFlags(tmp, NPY_ARRAY_UPDATE_ALL);
-
ret = PyArray_CopyObject(tmp, v);
Py_DECREF(tmp);
return ret;
"and no reset has been done yet");
return -1;
}
-
nop = NpyIter_GetNOp(self->iter);
if (ilow < 0) {
ilow = 0;
return -1;
}
+static PyObject *
+npyiter_enter(NewNpyArrayIterObject *self)
+{
+ if (self->iter == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "operation on non-initialized iterator");
+ return NULL;
+ }
+ Py_INCREF(self);
+ return (PyObject *)self;
+}
+
+static PyObject *
+npyiter_close(NewNpyArrayIterObject *self)
+{
+ NpyIter *iter = self->iter;
+ int ret;
+ if (self->iter == NULL) {
+ Py_RETURN_NONE;
+ }
+ ret = NpyIter_Deallocate(iter);
+ self->iter = NULL;
+ if (ret < 0) {
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+npyiter_exit(NewNpyArrayIterObject *self, PyObject *args)
+{
+ /* even if called via exception handling, writeback any data */
+ return npyiter_close(self);
+}
+
static PyMethodDef npyiter_methods[] = {
{"reset",
(PyCFunction)npyiter_reset,
{"debug_print",
(PyCFunction)npyiter_debug_print,
METH_NOARGS, NULL},
+ {"__enter__", (PyCFunction)npyiter_enter,
+ METH_NOARGS, NULL},
+ {"__exit__", (PyCFunction)npyiter_exit,
+ METH_VARARGS, NULL},
+ {"close", (PyCFunction)npyiter_close,
+        METH_NOARGS, NULL},
{NULL, NULL, 0, NULL},
};
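A minimal usage sketch of the context-manager protocol these entries enable
(assuming a build with this change; the RuntimeWarning in npyiter_dealloc
above fires only when writeback buffers are still pending at deallocation):

    import numpy as np

    a = np.arange(6, dtype=np.float64)
    with np.nditer(a, op_flags=['readwrite']) as it:
        for x in it:
            x[...] = 2 * x
    # __exit__ calls close(), flushing any writeback buffers into `a`
    assert a[1] == 2.0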
#include "binop_override.h"
-/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */
-#if (PY_VERSION_HEX < 0x02070B00) || \
- ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400))
- #define _Py_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x))
-#else
- #define _Py_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
-#endif
-
-
/*************************************************************************
**************** Implement Number Protocol ****************************
*************************************************************************/
double exponent;
NPY_SCALARKIND kind; /* NPY_NOSCALAR is not scalar */
- if (PyArray_Check(a1) && ((kind=is_scalar_with_conversion(o2, &exponent))>0)) {
+ if (PyArray_Check(a1) &&
+ !PyArray_ISOBJECT(a1) &&
+ ((kind=is_scalar_with_conversion(o2, &exponent))>0)) {
PyObject *fastop = NULL;
if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) {
if (exponent == 1.0) {
n = PyArray_SIZE(mp);
if (n == 1) {
int res;
- if (_Py_EnterRecursiveCall(" while converting array to bool")) {
+ if (Npy_EnterRecursiveCall(" while converting array to bool")) {
return -1;
}
res = PyArray_DESCR(mp)->f->nonzero(PyArray_DATA(mp), mp);
/* Need to guard against recursion if our array holds references */
if (PyDataType_REFCHK(PyArray_DESCR(v))) {
PyObject *res;
- if (_Py_EnterRecursiveCall(where) != 0) {
+ if (Npy_EnterRecursiveCall(where) != 0) {
Py_DECREF(scalar);
return NULL;
}
if ((typecode->type_num == NPY_VOID) &&
!(((PyVoidScalarObject *)scalar)->flags & NPY_ARRAY_OWNDATA) &&
outcode == NULL) {
- r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- typecode,
+ return PyArray_NewFromDescrAndBase(
+ &PyArray_Type, typecode,
0, NULL, NULL,
((PyVoidScalarObject *)scalar)->obval,
((PyVoidScalarObject *)scalar)->flags,
- NULL);
- if (r == NULL) {
- return NULL;
- }
- Py_INCREF(scalar);
- if (PyArray_SetBaseObject(r, (PyObject *)scalar) < 0) {
- Py_DECREF(r);
- return NULL;
- }
- return (PyObject *)r;
+ NULL, (PyObject *)scalar);
}
/* Need to INCREF typecode because PyArray_NewFromDescr steals a
/**begin repeat
* #name = half, float, double, longdouble#
+ * #Name = Half, Float, Double, LongDouble#
* #NAME = HALF, FLOAT, DOUBLE, LONGDOUBLE#
* #type = npy_half, npy_float, npy_double, npy_longdouble#
* #suff = h, f, d, l#
int pad_left, int pad_right, int exp_digits)
{
if (scientific) {
- return Dragon4_Scientific_AnySize(&val, sizeof(@type@),
+ return Dragon4_Scientific_@Name@(&val,
DigitMode_Unique, precision,
sign, trim, pad_left, exp_digits);
}
else {
- return Dragon4_Positional_AnySize(&val, sizeof(@type@),
+ return Dragon4_Positional_@Name@(&val,
DigitMode_Unique, CutoffMode_TotalLength, precision,
sign, trim, pad_left, pad_right);
}
/* 0-dim array from scalar object */
-static char doc_getarray[] = "sc.__array__(|type) return 0-dim array";
+static char doc_getarray[] = "sc.__array__(dtype) return 0-dim array from "
+ "scalar with specified dtype";
static PyObject *
gentype_getarray(PyObject *scalar, PyObject *args)
return NULL;
}
-static PyObject *
-gentype_squeeze(PyObject *self, PyObject *args)
-{
- if (!PyArg_ParseTuple(args, "")) {
- return NULL;
- }
- Py_INCREF(self);
- return self;
-}
-
static Py_ssize_t
gentype_getreadbuf(PyObject *, Py_ssize_t, void **);
* std, var, sum, cumsum, prod, cumprod, compress, sort, argsort,
* round, argmax, argmin, max, min, ptp, any, all, astype, resize,
* reshape, choose, tostring, tobytes, copy, searchsorted, view,
- * flatten, ravel#
+ * flatten, ravel, squeeze#
*/
static PyObject *
gentype_@name@(PyObject *self, PyObject *args, PyObject *kwds)
METH_VARARGS | METH_KEYWORDS, NULL},
{"squeeze",
(PyCFunction)gentype_squeeze,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"view",
(PyCFunction)gentype_view,
METH_VARARGS | METH_KEYWORDS, NULL},
}
}
+#ifndef NPY_PY3K
+/*
+ * In python2, the `float` and `complex` types still implement the obsolete
+ * "tp_print" method, which uses CPython's float-printing routines to print the
+ * float. Numpy's float_/cfloat inherit from Python float/complex, but
+ * override its tp_repr and tp_str methods. In order to avoid an inconsistency
+ * with the inherited tp_print, we need to override it too.
+ *
+ * In python3 the tp_print method is reserved/unused.
+ */
+static int
+doubletype_print(PyObject *o, FILE *fp, int flags)
+{
+ int ret;
+ PyObject *to_print;
+ if (flags & Py_PRINT_RAW) {
+ to_print = PyObject_Str(o);
+ }
+ else {
+ to_print = PyObject_Repr(o);
+ }
+
+ if (to_print == NULL) {
+ return -1;
+ }
+
+ ret = PyObject_Print(to_print, fp, Py_PRINT_RAW);
+ Py_DECREF(to_print);
+ return ret;
+}
+#endif
static PyNumberMethods longdoubletype_as_number;
static PyNumberMethods clongdoubletype_as_number;
/**end repeat**/
+#ifndef NPY_PY3K
+ PyDoubleArrType_Type.tp_print = &doubletype_print;
+ PyCDoubleArrType_Type.tp_print = &doubletype_print;
+#endif
+
+
PyBoolArrType_Type.tp_as_number->nb_index = (unaryfunc)bool_index;
PyStringArrType_Type.tp_alloc = NULL;
npy_intp *dimensions = newdims->ptr;
PyArrayObject *ret;
int ndim = newdims->len;
- npy_bool same, incref = NPY_TRUE;
+ npy_bool same;
npy_intp *strides = NULL;
npy_intp newstrides[NPY_MAXDIMS];
int flags;
* data in the order it is in.
* NPY_RELAXED_STRIDES_CHECKING: size check is unnecessary when set.
*/
+ Py_INCREF(self);
if ((PyArray_SIZE(self) > 1) &&
((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(self)) ||
(order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(self)))) {
else {
PyObject *newcopy;
newcopy = PyArray_NewCopy(self, order);
+ Py_DECREF(self);
if (newcopy == NULL) {
return NULL;
}
- incref = NPY_FALSE;
self = (PyArrayObject *)newcopy;
}
}
}
Py_INCREF(PyArray_DESCR(self));
- ret = (PyArrayObject *)PyArray_NewFromDescr_int(Py_TYPE(self),
- PyArray_DESCR(self),
- ndim, dimensions,
- strides,
- PyArray_DATA(self),
- flags, (PyObject *)self, 0, 1);
-
- if (ret == NULL) {
- goto fail;
- }
-
- if (incref) {
- Py_INCREF(self);
- }
- if (PyArray_SetBaseObject(ret, (PyObject *)self)) {
- Py_DECREF(ret);
- return NULL;
- }
-
- PyArray_UpdateFlags(ret, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS);
+ ret = (PyArrayObject *)PyArray_NewFromDescr_int(
+ Py_TYPE(self), PyArray_DESCR(self),
+ ndim, dimensions, strides, PyArray_DATA(self),
+ flags, (PyObject *)self, (PyObject *)self,
+ 0, 1);
+ Py_DECREF(self);
return (PyObject *)ret;
-
- fail:
- if (!incref) {
- Py_DECREF(self);
- }
- return NULL;
}
* incorrectly), sets up descr, and points data at PyArray_DATA(ap).
*/
Py_INCREF(PyArray_DESCR(ap));
- ret = (PyArrayObject *)
- PyArray_NewFromDescr(Py_TYPE(ap),
- PyArray_DESCR(ap),
- n, PyArray_DIMS(ap),
- NULL, PyArray_DATA(ap),
- flags,
- (PyObject *)ap);
+ ret = (PyArrayObject *) PyArray_NewFromDescrAndBase(
+ Py_TYPE(ap), PyArray_DESCR(ap),
+ n, PyArray_DIMS(ap), NULL, PyArray_DATA(ap),
+ flags, (PyObject *)ap, (PyObject *)ap);
if (ret == NULL) {
return NULL;
}
- /* point at true owner of memory: */
- Py_INCREF(ap);
- if (PyArray_SetBaseObject(ret, (PyObject *)ap) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
/* fix the dimensions and strides of the return-array */
for (i = 0; i < n; i++) {
/* If all the strides matched a contiguous layout, return a view */
if (i < 0) {
- PyArrayObject *ret;
-
stride = PyArray_ITEMSIZE(arr);
val[0] = PyArray_SIZE(arr);
Py_INCREF(PyArray_DESCR(arr));
- ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(arr),
- PyArray_DESCR(arr),
- 1, val,
- &stride,
- PyArray_BYTES(arr),
- PyArray_FLAGS(arr),
- (PyObject *)arr);
- if (ret == NULL) {
- return NULL;
- }
-
- PyArray_UpdateFlags(ret,
- NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS);
- Py_INCREF(arr);
- if (PyArray_SetBaseObject(ret, (PyObject *)arr) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
-
- return (PyObject *)ret;
+ return PyArray_NewFromDescrAndBase(
+ Py_TYPE(arr), PyArray_DESCR(arr),
+ 1, val, &stride, PyArray_BYTES(arr),
+ PyArray_FLAGS(arr), (PyObject *)arr, (PyObject *)arr);
}
}
*/
#include "npy_math_common.h"
#include "npy_math_private.h"
+#include "numpy/utils.h"
#ifndef HAVE_COPYSIGN
double npy_copysign(double x, double y)
{
npy_int64 hx,ihx,ilx;
npy_uint64 lx;
+ npy_longdouble u;
GET_LDOUBLE_WORDS64(hx, lx, x);
ihx = hx & 0x7fffffffffffffffLL; /* |hx| */
return x; /* signal the nan */
}
if(ihx == 0 && ilx == 0) { /* x == 0 */
- npy_longdouble u;
SET_LDOUBLE_WORDS64(x, p, 0ULL);/* return +-minsubnormal */
u = x * x;
if (u == x) {
}
}
- npy_longdouble u;
if(p < 0) { /* p < 0, x -= ulp */
if((hx==0xffefffffffffffffLL)&&(lx==0xfc8ffffffffffffeLL))
return x+x; /* overflow, return -inf */
}
#endif
+int npy_clear_floatstatus(void)
+{
+    char x = 0;
+    return npy_clear_floatstatus_barrier(&x);
+}
+int npy_get_floatstatus(void)
+{
+    char x = 0;
+    return npy_get_floatstatus_barrier(&x);
+}
+
/*
* Functions to set the floating point status word.
* keep in sync with NO_FLOATING_POINT_SUPPORT in ufuncobject.h
defined(__NetBSD__)
#include <ieeefp.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char * param)
{
int fpstatus = fpgetsticky();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FP_X_DZ & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FP_X_OFL & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((FP_X_UFL & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((FP_X_INV & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
fpsetsticky(0);
return fpstatus;
(defined(__FreeBSD__) && (__FreeBSD_version >= 502114))
# include <fenv.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char* param)
{
int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW |
FE_UNDERFLOW | FE_INVALID);
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((FE_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
/* testing float status is 50-100 times faster than clearing on x86 */
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
if (fpstatus != 0) {
feclearexcept(FE_DIVBYZERO | FE_OVERFLOW |
FE_UNDERFLOW | FE_INVALID);
#include <float.h>
#include <fpxcp.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
int fpstatus = fp_read_flag();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((FP_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char * param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
fp_swap_flag(0);
return fpstatus;
#include <float.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
#if defined(_WIN64)
    int fpstatus = _statusfp();
#else
    _statusfp2(&fpstatus, &fpstatus2);
    fpstatus |= fpstatus2;
#endif
+    /*
+     * By using a volatile, the compiler cannot reorder this call
+     */
+    if (param != NULL) {
+        volatile char NPY_UNUSED(c) = *(char*)param;
+    }
return ((SW_ZERODIVIDE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((SW_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((SW_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((SW_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
- int fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
_clearfp();
return fpstatus;
#include <machine/fpu.h>
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *param)
{
unsigned long fpstatus = ieee_get_fp_control();
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
return ((IEEE_STATUS_DZE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
((IEEE_STATUS_OVF & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
((IEEE_STATUS_UNF & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
((IEEE_STATUS_INV & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
- long fpstatus = npy_get_floatstatus();
+ int fpstatus = npy_get_floatstatus_barrier(param);
/* clear status bits as well as disable exception mode if on */
ieee_set_fp_control(0);
#else
-int npy_get_floatstatus(void)
+int npy_get_floatstatus_barrier(char *NPY_UNUSED(param))
{
return 0;
}
-int npy_clear_floatstatus(void)
+int npy_clear_floatstatus_barrier(char *param)
{
+    int fpstatus = npy_get_floatstatus_barrier(param);
-    return 0;
+    return fpstatus;
}
typedef npy_uint32 ldouble_man_t;
typedef npy_uint32 ldouble_exp_t;
typedef npy_uint32 ldouble_sign_t;
-#elif defined(HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE) || \
- defined(HAVE_LDOUBLE_IEEE_DOUBLE_BE)
+#elif defined(HAVE_LDOUBLE_IEEE_DOUBLE_BE)
/* 64 bits IEEE double precision aligned on 16 bytes: used by ppc arch on
* Mac OS X */
typedef npy_uint32 ldouble_sign_t;
#endif
-#if !defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE) && \
- !defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_LE)
+#if !defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) && \
+ !defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE)
/* Get the sign bit of x. x should be of type IEEEl2bitsrep */
#define GET_LDOUBLE_SIGN(x) \
(((x).a[LDBL_SIGN_INDEX] & LDBL_SIGN_MASK) >> LDBL_SIGN_SHIFT)
((x).a[LDBL_MANH_INDEX] & ~LDBL_MANH_MASK) | \
(((IEEEl2bitsrep_part)(v) << LDBL_MANH_SHIFT) & LDBL_MANH_MASK))
-#endif /* #ifndef HAVE_LDOUBLE_DOUBLE_DOUBLE_BE */
+#endif /* !HAVE_LDOUBLE_DOUBLE_DOUBLE_* */
/*
* Those unions are used to convert a pointer of npy_cdouble to native C99
{
npy_intp min_idx = 0;
npy_intp max_idx = arr_len;
- @type@ last_key_val = *(const @type@ *)key;
+ @type@ last_key_val;
+
+ if (key_len == 0) {
+ return;
+ }
+ last_key_val = *(const @type@ *)key;
for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
const @type@ key_val = *(const @type@ *)key;
{
npy_intp min_idx = 0;
npy_intp max_idx = arr_len;
- @type@ last_key_val = *(const @type@ *)key;
+ @type@ last_key_val;
+
+ if (key_len == 0) {
+ return 0;
+ }
+ last_key_val = *(const @type@ *)key;
for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
const @type@ key_val = *(const @type@ *)key;
#define PyArray_TRIVIALLY_ITERABLE_OP_NOREAD 0
#define PyArray_TRIVIALLY_ITERABLE_OP_READ 1
-#define PyArray_EQUIVALENTLY_ITERABLE_BASE(arr1, arr2) ( \
- PyArray_NDIM(arr1) == PyArray_NDIM(arr2) && \
- PyArray_CompareLists(PyArray_DIMS(arr1), \
- PyArray_DIMS(arr2), \
- PyArray_NDIM(arr1)) && \
- (PyArray_FLAGS(arr1)&(NPY_ARRAY_C_CONTIGUOUS| \
- NPY_ARRAY_F_CONTIGUOUS)) & \
- (PyArray_FLAGS(arr2)&(NPY_ARRAY_C_CONTIGUOUS| \
- NPY_ARRAY_F_CONTIGUOUS)) \
- )
+#define PyArray_TRIVIALLY_ITERABLE(arr) ( \
+ PyArray_NDIM(arr) <= 1 || \
+ PyArray_CHKFLAGS(arr, NPY_ARRAY_C_CONTIGUOUS) || \
+ PyArray_CHKFLAGS(arr, NPY_ARRAY_F_CONTIGUOUS) \
+ )
#define PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size, arr) ( \
- size == 1 ? 0 : ((PyArray_NDIM(arr) == 1) ? \
- PyArray_STRIDE(arr, 0) : \
- PyArray_ITEMSIZE(arr)))
+ assert(PyArray_TRIVIALLY_ITERABLE(arr)), \
+ size == 1 ? 0 : ((PyArray_NDIM(arr) == 1) ? \
+ PyArray_STRIDE(arr, 0) : PyArray_ITEMSIZE(arr)))
static NPY_INLINE int
PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(PyArrayObject *arr1, PyArrayObject *arr2,
return (!arr1_read || arr1_ahead) && (!arr2_read || arr2_ahead);
}
+#define PyArray_EQUIVALENTLY_ITERABLE_BASE(arr1, arr2) ( \
+ PyArray_NDIM(arr1) == PyArray_NDIM(arr2) && \
+ PyArray_CompareLists(PyArray_DIMS(arr1), \
+ PyArray_DIMS(arr2), \
+ PyArray_NDIM(arr1)) && \
+ (PyArray_FLAGS(arr1)&(NPY_ARRAY_C_CONTIGUOUS| \
+ NPY_ARRAY_F_CONTIGUOUS)) & \
+ (PyArray_FLAGS(arr2)&(NPY_ARRAY_C_CONTIGUOUS| \
+ NPY_ARRAY_F_CONTIGUOUS)) \
+ )
+
#define PyArray_EQUIVALENTLY_ITERABLE(arr1, arr2, arr1_read, arr2_read) ( \
PyArray_EQUIVALENTLY_ITERABLE_BASE(arr1, arr2) && \
PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK( \
arr1, arr2, arr1_read, arr2_read))
-#define PyArray_TRIVIALLY_ITERABLE(arr) ( \
- PyArray_NDIM(arr) <= 1 || \
- PyArray_CHKFLAGS(arr, NPY_ARRAY_C_CONTIGUOUS) || \
- PyArray_CHKFLAGS(arr, NPY_ARRAY_F_CONTIGUOUS) \
- )
+
#define PyArray_PREPARE_TRIVIAL_ITERATION(arr, count, data, stride) \
count = PyArray_SIZE(arr); \
data = PyArray_BYTES(arr); \
PyArray_STRIDE(arr, 0) : \
PyArray_ITEMSIZE(arr)));
-
#define PyArray_TRIVIALLY_ITERABLE_PAIR(arr1, arr2, arr1_read, arr2_read) ( \
PyArray_TRIVIALLY_ITERABLE(arr1) && \
(PyArray_NDIM(arr2) == 0 || \
#include "numpy/npy_cpu.h"
#include "numpy/npy_common.h"
-#ifdef NPY_OS_DARWIN
- /* This hardcoded logic is fragile, but universal builds makes it
- * difficult to detect arch-specific features */
-
- /* MAC OS X < 10.4 and gcc < 4 does not support proper long double, and
- * is the same as double on those platforms */
- #if NPY_BITSOF_LONGDOUBLE == NPY_BITSOF_DOUBLE
- /* This assumes that FPU and ALU have the same endianness */
- #if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
- #define HAVE_LDOUBLE_IEEE_DOUBLE_LE
- #elif NPY_BYTE_ORDER == NPY_BIG_ENDIAN
- #define HAVE_LDOUBLE_IEEE_DOUBLE_BE
- #else
- #error Endianness undefined ?
- #endif
- #else
- #if defined(NPY_CPU_X86)
- #define HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE
- #elif defined(NPY_CPU_AMD64)
- #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
- #elif defined(NPY_CPU_PPC) || defined(NPY_CPU_PPC64)
- #define HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE
- #elif defined(NPY_CPU_PPC64LE)
- #define HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_LE
- #endif
- #endif
-#endif
-
#if !(defined(HAVE_LDOUBLE_IEEE_QUAD_BE) || \
defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || \
defined(HAVE_LDOUBLE_IEEE_DOUBLE_LE) || \
defined(HAVE_LDOUBLE_IEEE_DOUBLE_BE) || \
- defined(HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE) || \
defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \
defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \
defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) || \
- defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE) || \
- defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_LE))
+ defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) || \
+ defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE))
#error No long double representation defined
#endif
+/* for back-compat, also keep old name for double-double */
+#ifdef HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE
+ #define HAVE_LDOUBLE_DOUBLE_DOUBLE_LE
+#endif
+#ifdef HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE
+ #define HAVE_LDOUBLE_DOUBLE_DOUBLE_BE
+#endif
+
#endif
if (methods != NULL) {
methods[num_override_args] = method;
}
+ else {
+ Py_DECREF(method);
+ }
++num_override_args;
}
}
fail:
if (methods != NULL) {
for (i = 0; i < num_override_args; i++) {
- Py_XDECREF(methods[i]);
+ Py_DECREF(methods[i]);
+ }
+ }
+ if (with_override != NULL) {
+ for (i = 0; i < num_override_args; i++) {
+ Py_DECREF(with_override[i]);
}
}
return -1;
/**end repeat**/
+char *cumsum_signature = "(i)->(i)";
+
+/*
+ * This implements the function
+ * out[n] = sum_i^n in[i]
+ */
+
+/**begin repeat
+
+ #TYPE=LONG,DOUBLE#
+ #typ=npy_long,npy_double#
+*/
+
+static void
+@TYPE@_cumsum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_2
+ npy_intp di = dimensions[0];
+ npy_intp i;
+ npy_intp is=steps[0], os=steps[1];
+ BEGIN_OUTER_LOOP_2
+ char *ip=args[0], *op=args[1];
+ @typ@ cumsum = 0;
+ for (i = 0; i < di; i++, ip += is, op += os) {
+ cumsum += (*(@typ@ *)ip);
+ *(@typ@ *)op = cumsum;
+ }
+ END_OUTER_LOOP
+}
+
+/**end repeat**/
+
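A short sketch of what this test loop computes once it is registered below
(module and import path assumed from the PyInit__umath_tests function at the
end of this file):

    import numpy as np
    from numpy.core._umath_tests import cumsum

    x = np.arange(5.0)
    print(cumsum(x))              # [ 0.  1.  3.  6. 10.]
    m = np.arange(6.0).reshape(2, 3)
    print(cumsum(m))              # runs along the last axis of each row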
static PyUFuncGenericFunction inner1d_functions[] = { LONG_inner1d, DOUBLE_inner1d };
static void * inner1d_data[] = { (void *)NULL, (void *)NULL };
static char euclidean_pdist_signatures[] = { NPY_FLOAT, NPY_FLOAT,
NPY_DOUBLE, NPY_DOUBLE };
+static PyUFuncGenericFunction cumsum_functions[] = { LONG_cumsum, DOUBLE_cumsum };
+static void * cumsum_data[] = { (void *)NULL, (void *)NULL };
+static char cumsum_signatures[] = { NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE };
-static void
+
+static int
addUfuncs(PyObject *dictionary) {
PyObject *f;
"inner on the last dimension and broadcast on the rest \n"
" \"(i),(i)->()\" \n",
0, inner1d_signature);
+ /*
+ * yes, this should not happen, but I (MHvK) just spent an hour looking at
+ * segfaults because I screwed up something that seemed totally unrelated.
+ */
+ if (f == NULL) {
+ return -1;
+ }
PyDict_SetItemString(dictionary, "inner1d", f);
Py_DECREF(f);
f = PyUFunc_FromFuncAndDataAndSignature(innerwt_functions, innerwt_data,
"inner1d with a weight argument \n"
" \"(i),(i),(i)->()\" \n",
0, innerwt_signature);
+ if (f == NULL) {
+ return -1;
+ }
PyDict_SetItemString(dictionary, "innerwt", f);
Py_DECREF(f);
f = PyUFunc_FromFuncAndDataAndSignature(matrix_multiply_functions,
"matrix multiplication on last two dimensions \n"
" \"(m,n),(n,p)->(m,p)\" \n",
0, matrix_multiply_signature);
+ if (f == NULL) {
+ return -1;
+ }
PyDict_SetItemString(dictionary, "matrix_multiply", f);
Py_DECREF(f);
f = PyUFunc_FromFuncAndDataAndSignature(euclidean_pdist_functions,
"pairwise euclidean distance on last two dimensions \n"
" \"(n,d)->(p)\" \n",
0, euclidean_pdist_signature);
+ if (f == NULL) {
+ return -1;
+ }
PyDict_SetItemString(dictionary, "euclidean_pdist", f);
Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(cumsum_functions,
+ cumsum_data, cumsum_signatures,
+ 2, 1, 1, PyUFunc_None, "cumsum",
+ "Cumulative sum of the input (n)->(n)\n",
+ 0, cumsum_signature);
+ if (f == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(dictionary, "cumsum", f);
+ Py_DECREF(f);
f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data,
inner1d_signatures, 2, 2, 1, PyUFunc_None, "inner1d_no_doc",
NULL,
0, inner1d_signature);
+ if (f == NULL) {
+ return -1;
+ }
PyDict_SetItemString(dictionary, "inner1d_no_doc", f);
Py_DECREF(f);
+
+ return 0;
}
static PyObject *
UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
- int nin, nout;
+ int nin, nout, i;
PyObject *signature, *sig_str;
- PyObject *f;
+ PyUFuncObject *f = NULL;
+ PyObject *core_num_dims = NULL, *core_dim_ixs = NULL;
int core_enabled;
+ int core_num_ixs = 0;
- if (!PyArg_ParseTuple(args, "iiO", &nin, &nout, &signature)) return NULL;
-
+ if (!PyArg_ParseTuple(args, "iiO", &nin, &nout, &signature)) {
+ return NULL;
+ }
if (PyString_Check(signature)) {
sig_str = signature;
return NULL;
}
- f = PyUFunc_FromFuncAndDataAndSignature(NULL, NULL, NULL,
+ f = (PyUFuncObject*)PyUFunc_FromFuncAndDataAndSignature(
+ NULL, NULL, NULL,
0, nin, nout, PyUFunc_None, "no name",
"doc:none",
1, PyString_AS_STRING(sig_str));
if (sig_str != signature) {
Py_DECREF(sig_str);
}
- if (f == NULL) return NULL;
- core_enabled = ((PyUFuncObject*)f)->core_enabled;
+ if (f == NULL) {
+ return NULL;
+ }
+ core_enabled = f->core_enabled;
+ /*
+ * Don't presume core_num_dims and core_dim_ixs are defined;
+ * they currently are even if core_enabled=0, but there's no real
+ * reason they should be. So avoid segfaults if we change our mind.
+ */
+ if (f->core_num_dims != NULL) {
+ core_num_dims = PyTuple_New(f->nargs);
+ if (core_num_dims == NULL) {
+ goto fail;
+ }
+ for (i = 0; i < f->nargs; i++) {
+ PyObject *val = PyLong_FromLong(f->core_num_dims[i]);
+ PyTuple_SET_ITEM(core_num_dims, i, val);
+ core_num_ixs += f->core_num_dims[i];
+ }
+ }
+ else {
+ Py_INCREF(Py_None);
+ core_num_dims = Py_None;
+ }
+ if (f->core_dim_ixs != NULL) {
+ core_dim_ixs = PyTuple_New(core_num_ixs);
+        if (core_dim_ixs == NULL) {
+ goto fail;
+ }
+ for (i = 0; i < core_num_ixs; i++) {
+ PyObject * val = PyLong_FromLong(f->core_dim_ixs[i]);
+ PyTuple_SET_ITEM(core_dim_ixs, i, val);
+ }
+ }
+ else {
+ Py_INCREF(Py_None);
+ core_dim_ixs = Py_None;
+ }
Py_DECREF(f);
- return Py_BuildValue("i", core_enabled);
+ return Py_BuildValue("iOO", core_enabled, core_num_dims, core_dim_ixs);
+
+fail:
+ Py_XDECREF(f);
+ Py_XDECREF(core_num_dims);
+ Py_XDECREF(core_dim_ixs);
+ return NULL;
}
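A hedged sketch of the richer return value (expected tuples inferred from the
signature grammar, not verified against a build):

    from numpy.core._umath_tests import test_signature

    enabled, core_num_dims, core_dim_ixs = test_signature(2, 1, "(i),(i)->()")
    assert enabled == 1
    assert core_num_dims == (1, 1, 0)  # one core dim per input, none on output
    assert core_dim_ixs == (0, 0)      # both inputs share dimension index 'i'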
static PyMethodDef UMath_TestsMethods[] = {
};
#endif
+/* Initialization function for the module */
#if defined(NPY_PY3K)
-#define RETVAL m
-PyMODINIT_FUNC PyInit__umath_tests(void)
+#define RETVAL(x) x
+PyMODINIT_FUNC PyInit__umath_tests(void) {
#else
-#define RETVAL
-PyMODINIT_FUNC
-init_umath_tests(void)
+#define RETVAL(x)
+PyMODINIT_FUNC init_umath_tests(void) {
#endif
-{
PyObject *m;
PyObject *d;
PyObject *version;
#else
m = Py_InitModule("_umath_tests", UMath_TestsMethods);
#endif
- if (m == NULL)
- return RETVAL;
-
+ if (m == NULL) {
+ return RETVAL(NULL);
+ }
import_array();
import_ufunc();
Py_DECREF(version);
/* Load the ufunc operators into the module's namespace */
- addUfuncs(d);
-
- if (PyErr_Occurred()) {
+ if (addUfuncs(d) < 0) {
+ Py_DECREF(m);
+ PyErr_Print();
PyErr_SetString(PyExc_RuntimeError,
"cannot load _umath_tests module.");
+ return RETVAL(NULL);
}
- return RETVAL;
+ return RETVAL(m);
}
--- /dev/null
+#define _UMATHMODULE
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include <Python.h>
+
+#include "npy_config.h"
+
+#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
+#define NO_IMPORT_ARRAY
+
+#include "cpuid.h"
+
+#define XCR_XFEATURE_ENABLED_MASK 0x0
+#define XSTATE_SSE 0x2
+#define XSTATE_YMM 0x4
+
+/*
+ * verify the OS supports avx instructions
+ * it can be disabled in some OSes, e.g. with the noxsave boot option of linux
+ */
+static NPY_INLINE
+int os_avx_support(void)
+{
+#if HAVE_XGETBV
+    /*
+     * read XCR0 via the xgetbv instruction to check which state
+     * components the OS saves and restores on context switch
+     */
+ unsigned int eax, edx;
+ unsigned int ecx = XCR_XFEATURE_ENABLED_MASK;
+ __asm__("xgetbv" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ return (eax & (XSTATE_SSE | XSTATE_YMM)) == (XSTATE_SSE | XSTATE_YMM);
+#else
+ return 0;
+#endif
+}
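The OS check reduces to a two-bit test on XCR0; a pure-Python sketch of the
mask logic, mirroring the constants above:

    XSTATE_SSE = 0x2
    XSTATE_YMM = 0x4

    def os_avx_enabled(xcr0):
        # both the SSE and YMM state components must be OS-managed
        return (xcr0 & (XSTATE_SSE | XSTATE_YMM)) == (XSTATE_SSE | XSTATE_YMM)

    assert os_avx_enabled(0x7)       # x87 | SSE | YMM: AVX usable
    assert not os_avx_enabled(0x3)   # YMM state not enabled by the OS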
+
+
+/*
+ * Primitive cpu feature detect function
+ * Currently only supports checking for avx on gcc compatible compilers.
+ */
+NPY_NO_EXPORT int
+npy_cpu_supports(const char * feature)
+{
+#ifdef HAVE___BUILTIN_CPU_SUPPORTS
+ if (strcmp(feature, "avx2") == 0) {
+ return __builtin_cpu_supports("avx2") && os_avx_support();
+ }
+ else if (strcmp(feature, "avx") == 0) {
+ return __builtin_cpu_supports("avx") && os_avx_support();
+ }
+#endif
+
+ return 0;
+}
--- /dev/null
+#ifndef _NPY_PRIVATE__CPUID_H_
+#define _NPY_PRIVATE__CPUID_H_
+
+#include <numpy/ndarraytypes.h> /* for NPY_NO_EXPORT */
+
+NPY_NO_EXPORT int
+npy_cpu_supports(const char * feature);
+
+#endif
if (!errmask) {
return 0;
}
- fperr = PyUFunc_getfperr();
+ fperr = npy_get_floatstatus_barrier((char*)extobj);
if (!fperr) {
return 0;
}
*/
#define PW_BLOCKSIZE 128
+
+/*
+ * the largest SIMD vector size in bytes that numpy supports;
+ * it is currently an extremely large value as it is only used for memory
+ * overlap checks
+ */
+#ifndef NPY_MAX_SIMD_SIZE
+#define NPY_MAX_SIMD_SIZE 1024
+#endif
+
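The macro changes below use this constant to take the in-place fast path only
when the output exactly aliases an input and is at least one SIMD vector away
from the other operand. A sketch of the user-visible invariant being
protected (overlapping calls must match the copy-first result):

    import numpy as np

    a = np.arange(8.0)
    expected = a[:-1] + a[1:]            # computed from copies
    np.add(a[:-1], a[1:], out=a[:-1])    # partially overlapping views
    assert np.array_equal(a[:-1], expected)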
/*
* include vectorized functions and dispatchers
* this file is safe to include also for generic builds
do { \
/* condition allows compiler to optimize the generic macro */ \
if (IS_BINARY_CONT(tin, tout)) { \
- if (args[2] == args[0]) { \
+ if (abs_ptrdiff(args[2], args[0]) == 0 && \
+ abs_ptrdiff(args[2], args[1]) >= NPY_MAX_SIMD_SIZE) { \
BASE_BINARY_LOOP_INP(tin, tout, op) \
} \
- else if (args[2] == args[1]) { \
+ else if (abs_ptrdiff(args[2], args[1]) == 0 && \
+ abs_ptrdiff(args[2], args[0]) >= NPY_MAX_SIMD_SIZE) { \
BASE_BINARY_LOOP_INP(tin, tout, op) \
} \
else { \
} \
} \
else if (IS_BINARY_CONT_S1(tin, tout)) { \
- if (args[1] == args[2]) { \
+ if (abs_ptrdiff(args[2], args[1]) == 0) { \
BASE_BINARY_LOOP_S_INP(tin, tout, in1, args[0], in2, ip2, op) \
} \
else { \
} \
} \
else if (IS_BINARY_CONT_S2(tin, tout)) { \
- if (args[0] == args[2]) { \
+ if (abs_ptrdiff(args[2], args[0]) == 0) { \
BASE_BINARY_LOOP_S_INP(tin, tout, in2, args[1], in1, ip1, op) \
} \
else { \
*((npy_bool *)op1) = @func@(in1) != 0;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
const @type@ in2 = *(@type@ *)ip2;
io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
}
+ if (npy_isnan(io1)) {
+ npy_set_floatstatus_invalid();
+ }
*((@type@ *)iop1) = io1;
}
}
else {
BINARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
+ @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
- *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2;
+ in1 = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2;
+ if (npy_isnan(in1)) {
+ npy_set_floatstatus_invalid();
+ }
+ *((@type@ *)op1) = in1;
}
}
}
*((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
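Assuming these hunks behave as intended, a NaN flowing through
maximum/minimum now raises the 'invalid' floating-point status, which
np.errstate can surface:

    import numpy as np

    with np.errstate(invalid='raise'):
        try:
            np.maximum.reduce(np.array([1.0, np.nan, 3.0]))
        except FloatingPointError:
            print("NaN in the reduction sets the invalid flag")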
*((@type@ *)op1) = tmp + 0;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
NPY_NO_EXPORT void
const npy_half in1 = *(npy_half *)ip1;
*((npy_bool *)op1) = @func@(in1) != 0;
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat**/
const npy_half in2 = *(npy_half *)ip2;
*((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2;
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat**/
const @ftype@ in1i = ((@ftype@ *)ip1)[1];
*((npy_bool *)op1) = @func@(in1r) @OP@ @func@(in1i);
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
((@ftype@ *)op1)[1] = in2i;
}
}
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
* #OP = EQ, NE, GT, GE, LT, LE#
* #identity = NPY_TRUE, NPY_FALSE, -1*4#
*/
+
+/**begin repeat1
+ * #suffix = , _OO_O#
+ * #as_bool = 1, 0#
+ */
NPY_NO_EXPORT void
-OBJECT_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) {
+OBJECT@suffix@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) {
BINARY_LOOP {
- int ret;
PyObject *ret_obj;
PyObject *in1 = *(PyObject **)ip1;
PyObject *in2 = *(PyObject **)ip2;
if (ret_obj == NULL) {
return;
}
- ret = PyObject_IsTrue(ret_obj);
- Py_DECREF(ret_obj);
- if (ret == -1) {
- return;
+#if @as_bool@
+ {
+ int ret = PyObject_IsTrue(ret_obj);
+ Py_DECREF(ret_obj);
+ if (ret == -1) {
+ return;
+ }
+ *((npy_bool *)op1) = (npy_bool)ret;
}
- *((npy_bool *)op1) = (npy_bool)ret;
+#else
+ *((PyObject **)op1) = ret_obj;
+#endif
}
}
+/**end repeat1**/
/**end repeat**/
NPY_NO_EXPORT void
* #kind = equal, not_equal, greater, greater_equal, less, less_equal#
* #OP = EQ, NE, GT, GE, LT, LE#
*/
+/**begin repeat1
+ * #suffix = , _OO_O#
+ */
NPY_NO_EXPORT void
-OBJECT_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+OBJECT@suffix@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**end repeat1**/
/**end repeat**/
NPY_NO_EXPORT void
"cannot specify both 'sig' and 'signature'");
return -1;
}
- Py_INCREF(obj);
+ /*
+ * No INCREF or DECREF needed: got a borrowed reference above,
+ * and, unlike e.g. PyList_SetItem, PyDict_SetItem INCREF's it.
+ */
PyDict_SetItemString(normal_kwds, "signature", obj);
PyDict_DelItemString(normal_kwds, "sig");
}
npy_intp nin = ufunc->nin;
npy_intp nout = ufunc->nout;
npy_intp nargs = PyTuple_GET_SIZE(args);
+ npy_intp nkwds = PyDict_Size(*normal_kwds);
PyObject *obj;
if (nargs < nin) {
/* If we have more args than nin, they must be the output variables.*/
if (nargs > nin) {
- if(PyDict_GetItemString(*normal_kwds, "out")) {
+        if (nkwds > 0 && PyDict_GetItemString(*normal_kwds, "out")) {
PyErr_Format(PyExc_TypeError,
"argument given by name ('out') and position "
"(%"NPY_INTP_FMT")", nin);
Py_DECREF(obj);
}
}
+ /* gufuncs accept either 'axes' or 'axis', but not both */
+ if (nkwds >= 2 && (PyDict_GetItemString(*normal_kwds, "axis") &&
+ PyDict_GetItemString(*normal_kwds, "axes"))) {
+ PyErr_SetString(PyExc_TypeError,
+ "cannot specify both 'axis' and 'axes'");
+ return -1;
+ }
/* finally, ufuncs accept 'sig' or 'signature' normalize to 'signature' */
- return normalize_signature_keyword(*normal_kwds);
+ return nkwds == 0 ? 0 : normalize_signature_keyword(*normal_kwds);
}
static int
npy_intp nargs = PyTuple_GET_SIZE(args);
npy_intp i;
PyObject *obj;
- static char *kwlist[] = {"array", "axis", "dtype", "out", "keepdims"};
+ static PyObject *NoValue = NULL;
+ static char *kwlist[] = {"array", "axis", "dtype", "out", "keepdims",
+ "initial"};
- if (nargs < 1 || nargs > 5) {
+ npy_cache_import("numpy", "_NoValue", &NoValue);
+    if (NoValue == NULL) {
+        return -1;
+    }
+
+ if (nargs < 1 || nargs > 6) {
PyErr_Format(PyExc_TypeError,
- "ufunc.reduce() takes from 1 to 5 positional "
+ "ufunc.reduce() takes from 1 to 6 positional "
"arguments but %"NPY_INTP_FMT" were given", nargs);
return -1;
}
}
obj = PyTuple_GetSlice(args, 3, 4);
}
+ /* Remove initial=np._NoValue */
+ if (i == 5 && obj == NoValue) {
+ continue;
+ }
PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
if (i == 3) {
Py_DECREF(obj);
if (*normal_args == NULL) {
return -1;
}
-
/* ufuncs accept 'sig' or 'signature' normalize to 'signature' */
return normalize_signature_keyword(*normal_kwds);
}
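The sixth slot is the new 'initial' keyword of ufunc.reduce; it is dropped
from the normalized kwargs only when it is the np._NoValue sentinel. A usage
sketch:

    import numpy as np

    print(np.add.reduce(np.arange(5), initial=10))   # 20
    print(np.minimum.reduce([], initial=np.inf))     # inf: empty reduce defined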
PyObject *with_override[NPY_MAXARGS];
PyObject *array_ufunc_methods[NPY_MAXARGS];
- PyObject *obj;
- PyObject *other_obj;
PyObject *out;
PyObject *method_name = NULL;
/* Choose an overriding argument */
for (i = 0; i < num_override_args; i++) {
- obj = with_override[i];
- if (obj == NULL) {
+ override_obj = with_override[i];
+ if (override_obj == NULL) {
continue;
}
- /* Get the first instance of an overriding arg.*/
- override_obj = obj;
-
/* Check for sub-types to the right of obj. */
for (j = i + 1; j < num_override_args; j++) {
- other_obj = with_override[j];
+ PyObject *other_obj = with_override[j];
if (other_obj != NULL &&
- PyObject_Type(other_obj) != PyObject_Type(obj) &&
+ Py_TYPE(other_obj) != Py_TYPE(override_obj) &&
PyObject_IsInstance(other_obj,
- PyObject_Type(override_obj))) {
+ (PyObject *)Py_TYPE(override_obj))) {
override_obj = NULL;
break;
}
dtype = PyArray_DESCR(out);
Py_INCREF(dtype);
- ret = (PyArrayObject_fields *)PyArray_NewFromDescr(&PyArray_Type,
- dtype,
- ndim, shape,
- strides,
- PyArray_DATA(out),
- PyArray_FLAGS(out),
- NULL);
+    /*
+     * TODO: use PyArray_NewFromDescrAndBase here once multiarray and umath
+     * are merged
+     */
+ ret = (PyArrayObject_fields *)PyArray_NewFromDescr(
+ &PyArray_Type, dtype,
+ ndim, shape, strides, PyArray_DATA(out),
+ PyArray_FLAGS(out), NULL);
if (ret == NULL) {
return NULL;
}
}
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&iter);
if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
}
#if @fperr@
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
#endif
/*
#if @fperr@
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
}
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
return Py_NotImplemented;
}
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&out);
/*
* here we do the actual calculation with arg1 and arg2
}
/* Check status flag. If it is set, then look up what to do */
- retstatus = PyUFunc_getfperr();
+ retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
i += 2 * stride;
/* minps/minpd will set invalid flag if nan is encountered */
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)&c1);
LOOP_BLOCKED(@type@, 32) {
@vtype@ v1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@vtype@ v2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]);
}
c1 = @vpre@_@VOP@_@vsuf@(c1, c2);
- if (npy_get_floatstatus() & NPY_FPE_INVALID) {
+ if (npy_get_floatstatus_barrier((char*)&c1) & NPY_FPE_INVALID) {
*op = @nan@;
}
else {
LOOP_BLOCKED_END {
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
+ if (npy_isnan(*op)) {
+ npy_set_floatstatus_invalid();
+ }
}
/**end repeat1**/
#endif
/**********************************************/
+typedef struct {
+ PyObject *in; /* The input arguments to the ufunc, a tuple */
+ PyObject *out; /* The output arguments, a tuple. If no non-None outputs are
+ provided, then this is NULL. */
+} ufunc_full_args;
+
+/*
+ * Get the arg tuple to pass in the context argument to __array_wrap__ and
+ * __array_prepare__.
+ *
+ * Output arguments are only passed if at least one is non-None.
+ */
+static PyObject *
+_get_wrap_prepare_args(ufunc_full_args full_args) {
+ if (full_args.out == NULL) {
+ Py_INCREF(full_args.in);
+ return full_args.in;
+ }
+ else {
+ return PySequence_Concat(full_args.in, full_args.out);
+ }
+}
+
/* ---------------------------------------------------------------- */
static int
* non-clearing get was only added in 1.9 so this function always cleared
* keep it so just in case third party code relied on the clearing
*/
- return npy_clear_floatstatus();
+ char param = 0;
+    return npy_clear_floatstatus_barrier(&param);
}
#define HANDLEIT(NAME, str) {if (retstatus & NPY_FPE_##NAME) { \
PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
{
/* clearing is done for backward compatibility */
- int retstatus = npy_clear_floatstatus();
+ int retstatus;
+ retstatus = npy_clear_floatstatus_barrier((char*)&retstatus);
return PyUFunc_handlefperr(errmask, errobj, retstatus, first);
}
NPY_NO_EXPORT void
PyUFunc_clearfperr()
{
- npy_clear_floatstatus();
+ char param = 0;
+    npy_clear_floatstatus_barrier(&param);
}
/*
* defines the method.
*/
static PyObject*
-_find_array_method(PyObject *args, int nin, PyObject *method_name)
+_find_array_method(PyObject *args, PyObject *method_name)
{
int i, n_methods;
PyObject *obj;
PyObject *method = NULL;
n_methods = 0;
- for (i = 0; i < nin; i++) {
+ for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
obj = PyTuple_GET_ITEM(args, i);
if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) {
continue;
* should just have PyArray_Return called.
*/
static void
-_find_array_prepare(PyObject *args, PyObject *kwds,
+_find_array_prepare(ufunc_full_args args,
PyObject **output_prep, int nin, int nout)
{
- Py_ssize_t nargs;
int i;
+ PyObject *prep;
/*
* Determine the prepping function given by the input arrays
* (could be NULL).
*/
- PyObject *prep = _find_array_method(args, nin, npy_um_str_array_prepare);
+ prep = _find_array_method(args.in, npy_um_str_array_prepare);
/*
* For all the output arrays decide what to do.
*
* exact ndarray so that no PyArray_Return is
* done in that case.
*/
- nargs = PyTuple_GET_SIZE(args);
- for (i = 0; i < nout; i++) {
- int j = nin + i;
- PyObject *obj = NULL;
- if (j < nargs) {
- obj = PyTuple_GET_ITEM(args, j);
- /* Output argument one may also be in a keyword argument */
- if (i == 0 && obj == Py_None && kwds != NULL) {
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- }
- }
- /* Output argument one may also be in a keyword argument */
- else if (i == 0 && kwds != NULL) {
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- }
-
- if (obj == NULL) {
+ if (args.out == NULL) {
+ for (i = 0; i < nout; i++) {
Py_XINCREF(prep);
output_prep[i] = prep;
}
- else {
+ }
+ else {
+ for (i = 0; i < nout; i++) {
output_prep[i] = _get_output_array_method(
- obj, npy_um_str_array_prepare, prep);
+ PyTuple_GET_ITEM(args.out, i), npy_um_str_array_prepare, prep);
}
}
Py_XDECREF(prep);
/*
* Parses the positional and keyword arguments for a generic ufunc call.
- *
- * Note that if an error is returned, the caller must free the
- * non-zero references in out_op. This
- * function does not do its own clean-up.
+ * All returned arguments are new references (with optional ones NULL
+ * if not present)
*/
static int
get_ufunc_arguments(PyUFuncObject *ufunc,
NPY_ORDER *out_order,
NPY_CASTING *out_casting,
PyObject **out_extobj,
- PyObject **out_typetup,
- int *out_subok,
- PyArrayObject **out_wheremask,
- PyObject **out_axes)
+ PyObject **out_typetup, /* type: Tuple[np.dtype] */
+ int *out_subok, /* bool */
+ PyArrayObject **out_wheremask, /* PyArray of bool */
+ PyObject **out_axes, /* type: List[Tuple[T]] */
+ PyObject **out_axis, /* type: T */
+ int *out_keepdims) /* bool */
{
int i, nargs;
int nin = ufunc->nin;
int nout = ufunc->nout;
+ int nop = ufunc->nargs;
PyObject *obj, *context;
PyObject *str_key_obj = NULL;
const char *ufunc_name = ufunc_get_name_cstr(ufunc);
int any_flexible = 0, any_object = 0, any_flexible_userloops = 0;
int has_sig = 0;
+ /*
+ * Initialize objects so caller knows when outputs and other optional
+ * arguments are set (also means we can safely XDECREF on failure).
+ */
+ for (i = 0; i < nop; i++) {
+ out_op[i] = NULL;
+ }
*out_extobj = NULL;
*out_typetup = NULL;
if (out_axes != NULL) {
*out_axes = NULL;
}
+ if (out_axis != NULL) {
+ *out_axis = NULL;
+ }
if (out_wheremask != NULL) {
*out_wheremask = NULL;
}
/* Check number of arguments */
nargs = PyTuple_Size(args);
- if ((nargs < nin) || (nargs > ufunc->nargs)) {
+ if ((nargs < nin) || (nargs > nop)) {
PyErr_SetString(PyExc_ValueError, "invalid number of arguments");
return -1;
}
*/
context = Py_BuildValue("OOi", ufunc, args, i);
if (context == NULL) {
- return -1;
+ goto fail;
}
}
else {
}
if (out_op[i] == NULL) {
- return -1;
+ goto fail;
}
type_num = PyArray_DESCR(out_op[i])->type_num;
}
}
- if (any_flexible && !any_flexible_userloops && !any_object) {
+ if (any_flexible && !any_flexible_userloops && !any_object && nin == 2) {
/* Traditionally, we return -2 here (meaning "NotImplemented") anytime
* we hit the above condition.
*
if (!strcmp(ufunc_name, "equal") ||
!strcmp(ufunc_name, "not_equal")) {
/* Warn on non-scalar, return NotImplemented regardless */
- assert(nin == 2);
if (PyArray_NDIM(out_op[0]) != 0 ||
PyArray_NDIM(out_op[1]) != 0) {
if (DEPRECATE_FUTUREWARNING(
"elementwise comparison failed; returning scalar "
"instead, but in the future will perform elementwise "
"comparison") < 0) {
- return -1;
+ goto fail;
}
}
+ Py_DECREF(out_op[0]);
+ Py_DECREF(out_op[1]);
return -2;
}
else if (!strcmp(ufunc_name, "less") ||
#if !defined(NPY_PY3K)
if (DEPRECATE("unorderable dtypes; returning scalar but in "
"the future this will be an error") < 0) {
- return -1;
+ goto fail;
}
#endif
+ Py_DECREF(out_op[0]);
+ Py_DECREF(out_op[1]);
return -2;
}
}
for (i = nin; i < nargs; ++i) {
obj = PyTuple_GET_ITEM(args, i);
if (_set_out_array(obj, out_op + i) < 0) {
- return -1;
+ goto fail;
}
}
switch (str[0]) {
case 'a':
- /* possible axis argument for generalized ufunc */
+ /* possible axes argument for generalized ufunc */
if (out_axes != NULL && strcmp(str, "axes") == 0) {
+ if (out_axis != NULL && *out_axis != NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "cannot specify both 'axis' and 'axes'");
+ goto fail;
+ }
+ Py_INCREF(value);
*out_axes = value;
bad_arg = 0;
}
+ else if (out_axis != NULL && strcmp(str, "axis") == 0) {
+ if (out_axes != NULL && *out_axes != NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "cannot specify both 'axis' and 'axes'");
+ goto fail;
+ }
+ Py_INCREF(value);
+ *out_axis = value;
+ bad_arg = 0;
+ }
break;
case 'c':
/* Provides a policy for allowed casting */
if (dtype != NULL) {
if (*out_typetup != NULL) {
PyErr_SetString(PyExc_RuntimeError,
- "cannot specify both 'sig' and 'dtype'");
+ "cannot specify both 'signature' and 'dtype'");
goto fail;
}
*out_typetup = Py_BuildValue("(N)", dtype);
* error mask, and error object
*/
if (strcmp(str, "extobj") == 0) {
+ Py_INCREF(value);
*out_extobj = value;
bad_arg = 0;
}
break;
+ case 'k':
+ if (out_keepdims != NULL && strcmp(str, "keepdims") == 0) {
+ if (!PyBool_Check(value)) {
+ PyErr_SetString(PyExc_TypeError,
+ "'keepdims' must be a boolean");
+ goto fail;
+ }
+ *out_keepdims = (value == Py_True);
+ bad_arg = 0;
+ }
+ break;
case 'o':
/*
* Output arrays may be specified as a keyword argument,
}
if (*out_typetup != NULL) {
PyErr_SetString(PyExc_RuntimeError,
- "cannot specify both 'sig' and 'dtype'");
+ "cannot specify both 'signature' and 'dtype'");
goto fail;
}
- *out_typetup = value;
Py_INCREF(value);
+ *out_typetup = value;
bad_arg = 0;
has_sig = 1;
}
fail:
Py_XDECREF(str_key_obj);
- Py_XDECREF(*out_extobj);
- *out_extobj = NULL;
Py_XDECREF(*out_typetup);
- *out_typetup = NULL;
+ Py_XDECREF(*out_extobj);
+ if (out_wheremask != NULL) {
+ Py_XDECREF(*out_wheremask);
+ }
if (out_axes != NULL) {
Py_XDECREF(*out_axes);
- *out_axes = NULL;
}
- if (out_wheremask != NULL) {
- Py_XDECREF(*out_wheremask);
- *out_wheremask = NULL;
+ if (out_axis != NULL) {
+ Py_XDECREF(*out_axis);
+ }
+ for (i = 0; i < nop; i++) {
+ Py_XDECREF(out_op[i]);
}
return -1;
}
prepare_ufunc_output(PyUFuncObject *ufunc,
PyArrayObject **op,
PyObject *arr_prep,
- PyObject *arr_prep_args,
+ ufunc_full_args full_args,
int i)
{
if (arr_prep != NULL && arr_prep != Py_None) {
PyObject *res;
PyArrayObject *arr;
+ PyObject *args_tup;
- res = PyObject_CallFunction(arr_prep, "O(OOi)",
- *op, ufunc, arr_prep_args, i);
- if ((res == NULL) || (res == Py_None) || !PyArray_Check(res)) {
- if (!PyErr_Occurred()){
- PyErr_SetString(PyExc_TypeError,
- "__array_prepare__ must return an "
- "ndarray or subclass thereof");
- }
- Py_XDECREF(res);
+ /* Call with the context argument */
+ args_tup = _get_wrap_prepare_args(full_args);
+ if (args_tup == NULL) {
+ return -1;
+ }
+ res = PyObject_CallFunction(
+ arr_prep, "O(OOi)", *op, ufunc, args_tup, i);
+ Py_DECREF(args_tup);
+
+ if (res == NULL) {
+ return -1;
+ }
+ else if (!PyArray_Check(res)) {
+ PyErr_SetString(PyExc_TypeError,
+ "__array_prepare__ must return an "
+ "ndarray or subclass thereof");
+ Py_DECREF(res);
return -1;
}
arr = (PyArrayObject *)res;
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- PyObject *arr_prep_args,
+ ufunc_full_args full_args,
PyUFuncGenericFunction innerloop,
void *innerloopdata)
{
- npy_intp i, iop, nin = ufunc->nin, nout = ufunc->nout;
+ npy_intp i, nin = ufunc->nin, nout = ufunc->nout;
npy_intp nop = nin + nout;
npy_uint32 op_flags[NPY_MAXARGS];
NpyIter *iter;
continue;
}
if (prepare_ufunc_output(ufunc, &op[nin+i],
- arr_prep[i], arr_prep_args, i) < 0) {
+ arr_prep[i], full_args, i) < 0) {
return -1;
}
}
/* Call the __array_prepare__ functions for the new array */
if (prepare_ufunc_output(ufunc, &op[nin+i],
- arr_prep[i], arr_prep_args, i) < 0) {
- for(iop = 0; iop < nin+i; ++iop) {
- if (op_it[iop] != op[iop]) {
- /* ignore errors */
- PyArray_ResolveWritebackIfCopy(op_it[iop]);
- }
- }
+ arr_prep[i], full_args, i) < 0) {
NpyIter_Deallocate(iter);
return -1;
}
NPY_END_THREADS;
}
- for(iop = 0; iop < nop; ++iop) {
- if (op_it[iop] != op[iop]) {
- PyArray_ResolveWritebackIfCopy(op_it[iop]);
- }
- }
- NpyIter_Deallocate(iter);
- return 0;
+ return NpyIter_Deallocate(iter);
}
/*
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- PyObject *arr_prep_args)
+ ufunc_full_args full_args)
{
npy_intp nin = ufunc->nin, nout = ufunc->nout;
PyUFuncGenericFunction innerloop;
/* Call the __prepare_array__ if necessary */
if (prepare_ufunc_output(ufunc, &op[1],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
/* Call the __prepare_array__ if necessary */
if (prepare_ufunc_output(ufunc, &op[1],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
/* Call the __prepare_array__ if necessary */
if (prepare_ufunc_output(ufunc, &op[2],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
/* Call the __prepare_array__ if necessary */
if (prepare_ufunc_output(ufunc, &op[2],
- arr_prep[0], arr_prep_args, 0) < 0) {
+ arr_prep[0], full_args, 0) < 0) {
return -1;
}
NPY_UF_DBG_PRINT("iterator loop\n");
if (iterator_loop(ufunc, op, dtypes, order,
- buffersize, arr_prep, arr_prep_args,
+ buffersize, arr_prep, full_args,
innerloop, innerloopdata) < 0) {
return -1;
}
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- PyObject *arr_prep_args)
+ ufunc_full_args full_args)
{
- int retval, i, nin = ufunc->nin, nout = ufunc->nout;
+ int i, nin = ufunc->nin, nout = ufunc->nout;
int nop = nin + nout;
npy_uint32 op_flags[NPY_MAXARGS];
NpyIter *iter;
Py_INCREF(op_tmp);
if (prepare_ufunc_output(ufunc, &op_tmp,
- arr_prep[i], arr_prep_args, i) < 0) {
+ arr_prep[i], full_args, i) < 0) {
NpyIter_Deallocate(iter);
return -1;
}
NPY_AUXDATA_FREE(innerloopdata);
}
- retval = 0;
- nop = NpyIter_GetNOp(iter);
- for(i=0; i< nop; ++i) {
- if (PyArray_ResolveWritebackIfCopy(NpyIter_GetOperandArray(iter)[i]) < 0) {
- retval = -1;
+ return NpyIter_Deallocate(iter);
+}
+
+static npy_bool
+tuple_all_none(PyObject *tup) {
+ npy_intp i;
+ for (i = 0; i < PyTuple_GET_SIZE(tup); ++i) {
+ if (PyTuple_GET_ITEM(tup, i) != Py_None) {
+ return NPY_FALSE;
}
}
-
- NpyIter_Deallocate(iter);
- return retval;
+ return NPY_TRUE;
}
-static PyObject *
-make_arr_prep_args(npy_intp nin, PyObject *args, PyObject *kwds)
+/*
+ * Convert positional args and the out kwarg into an input and output tuple.
+ *
+ * If the output tuple would be all None, return NULL instead.
+ *
+ * This duplicates logic in many places, so further refactoring is needed:
+ * - get_ufunc_arguments
+ * - PyUFunc_WithOverride
+ * - normalize___call___args
+ */
+static int
+make_full_arg_tuple(
+ ufunc_full_args *full_args,
+ npy_intp nin, npy_intp nout,
+ PyObject *args, PyObject *kwds)
{
- PyObject *out = kwds ? PyDict_GetItem(kwds, npy_um_str_out) : NULL;
- PyObject *arr_prep_args;
+ PyObject *out_kwd = NULL;
+ npy_intp nargs = PyTuple_GET_SIZE(args);
+ npy_intp i;
- if (out == NULL) {
- Py_INCREF(args);
- return args;
+ /* This should have been checked by the caller */
+ assert(nin <= nargs && nargs <= nin + nout);
+
+ /* Initialize so we can XDECREF safely */
+ full_args->in = NULL;
+ full_args->out = NULL;
+
+    /* Get the input arguments */
+ full_args->in = PyTuple_GetSlice(args, 0, nin);
+ if (full_args->in == NULL) {
+ goto fail;
}
- else {
- npy_intp i, nargs = PyTuple_GET_SIZE(args), n;
- n = nargs;
- if (n < nin + 1) {
- n = nin + 1;
- }
- arr_prep_args = PyTuple_New(n);
- if (arr_prep_args == NULL) {
- return NULL;
+
+ /* Look for output keyword arguments */
+ out_kwd = kwds ? PyDict_GetItem(kwds, npy_um_str_out) : NULL;
+
+ if (out_kwd != NULL) {
+ assert(nargs == nin);
+ if (out_kwd == Py_None) {
+ return 0;
}
- /* Copy the tuple, but set the nin-th item to the keyword arg */
- for (i = 0; i < nin; ++i) {
- PyObject *item = PyTuple_GET_ITEM(args, i);
- Py_INCREF(item);
- PyTuple_SET_ITEM(arr_prep_args, i, item);
+ else if (PyTuple_Check(out_kwd)) {
+ assert(PyTuple_GET_SIZE(out_kwd) == nout);
+ if (tuple_all_none(out_kwd)) {
+ return 0;
+ }
+ Py_INCREF(out_kwd);
+ full_args->out = out_kwd;
+ return 0;
}
- Py_INCREF(out);
- PyTuple_SET_ITEM(arr_prep_args, nin, out);
- for (i = nin+1; i < n; ++i) {
- PyObject *item = PyTuple_GET_ITEM(args, i);
- Py_INCREF(item);
- PyTuple_SET_ITEM(arr_prep_args, i, item);
+ else {
+ /* A single argument x is promoted to (x, None, None ...) */
+ full_args->out = PyTuple_New(nout);
+ if (full_args->out == NULL) {
+ goto fail;
+ }
+ Py_INCREF(out_kwd);
+ PyTuple_SET_ITEM(full_args->out, 0, out_kwd);
+ for (i = 1; i < nout; ++i) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(full_args->out, i, Py_None);
+ }
+ return 0;
}
+ }
+
+ /* No outputs in kwargs; if also none in args, we're done */
+ if (nargs == nin) {
+ return 0;
+ }
+ /* copy across positional output arguments, adding trailing Nones */
+ full_args->out = PyTuple_New(nout);
+ if (full_args->out == NULL) {
+ goto fail;
+ }
+ for (i = nin; i < nargs; ++i) {
+ PyObject *item = PyTuple_GET_ITEM(args, i);
+ Py_INCREF(item);
+ PyTuple_SET_ITEM(full_args->out, i - nin, item);
+ }
+ for (i = nargs; i < nin + nout; ++i) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(full_args->out, i - nin, Py_None);
+ }
- return arr_prep_args;
+ /* don't return a tuple full of None */
+ if (tuple_all_none(full_args->out)) {
+ Py_DECREF(full_args->out);
+ full_args->out = NULL;
}
+ return 0;
+
+fail:
+ Py_XDECREF(full_args->in);
+ Py_XDECREF(full_args->out);
+ return -1;
}
/*
return 0;
}
+/*
+ * Check whether the gufunc can be used with axis, i.e., that there is only
+ * a single, shared core dimension (which means that operands either have
+ * that dimension, or have no core dimensions). Returns 0 if all is fine,
+ * and sets an error and returns -1 if not.
+ */
+static int
+_check_axis_support(PyUFuncObject *ufunc) {
+ if (ufunc->core_num_dim_ix != 1) {
+ PyErr_Format(PyExc_TypeError,
+ "%s: axis can only be used with a single shared core "
+ "dimension, not with the %d distinct ones implied by "
+ "signature %s.",
+ ufunc_get_name_cstr(ufunc),
+ ufunc->core_num_dim_ix,
+ ufunc->core_signature);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Check whether the gufunc can be used with keepdims, i.e., that all its
+ * input arguments have the same number of core dimension, and all output
+ * arguments have no core dimensions. Returns 0 if all is fine, and sets
+ * an error and returns -1 if not.
+ */
+static int
+_check_keepdims_support(PyUFuncObject *ufunc) {
+ int i;
+ int nin = ufunc->nin, nout = ufunc->nout;
+ int input_core_dims = ufunc->core_num_dims[0];
+ for (i = 1; i < nin + nout; i++) {
+ if (ufunc->core_num_dims[i] != (i < nin ? input_core_dims : 0)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s does not support keepdims: its signature %s requires "
+ "%s %d to have %d core dimensions, but keepdims can only "
+ "be used when all inputs have the same number of core "
+ "dimensions and all outputs have no core dimensions.",
+ ufunc_get_name_cstr(ufunc),
+ ufunc->core_signature,
+ i < nin ? "input" : "output",
+ i < nin ? i : i - nin,
+ ufunc->core_num_dims[i]);
+ return -1;
+ }
+ }
+ return 0;
+}
+
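These checks gate the new 'axis' and 'keepdims' keywords for gufuncs; a
signature such as "(i),(i)->()" passes both. A usage sketch with the test
gufunc defined earlier in this diff:

    import numpy as np
    from numpy.core._umath_tests import inner1d

    a = np.arange(12.0).reshape(3, 4)
    b = np.ones((3, 4))
    print(inner1d(a, b).shape)                 # (3,)   core axis is the last
    print(inner1d(a, b, keepdims=True).shape)  # (3, 1) core axis kept as size 1
    print(inner1d(a, b, axis=0).shape)         # (4,)   operate along axis 0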
/*
* Interpret a possible axes keyword argument, using it to fill the remap_axis
* array which maps default to actual axes for each operand, indexed as
* Returns 0 on success, and -1 on failure
*/
static int
-_parse_axes_arg(PyUFuncObject *ufunc, PyObject *axes, PyArrayObject **op,
- int broadcast_ndim, int **remap_axis) {
+_parse_axes_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axes,
+ PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
int nin = ufunc->nin;
- int nout = ufunc->nout;
- int nop = nin + nout;
+ int nop = ufunc->nargs;
int iop, list_size;
if (!PyList_Check(axes)) {
PyObject *op_axes_tuple, *axis_item;
int axis, op_axis;
- op_ncore = ufunc->core_num_dims[iop];
+ op_ncore = core_num_dims[iop];
if (op[iop] != NULL) {
op_ndim = PyArray_NDIM(op[iop]);
op_nbroadcast = op_ndim - op_ncore;
return 0;
}
+/*
+ * Simplified version of the above, using axis to fill the remap_axis
+ * array, which maps default to actual axes for each operand, indexed as
+ * remap_axis[iop][iaxis]. The default axis order has first all broadcast
+ * axes and then the core axes the gufunc operates on.
+ *
+ * Returns 0 on success, and -1 on failure
+ */
+static int
+_parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis,
+ PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
+ int nop = ufunc->nargs;
+ int iop, axis_int;
+
+ axis_int = PyArray_PyIntAsInt(axis);
+ if (error_converting(axis_int)) {
+ return -1;
+ }
+
+ for (iop = 0; iop < nop; ++iop) {
+ int axis, op_ndim, op_axis;
+
+ /* _check_axis_support ensures core_num_dims is 0 or 1 */
+ if (core_num_dims[iop] == 0) {
+ remap_axis[iop] = NULL;
+ continue;
+ }
+ if (op[iop]) {
+ op_ndim = PyArray_NDIM(op[iop]);
+ }
+ else {
+ op_ndim = broadcast_ndim + 1;
+ }
+ op_axis = axis_int; /* ensure we don't modify axis_int */
+ if (check_and_adjust_axis(&op_axis, op_ndim) < 0) {
+ return -1;
+ }
+ /* Are we actually remapping away from last axis? */
+ if (op_axis == op_ndim - 1) {
+ remap_axis[iop] = NULL;
+ continue;
+ }
+ remap_axis[iop][op_ndim - 1] = op_axis;
+ for (axis = 0; axis < op_axis; axis++) {
+ remap_axis[iop][axis] = axis;
+ }
+ for (axis = op_axis; axis < op_ndim - 1; axis++) {
+ remap_axis[iop][axis] = axis + 1;
+ }
+ } /* end of for(iop) loop over operands */
+ return 0;
+}
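The remap built here is equivalent to moving the requested axis to the end
before running the gufunc; a sketch of that equivalence:

    import numpy as np
    from numpy.core._umath_tests import inner1d

    x = np.random.rand(4, 3)
    y = np.random.rand(4, 3)
    r1 = inner1d(x, y, axis=0)
    r2 = inner1d(np.moveaxis(x, 0, -1), np.moveaxis(y, 0, -1))
    assert np.allclose(r1, r2)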
+
#define REMAP_AXIS(iop, axis) ((remap_axis != NULL && \
remap_axis[iop] != NULL)? \
remap_axis[iop][axis] : axis)
int nin, nout;
int i, j, idim, nop;
const char *ufunc_name;
- int retval = 0, subok = 1;
+ int retval, subok = 1;
int needs_api = 0;
PyArray_Descr *dtypes[NPY_MAXARGS];
/* Use remapped axes for generalized ufunc */
int broadcast_ndim, iter_ndim;
+ int core_num_dims_array[NPY_MAXARGS];
+ int *core_num_dims;
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
int **remap_axis = NULL;
/* The __array_prepare__ function to call for each output */
PyObject *arr_prep[NPY_MAXARGS];
- /*
- * This is either args, or args with the out= parameter from
- * kwds added appropriately.
- */
- PyObject *arr_prep_args = NULL;
+ /* The separated input and output arguments, parsed from args and kwds */
+ ufunc_full_args full_args = {NULL, NULL};
NPY_ORDER order = NPY_KEEPORDER;
/* Use the default assignment casting rule */
NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING;
- /* When provided, extobj, typetup, and axes contain borrowed references */
- PyObject *extobj = NULL, *type_tup = NULL, *axes = NULL;
+ /* other possible keyword arguments */
+ PyObject *extobj, *type_tup, *axes, *axis;
+ int keepdims = -1;
if (ufunc == NULL) {
PyErr_SetString(PyExc_ValueError, "function not supported");
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name);
- /* Initialize all the operands and dtypes to NULL */
+ /* Initialize all dtypes and __array_prepare__ call-backs to NULL */
for (i = 0; i < nop; ++i) {
- op[i] = NULL;
dtypes[i] = NULL;
arr_prep[i] = NULL;
}
NPY_UF_DBG_PRINT("Getting arguments\n");
- /* Get all the arguments */
+ /*
+ * Get all the arguments.
+ */
retval = get_ufunc_arguments(ufunc, args, kwds,
op, &order, &casting, &extobj,
- &type_tup, &subok, NULL, &axes);
+ &type_tup, &subok, NULL, &axes, &axis, &keepdims);
if (retval < 0) {
- goto fail;
+ NPY_UF_DBG_PRINT("Failure in getting arguments\n");
+ return retval;
+ }
+ /*
+ * If keepdims was passed in (and thus changed from the initial value
+ * on top), check the gufunc is suitable, i.e., that its inputs share
+ * the same number of core dimensions, and its outputs have none.
+ */
+ if (keepdims != -1) {
+ retval = _check_keepdims_support(ufunc);
+ if (retval < 0) {
+ goto fail;
+ }
+ }
+ if (axis != NULL) {
+ retval = _check_axis_support(ufunc);
+ if (retval < 0) {
+ goto fail;
+ }
+ }
+ /*
+ * If keepdims is set and true, signal that all operands have the same
+ * number of core dimensions.
+ */
+ if (keepdims == 1) {
+ int num_dims = ufunc->core_num_dims[0];
+ for (i = 0; i < nop; ++i) {
+ core_num_dims_array[i] = num_dims;
+ }
+ core_num_dims = core_num_dims_array;
+ }
+ else {
+ /* keepdims was not set or was false; no adjustment necessary */
+ core_num_dims = ufunc->core_num_dims;
+ keepdims = 0;
}
-
/*
* Check that operands have the minimum dimensions required.
* (Just checks core; broadcast dimensions are tested by the iterator.)
*/
for (i = 0; i < nop; i++) {
- if (op[i] != NULL && PyArray_NDIM(op[i]) < ufunc->core_num_dims[i]) {
+ if (op[i] != NULL && PyArray_NDIM(op[i]) < core_num_dims[i]) {
PyErr_Format(PyExc_ValueError,
"%s: %s operand %d does not have enough "
"dimensions (has %d, gufunc core with "
"signature %s requires %d)",
- ufunc_get_name_cstr(ufunc),
+ ufunc_name,
i < nin ? "Input" : "Output",
- i < nin ? i : i - nin, PyArray_NDIM(op[i]),
- ufunc->core_signature, ufunc->core_num_dims[i]);
+ i < nin ? i : i - nin,
+ PyArray_NDIM(op[i]),
+ ufunc->core_signature,
+ core_num_dims[i]);
+ retval = -1;
goto fail;
}
}
*/
broadcast_ndim = 0;
for (i = 0; i < nin; ++i) {
- int n = PyArray_NDIM(op[i]) - ufunc->core_num_dims[i];
+ int n = PyArray_NDIM(op[i]) - core_num_dims[i];
if (n > broadcast_ndim) {
broadcast_ndim = n;
}
*/
iter_ndim = broadcast_ndim;
for (i = nin; i < nop; ++i) {
- iter_ndim += ufunc->core_num_dims[i];
+ iter_ndim += core_num_dims[i];
}
if (iter_ndim > NPY_MAXDIMS) {
PyErr_Format(PyExc_ValueError,
}
/* Possibly remap axes. */
- if (axes) {
+ if (axes != NULL || axis != NULL) {
remap_axis = PyArray_malloc(sizeof(remap_axis[0]) * nop);
remap_axis_memory = PyArray_malloc(sizeof(remap_axis_memory[0]) *
nop * NPY_MAXDIMS);
for (i=0; i < nop; i++) {
remap_axis[i] = remap_axis_memory + i * NPY_MAXDIMS;
}
- retval = _parse_axes_arg(ufunc, axes, op, broadcast_ndim,
- remap_axis);
+ if (axis) {
+ retval = _parse_axis_arg(ufunc, core_num_dims, axis, op,
+ broadcast_ndim, remap_axis);
+ }
+ else {
+ retval = _parse_axes_arg(ufunc, core_num_dims, axes, op,
+ broadcast_ndim, remap_axis);
+ }
if(retval < 0) {
goto fail;
}
j = broadcast_ndim;
for (i = 0; i < nop; ++i) {
int n;
+
if (op[i]) {
/*
* Note that n may be negative if broadcasting
* extends into the core dimensions.
*/
- n = PyArray_NDIM(op[i]) - ufunc->core_num_dims[i];
+ n = PyArray_NDIM(op[i]) - core_num_dims[i];
}
else {
n = broadcast_ndim;
/* Except for when it belongs to this output */
if (i >= nin) {
int dim_offset = ufunc->core_offsets[i];
- int num_dims = ufunc->core_num_dims[i];
- /* Fill in 'iter_shape' and 'op_axes' for this output */
+ int num_dims = core_num_dims[i];
+ /*
+ * Fill in 'iter_shape' and 'op_axes' for the core dimensions
+ * of this output. Here, we have to be careful: if keepdims
+ * was used, then this axis is not a real core dimension,
+ * but is being added back for broadcasting, so its size is 1.
+ */
for (idim = 0; idim < num_dims; ++idim) {
- iter_shape[j] = core_dim_sizes[
+ iter_shape[j] = keepdims ? 1 : core_dim_sizes[
ufunc->core_dim_ixs[dim_offset + idim]];
op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim);
++j;
#endif
if (subok) {
+ if (make_full_arg_tuple(&full_args, nin, nout, args, kwds) < 0) {
+ goto fail;
+ }
+
/*
* Get the appropriate __array_prepare__ function to call
* for each output
*/
- _find_array_prepare(args, kwds, arr_prep, nin, nout);
-
- /* Set up arr_prep_args if a prep function was needed */
- for (i = 0; i < nout; ++i) {
- if (arr_prep[i] != NULL && arr_prep[i] != Py_None) {
- arr_prep_args = make_arr_prep_args(nin, args, kwds);
- break;
- }
- }
+ _find_array_prepare(full_args, arr_prep, nin, nout);
}
/* If the loop wants the arrays, provide them */
#endif
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&iter);
NPY_UF_DBG_PRINT("Executing inner loop\n");
goto fail;
}
- /* Write back any temporary data from PyArray_SetWritebackIfCopyBase */
- for(i=nin; i< nop; ++i)
- if (PyArray_ResolveWritebackIfCopy(NpyIter_GetOperandArray(iter)[i]) < 0)
- goto fail;
-
PyArray_free(inner_strides);
- NpyIter_Deallocate(iter);
+ if (NpyIter_Deallocate(iter) < 0) {
+ retval = -1;
+ }
+
/* The caller takes ownership of all the references in op */
for (i = 0; i < nop; ++i) {
Py_XDECREF(dtypes[i]);
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(extobj);
+ Py_XDECREF(axes);
+ Py_XDECREF(axis);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
- NPY_UF_DBG_PRINT("Returning Success\n");
+ NPY_UF_DBG_PRINT1("Returning code %d\n", retval);
- return 0;
+ return retval;
fail:
NPY_UF_DBG_PRINT1("Returning failure code %d\n", retval);
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(extobj);
+ Py_XDECREF(axes);
+ Py_XDECREF(axis);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
PyArray_free(remap_axis_memory);
PyArray_free(remap_axis);
return retval;
* This is either args, or args with the out= parameter from
* kwds added appropriately.
*/
- PyObject *arr_prep_args = NULL;
+ ufunc_full_args full_args = {NULL, NULL};
int trivial_loop_ok = 0;
NPY_ORDER order = NPY_KEEPORDER;
/* Use the default assignment casting rule */
NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING;
- /* When provided, extobj and typetup contain borrowed references */
- PyObject *extobj = NULL, *type_tup = NULL;
+ PyObject *extobj, *type_tup;
if (ufunc == NULL) {
PyErr_SetString(PyExc_ValueError, "function not supported");
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name);
- /* Initialize all the operands and dtypes to NULL */
+ /* Initialize all the dtypes and __array_prepare__ callbacks to NULL */
for (i = 0; i < nop; ++i) {
- op[i] = NULL;
dtypes[i] = NULL;
arr_prep[i] = NULL;
}
/* Get all the arguments */
retval = get_ufunc_arguments(ufunc, args, kwds,
op, &order, &casting, &extobj,
- &type_tup, &subok, &wheremask, NULL);
+ &type_tup, &subok, &wheremask, NULL, NULL, NULL);
if (retval < 0) {
- goto fail;
+ NPY_UF_DBG_PRINT("Failure in getting arguments\n");
+ return retval;
}
/*
#endif
if (subok) {
+ if (make_full_arg_tuple(&full_args, nin, nout, args, kwds) < 0) {
+ goto fail;
+ }
/*
* Get the appropriate __array_prepare__ function to call
* for each output
*/
- _find_array_prepare(args, kwds, arr_prep, nin, nout);
-
- /* Set up arr_prep_args if a prep function was needed */
- for (i = 0; i < nout; ++i) {
- if (arr_prep[i] != NULL && arr_prep[i] != Py_None) {
- arr_prep_args = make_arr_prep_args(nin, args, kwds);
- break;
- }
- }
+ _find_array_prepare(full_args, arr_prep, nin, nout);
}
/* Start with the floating-point exception flags cleared */
- PyUFunc_clearfperr();
+ npy_clear_floatstatus_barrier((char*)&ufunc);
/* Do the ufunc loop */
if (need_fancy) {
retval = execute_fancy_ufunc_loop(ufunc, wheremask,
op, dtypes, order,
- buffersize, arr_prep, arr_prep_args);
+ buffersize, arr_prep, full_args);
}
else {
NPY_UF_DBG_PRINT("Executing legacy inner loop\n");
retval = execute_legacy_ufunc_loop(ufunc, trivial_loop_ok,
op, dtypes, order,
- buffersize, arr_prep, arr_prep_args);
+ buffersize, arr_prep, full_args);
}
if (retval < 0) {
goto fail;
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(extobj);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
Py_XDECREF(wheremask);
- NPY_UF_DBG_PRINT("Returning Success\n");
+ NPY_UF_DBG_PRINT("Returning success code 0\n");
return 0;
Py_XDECREF(arr_prep[i]);
}
Py_XDECREF(type_tup);
- Py_XDECREF(arr_prep_args);
+ Py_XDECREF(extobj);
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
Py_XDECREF(wheremask);
return retval;
*/
static PyArrayObject *
PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
- int naxes, int *axes, PyArray_Descr *odtype, int keepdims)
+ int naxes, int *axes, PyArray_Descr *odtype, int keepdims,
+ PyObject *initial)
{
int iaxes, ndim;
npy_bool reorderable;
npy_bool axis_flags[NPY_MAXDIMS];
PyArray_Descr *dtype;
PyArrayObject *result;
- PyObject *identity = NULL;
+ PyObject *identity;
const char *ufunc_name = ufunc_get_name_cstr(ufunc);
/* These parameters come from a TLS global */
int buffersize = 0, errormask = 0;
+ static PyObject *NoValue = NULL;
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s.reduce\n", ufunc_name);
+ npy_cache_import("numpy", "_NoValue", &NoValue);
+ if (NoValue == NULL) {
+ return NULL;
+ }
+
ndim = PyArray_NDIM(arr);
/* Create an array of flags for reduction */
if (identity == NULL) {
return NULL;
}
- /*
- * The identity for a dynamic dtype like
- * object arrays can't be used in general
- */
- if (identity != Py_None && PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) {
+
+ /* Get the initial value */
+ if (initial == NULL || initial == NoValue) {
+ initial = identity;
+
+ /*
+ * The identity for a dynamic dtype like
+ * object arrays can't be used in general
+ */
+ if (initial != Py_None && PyArray_ISOBJECT(arr) && PyArray_SIZE(arr) != 0) {
+ Py_DECREF(initial);
+ initial = Py_None;
+ Py_INCREF(initial);
+ }
+ } else {
Py_DECREF(identity);
- identity = Py_None;
- Py_INCREF(identity);
+ Py_INCREF(initial); /* match the reference count in the if above */
}
/* Get the reduction dtype */
if (reduce_type_resolver(ufunc, arr, odtype, &dtype) < 0) {
- Py_DECREF(identity);
+ Py_DECREF(initial);
return NULL;
}
NPY_UNSAFE_CASTING,
axis_flags, reorderable,
keepdims, 0,
- identity,
+ initial,
reduce_loop,
ufunc, buffersize, ufunc_name, errormask);
Py_DECREF(dtype);
- Py_DECREF(identity);
+ Py_DECREF(initial);
return result;
}
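At the Python level, the initial argument threaded through here surfaces as ufunc.reduce(..., initial=...). A short sketch of the intended semantics:

    import numpy as np

    # initial seeds the reduction...
    assert np.add.reduce([1, 2, 3], initial=10) == 16

    # ...and makes empty reductions well defined even for ufuncs
    # whose identity cannot be used (or does not exist).
    assert np.minimum.reduce([], initial=np.inf) == np.inf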
*/
ndim_iter = ndim;
flags |= NPY_ITER_MULTI_INDEX;
- /* Add some more flags */
- op_flags[0] |= NPY_ITER_UPDATEIFCOPY|NPY_ITER_ALIGNED;
- op_flags[1] |= NPY_ITER_COPY|NPY_ITER_ALIGNED;
+ /*
+ * Add some more flags.
+ *
+ * The accumulation outer loop is 'elementwise' over the array, so turn
+ * on NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE. That is, in-place
+ * accumulate(x, out=x) is safe to do without temporary copies.
+ */
+ op_flags[0] |= NPY_ITER_UPDATEIFCOPY |
+ NPY_ITER_ALIGNED |
+ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
+ op_flags[1] |= NPY_ITER_COPY |
+ NPY_ITER_ALIGNED |
+ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
op_dtypes_param = op_dtypes;
op_dtypes[1] = op_dtypes[0];
NPY_UF_DBG_PRINT("Allocating outer iterator\n");
}
finish:
- /* Write back any temporary data from PyArray_SetWritebackIfCopyBase */
- if (PyArray_ResolveWritebackIfCopy(op[0]) < 0)
- goto fail;
Py_XDECREF(op_dtypes[0]);
NpyIter_Deallocate(iter);
NpyIter_Deallocate(iter_inner);
op_axes_arrays[2]};
npy_uint32 op_flags[3];
int i, idim, ndim, otype_final;
- int need_outer_iterator;
+ int need_outer_iterator = 0;
NpyIter *iter = NULL;
}
finish:
- if (op[0] && PyArray_ResolveWritebackIfCopy(op[0]) < 0) {
- goto fail;
- }
Py_XDECREF(op_dtypes[0]);
NpyIter_Deallocate(iter);
PyArray_Descr *otype = NULL;
PyArrayObject *out = NULL;
int keepdims = 0;
+ PyObject *initial = NULL;
static char *reduce_kwlist[] = {
- "array", "axis", "dtype", "out", "keepdims", NULL};
+ "array", "axis", "dtype", "out", "keepdims", "initial", NULL};
static char *accumulate_kwlist[] = {
"array", "axis", "dtype", "out", NULL};
static char *reduceat_kwlist[] = {
}
}
else {
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&i:reduce",
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&iO:reduce",
reduce_kwlist,
&op,
&axes_in,
PyArray_DescrConverter2, &otype,
PyArray_OutputConverter, &out,
- &keepdims)) {
+ &keepdims, &initial)) {
goto fail;
}
}
switch(operation) {
case UFUNC_REDUCE:
ret = PyUFunc_Reduce(ufunc, mp, out, naxes, axes,
- otype, keepdims);
+ otype, keepdims, initial);
break;
case UFUNC_ACCUMULATE:
if (naxes != 1) {
* should just have PyArray_Return called.
*/
static void
-_find_array_wrap(PyObject *args, PyObject *kwds,
+_find_array_wrap(ufunc_full_args args, PyObject *kwds,
PyObject **output_wrap, int nin, int nout)
{
- Py_ssize_t nargs;
- int i, idx_offset, start_idx;
+ int i;
PyObject *obj;
PyObject *wrap = NULL;
* Determine the wrapping function given by the input arrays
* (could be NULL).
*/
- wrap = _find_array_method(args, nin, npy_um_str_array_wrap);
+ wrap = _find_array_method(args.in, npy_um_str_array_wrap);
/*
* For all the output arrays decide what to do.
* done in that case.
*/
handle_out:
- nargs = PyTuple_GET_SIZE(args);
- /* Default is using positional arguments */
- obj = args;
- idx_offset = nin;
- start_idx = 0;
- if (nin == nargs && kwds != NULL) {
- /* There may be a keyword argument we can use instead */
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- if (obj == NULL) {
- /* No, go back to positional (even though there aren't any) */
- obj = args;
- }
- else {
- idx_offset = 0;
- if (PyTuple_Check(obj)) {
- /* If a tuple, must have all nout items */
- nargs = nout;
- }
- else {
- /* If the kwarg is not a tuple then it is an array (or None) */
- output_wrap[0] = _get_output_array_method(
- obj, npy_um_str_array_wrap, wrap);
- start_idx = 1;
- nargs = 1;
- }
+ if (args.out == NULL) {
+ for (i = 0; i < nout; i++) {
+ Py_XINCREF(wrap);
+ output_wrap[i] = wrap;
}
}
-
- for (i = start_idx; i < nout; ++i) {
- int j = idx_offset + i;
-
- if (j < nargs) {
+ else {
+ for (i = 0; i < nout; i++) {
output_wrap[i] = _get_output_array_method(
- PyTuple_GET_ITEM(obj, j), npy_um_str_array_wrap, wrap);
- }
- else {
- output_wrap[i] = wrap;
- Py_XINCREF(wrap);
+ PyTuple_GET_ITEM(args.out, i), npy_um_str_array_wrap, wrap);
}
}
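With the rewritten lookup, wrapping splits cleanly on whether out= was supplied: without it, the inputs' highest-priority __array_wrap__ is used for every output; with it, each explicit output supplies its own method (None for a plain ndarray). A sketch of the observable behaviour (the Tagged subclass is purely illustrative):

    import numpy as np

    class Tagged(np.ndarray):
        def __array_wrap__(self, arr, context=None):
            # Drop the subclass, just to make the effect visible.
            return np.asarray(arr)

    a = np.arange(3.).view(Tagged)

    # No out=: the input's __array_wrap__ decides the result type.
    assert type(np.negative(a)) is np.ndarray

    # Explicit plain-ndarray out=: the output's own (default) handling
    # applies, and the result is the out array itself.
    out = np.empty(3)
    assert np.negative(a, out=out) is out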
ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
{
int i;
- PyTupleObject *ret;
PyArrayObject *mps[NPY_MAXARGS];
PyObject *retobj[NPY_MAXARGS];
PyObject *wraparr[NPY_MAXARGS];
- PyObject *res;
PyObject *override = NULL;
+ ufunc_full_args full_args = {NULL, NULL};
int errval;
errval = PyUFunc_CheckOverride(ufunc, "__call__", args, kwds, &override);
return override;
}
- /*
- * Initialize all array objects to NULL to make cleanup easier
- * if something goes wrong.
- */
- for (i = 0; i < ufunc->nargs; i++) {
- mps[i] = NULL;
- }
-
errval = PyUFunc_GenericFunction(ufunc, args, kwds, mps);
if (errval < 0) {
- for (i = 0; i < ufunc->nargs; i++) {
- PyArray_DiscardWritebackIfCopy(mps[i]);
- Py_XDECREF(mps[i]);
- }
if (errval == -1) {
return NULL;
}
* None --- array-object passed in don't call PyArray_Return
* method --- the __array_wrap__ method to call.
*/
- _find_array_wrap(args, kwds, wraparr, ufunc->nin, ufunc->nout);
+ if (make_full_arg_tuple(&full_args, ufunc->nin, ufunc->nout, args, kwds) < 0) {
+ goto fail;
+ }
+ _find_array_wrap(full_args, kwds, wraparr, ufunc->nin, ufunc->nout);
/* wrap outputs */
for (i = 0; i < ufunc->nout; i++) {
int j = ufunc->nin+i;
PyObject *wrap = wraparr[i];
- if (wrap != NULL) {
- if (wrap == Py_None) {
- Py_DECREF(wrap);
- retobj[i] = (PyObject *)mps[j];
- continue;
+ if (wrap == NULL) {
+ /* default behavior */
+ retobj[i] = PyArray_Return(mps[j]);
+ }
+ else if (wrap == Py_None) {
+ Py_DECREF(wrap);
+ retobj[i] = (PyObject *)mps[j];
+ }
+ else {
+ PyObject *res;
+ PyObject *args_tup;
+
+ /* Call the method with appropriate context */
+ args_tup = _get_wrap_prepare_args(full_args);
+ if (args_tup == NULL) {
+ goto fail;
}
- res = PyObject_CallFunction(wrap, "O(OOi)", mps[j], ufunc, args, i);
+ res = PyObject_CallFunction(
+ wrap, "O(OOi)", mps[j], ufunc, args_tup, i);
+ Py_DECREF(args_tup);
+
/* Handle __array_wrap__ that does not accept a context argument */
if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
PyErr_Clear();
res = PyObject_CallFunctionObjArgs(wrap, mps[j], NULL);
}
Py_DECREF(wrap);
+ Py_DECREF(mps[j]);
+ mps[j] = NULL; /* Prevent fail double-freeing this */
if (res == NULL) {
goto fail;
}
- else {
- Py_DECREF(mps[j]);
- retobj[i] = res;
- continue;
- }
- }
- else {
- /* default behavior */
- retobj[i] = PyArray_Return(mps[j]);
+ retobj[i] = res;
}
-
}
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
+
if (ufunc->nout == 1) {
return retobj[0];
}
else {
+ PyTupleObject *ret;
+
ret = (PyTupleObject *)PyTuple_New(ufunc->nout);
for (i = 0; i < ufunc->nout; i++) {
PyTuple_SET_ITEM(ret, i, retobj[i]);
}
fail:
+ Py_XDECREF(full_args.in);
+ Py_XDECREF(full_args.out);
for (i = ufunc->nin; i < ufunc->nargs; i++) {
Py_XDECREF(mps[i]);
}
const char *name, const char *doc, int unused)
{
return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes,
- nin, nout, identity, name, doc, 0, NULL);
+ nin, nout, identity, name, doc, unused, NULL);
}
/*UFUNC_API*/
NpyIter_Deallocate(iter_buffer);
- if (op1_array != (PyArrayObject*)op1) {
- PyArray_ResolveWritebackIfCopy(op1_array);
- }
Py_XDECREF(op2_array);
Py_XDECREF(iter);
Py_XDECREF(iter2);
}
fail:
-
+ /* iter_buffer has already been deallocated, don't use NpyIter_Deallocate */
if (op1_array != (PyArrayObject*)op1) {
- PyArray_ResolveWritebackIfCopy(op1_array);
+ PyArray_DiscardWritebackIfCopy(op1_array);
}
Py_XDECREF(op2_array);
Py_XDECREF(iter);
/* Keywords are ignored for now */
PyObject *function, *pyname = NULL;
- int nin, nout, i;
+ int nin, nout, i, nargs;
PyUFunc_PyFuncData *fdata;
PyUFuncObject *self;
- char *fname, *str;
+ char *fname, *str, *types, *doc;
Py_ssize_t fname_len = -1;
+ void * ptr, **data;
int offset[2];
if (!PyArg_ParseTuple(args, "Oii:frompyfunc", &function, &nin, &nout)) {
PyErr_SetString(PyExc_TypeError, "function must be callable");
return NULL;
}
- if (nin + nout > NPY_MAXARGS) {
- PyErr_Format(PyExc_ValueError,
- "Cannot construct a ufunc with more than %d operands "
- "(requested number were: inputs = %d and outputs = %d)",
- NPY_MAXARGS, nin, nout);
- return NULL;
- }
- self = PyArray_malloc(sizeof(PyUFuncObject));
- if (self == NULL) {
- return NULL;
- }
- PyObject_Init((PyObject *)self, &PyUFunc_Type);
-
- self->userloops = NULL;
- self->nin = nin;
- self->nout = nout;
- self->nargs = nin + nout;
- self->identity = PyUFunc_None;
- self->functions = pyfunc_functions;
- self->ntypes = 1;
-
- /* generalized ufunc */
- self->core_enabled = 0;
- self->core_num_dim_ix = 0;
- self->core_num_dims = NULL;
- self->core_dim_ixs = NULL;
- self->core_offsets = NULL;
- self->core_signature = NULL;
- self->op_flags = PyArray_malloc(sizeof(npy_uint32)*self->nargs);
- if (self->op_flags == NULL) {
- return PyErr_NoMemory();
- }
- memset(self->op_flags, 0, sizeof(npy_uint32)*self->nargs);
- self->iter_flags = 0;
-
- self->type_resolver = &object_ufunc_type_resolver;
- self->legacy_inner_loop_selector = &object_ufunc_loop_selector;
+ nargs = nin + nout;
pyname = PyObject_GetAttrString(function, "__name__");
if (pyname) {
}
/*
- * self->ptr holds a pointer for enough memory for
+ * ptr will be assigned to self->ptr, and holds enough memory for
* self->data[0] (fdata)
* self->data
* self->name
if (i) {
offset[0] += (sizeof(void *) - i);
}
- offset[1] = self->nargs;
- i = (self->nargs % sizeof(void *));
+ offset[1] = nargs;
+ i = (nargs % sizeof(void *));
if (i) {
offset[1] += (sizeof(void *)-i);
}
- self->ptr = PyArray_malloc(offset[0] + offset[1] + sizeof(void *) +
+ ptr = PyArray_malloc(offset[0] + offset[1] + sizeof(void *) +
(fname_len + 14));
- if (self->ptr == NULL) {
+ if (ptr == NULL) {
Py_XDECREF(pyname);
return PyErr_NoMemory();
}
- Py_INCREF(function);
- self->obj = function;
- fdata = (PyUFunc_PyFuncData *)(self->ptr);
+ fdata = (PyUFunc_PyFuncData *)(ptr);
+ fdata->callable = function;
fdata->nin = nin;
fdata->nout = nout;
- fdata->callable = function;
- self->data = (void **)(((char *)self->ptr) + offset[0]);
- self->data[0] = (void *)fdata;
- self->types = (char *)self->data + sizeof(void *);
- for (i = 0; i < self->nargs; i++) {
- self->types[i] = NPY_OBJECT;
+ data = (void **)(((char *)ptr) + offset[0]);
+ data[0] = (void *)fdata;
+ types = (char *)data + sizeof(void *);
+ for (i = 0; i < nargs; i++) {
+ types[i] = NPY_OBJECT;
}
- str = self->types + offset[1];
+ str = types + offset[1];
memcpy(str, fname, fname_len);
memcpy(str+fname_len, " (vectorized)", 14);
- self->name = str;
-
Py_XDECREF(pyname);
/* Do a better job someday */
- self->doc = "dynamic ufunc based on a python function";
+ doc = "dynamic ufunc based on a python function";
+
+ self = (PyUFuncObject *)PyUFunc_FromFuncAndData(
+ (PyUFuncGenericFunction *)pyfunc_functions, data,
+ types, /* ntypes */ 1, nin, nout, PyUFunc_None,
+ str, doc, /* unused */ 0);
+
+ if (self == NULL) {
+ PyArray_free(ptr);
+ return NULL;
+ }
+ Py_INCREF(function);
+ self->obj = function;
+ self->ptr = ptr;
+
+ self->type_resolver = &object_ufunc_type_resolver;
+ self->legacy_inner_loop_selector = &object_ufunc_loop_selector;
return (PyObject *)self;
}
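The refactor routes np.frompyfunc through PyUFunc_FromFuncAndData instead of hand-building the ufunc object; observable behaviour is unchanged. For reference:

    import numpy as np

    # frompyfunc wraps an arbitrary Python callable as an object-dtype ufunc.
    py_hypot = np.frompyfunc(lambda x, y: (x*x + y*y) ** 0.5, 2, 1)
    out = py_hypot(np.array([3, 5]), np.array([4, 12]))
    assert out.dtype == object
    assert list(out) == [5.0, 13.0]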
#include <stdio.h>
#if defined(NPY_PY3K)
-#define RETVAL m
+#define RETVAL(x) x
PyMODINIT_FUNC PyInit_umath(void)
#else
-#define RETVAL
+#define RETVAL(x)
PyMODINIT_FUNC initumath(void)
#endif
{
m = Py_InitModule("umath", methods);
#endif
if (!m) {
- return RETVAL;
+ goto err;
}
/* Import the array */
PyErr_SetString(PyExc_ImportError,
"umath failed: Could not import array core.");
}
- return RETVAL;
+ goto err;
}
/* Initialize the types */
if (PyType_Ready(&PyUFunc_Type) < 0)
- return RETVAL;
+ goto err;
/* Add some symbolic constants to the module */
d = PyModule_GetDict(m);
goto err;
}
- return RETVAL;
+ return RETVAL(m);
err:
/* Check for errors */
PyErr_SetString(PyExc_RuntimeError,
"cannot load umath module.");
}
- return RETVAL;
+ return RETVAL(NULL);
}
from __future__ import division, absolute_import, print_function
-from numpy.testing import assert_, run_module_suite
+from numpy.testing import assert_
import numbers
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_raises, HAS_REFCOUNT
-)
+ assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT
+ )
# Switch to the new behaviour when NPY_RELAXED_STRIDES_CHECKING is set.
NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous
b = a.astype('f4', subok=0, copy=False)
assert_(a is b)
- a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')
+ class MyNDArray(np.ndarray):
+ pass
- # subok=True passes through a matrix
+ a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
+
+ # subok=True passes through a subclass
b = a.astype('f4', subok=True, copy=False)
assert_(a is b)
# subok=True is default, and creates a subtype on a cast
b = a.astype('i4', copy=False)
assert_equal(a, b)
- assert_equal(type(b), np.matrix)
+ assert_equal(type(b), MyNDArray)
- # subok=False never returns a matrix
+ # subok=False never returns a subclass
b = a.astype('f4', subok=False, copy=False)
assert_equal(a, b)
assert_(not (a is b))
- assert_(type(b) is not np.matrix)
+ assert_(type(b) is not MyNDArray)
# Make sure converting from string object to fixed length string
# does not truncate.
result = np.broadcast_arrays(a, b)
assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
-
-if __name__ == "__main__":
- run_module_suite()
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
-import sys, gc
+import sys
+import gc
+import pytest
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises, assert_warns, dec,
-)
+ assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
+ )
import textwrap
class TestArrayRepr(object):
" [(1,), (1,)]], dtype=[('a', '<i4')])"
)
- @dec.knownfailureif(True, "See gh-10544")
+ @pytest.mark.xfail(reason="See gh-10544")
def test_object_subclass(self):
class sub(np.ndarray):
def __new__(cls, inp):
"[ 'xxxxx']"
)
- @dec._needs_refcount
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount(self):
# make sure we do not hold references to the array due to a recursive
# closure (gh-10620)
np.array(1.), style=repr)
# but not in legacy mode
np.array2string(np.array(1.), style=repr, legacy='1.13')
+ # gh-10934: style was broken in legacy mode; check that it works
+ np.array2string(np.array(1.), legacy='1.13')
def test_float_spacing(self):
x = np.array([1., 2., 3.])
with np.printoptions(**opts) as ctx:
saved_opts = ctx.copy()
assert_equal({k: saved_opts[k] for k in opts}, opts)
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy
import numpy as np
import datetime
+import pytest
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises,
- assert_warns, dec, suppress_warnings
-)
+ assert_, assert_equal, assert_raises, assert_warns, suppress_warnings
+ )
# Use pytz to test out various time zones if available
try:
except ImportError:
_has_pytz = False
+try:
+ RecursionError
+except NameError:
+ RecursionError = RuntimeError  # python < 3.5
+
class TestDateTime(object):
def test_datetime_dtype_creation(self):
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_compare_generic_nat(self):
- # regression tests for GH6452
+ # regression tests for gh-6452
assert_equal(np.datetime64('NaT'),
np.datetime64('2000') + np.timedelta64('NaT'))
# nb. we may want to make NaT != NaT true in the future
# find "supertype" for non-dates and dates
b = np.bool_(True)
- dt = np.datetime64('1970-01-01', 'M')
- arr = np.array([b, dt])
+ dm = np.datetime64('1970-01-01', 'M')
+ d = datetime.date(1970, 1, 1)
+ dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
+
+ arr = np.array([b, dm])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.date(1970, 1, 1)
- arr = np.array([b, dt])
+ arr = np.array([b, d])
assert_equal(arr.dtype, np.dtype('O'))
- dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
+ arr = np.array([d, d]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[D]'))
+
+ arr = np.array([dt, dt]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[us]'))
+
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
a = np.timedelta64(1, 'Y')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'm')
+ a = datetime.timedelta(seconds=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta(weeks=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta()
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+
+ def test_timedelta_object_array_conversion(self):
+ # Regression test for gh-11096
+ inputs = [datetime.timedelta(28),
+ datetime.timedelta(30),
+ datetime.timedelta(31)]
+ expected = np.array([28, 30, 31], dtype='timedelta64[D]')
+ actual = np.array(inputs, dtype='timedelta64[D]')
+ assert_equal(expected, actual)
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
assert_equal(pickle.loads(pickle.dumps(dt)), dt)
dt = np.dtype('M8[W]')
assert_equal(pickle.loads(pickle.dumps(dt)), dt)
+ scalar = np.datetime64('2016-01-01T00:00:00.000000000')
+ assert_equal(pickle.loads(pickle.dumps(scalar)), scalar)
+ delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
+ assert_equal(pickle.loads(pickle.dumps(delta)), delta)
# Check that loading pickles from 1.6 works
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
# Allow space instead of 'T' between date and time
assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
+ # Allow positive years
+ assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
+ np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
# Allow negative years
assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
# UTC specifier
+ with assert_warns(DeprecationWarning):
+ assert_equal(
+ np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
+ np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
'2032-01-01')
- @dec.skipif(not _has_pytz, "The pytz module is not available.")
+ @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.")
def test_datetime_as_string_timezone(self):
# timezone='local' vs 'UTC'
a = np.datetime64('2010-03-15T06:30', 'm')
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
+ def test_corecursive_input(self):
+ # construct a co-recursive list
+ a, b = [], []
+ a.append(b)
+ b.append(a)
+ obj_arr = np.array([None])
+ obj_arr[0] = a
+
+ # gh-11154: This shouldn't cause a C stack overflow
+ assert_raises(RecursionError, obj_arr.astype, 'M8')
+ assert_raises(RecursionError, obj_arr.astype, 'm8')
+
class TestDateTimeData(object):
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
assert_equal(np.datetime_data(a.dtype), ('D', 1))
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises,
+ assert_, assert_equal, assert_array_equal, assert_raises,
suppress_warnings,
-)
+ )
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
# empty chararray instead of a chararray with a single empty string in it.
s = np.chararray((4,))
assert_(s[[]].size == 0)
-
-
-if __name__ == "__main__":
- run_module_suite()
import sys
import operator
import warnings
+import pytest
import numpy as np
from numpy.testing import (
- run_module_suite, assert_raises, assert_warns, assert_no_warnings,
- assert_array_equal, assert_, dec)
+ assert_raises, assert_warns, assert_no_warnings, assert_array_equal,
+ assert_
+ )
try:
import pytz
warning_cls = np.VisibleDeprecationWarning
+class TestNonTupleNDIndexDeprecation(object):
+ def test_basic(self):
+ a = np.zeros((5, 5))
+ with warnings.catch_warnings():
+ warnings.filterwarnings('always')
+ assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_warns(FutureWarning, a.__getitem__, [slice(None)])
+
+ warnings.filterwarnings('error')
+ assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_raises(FutureWarning, a.__getitem__, [slice(None)])
+
+ # a[[0, 1]] was always advanced indexing, so no error/warning
+ a[[0, 1]]
+
+
class TestRankDeprecation(_DeprecationTestCase):
"""Test that np.rank is deprecated. The function should simply be
removed. The VisibleDeprecationWarning may become unnecessary.
self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
- @dec.skipif(not _has_pytz, "The pytz module is not available.")
+ @pytest.mark.skipif(not _has_pytz,
+ reason="The pytz module is not available.")
def test_datetime(self):
tz = pytz.timezone('US/Eastern')
dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
-if __name__ == "__main__":
- run_module_suite()
+class TestGeneratorSum(_DeprecationTestCase):
+ # 2018-02-25, 1.15.0
+ def test_generator_sum(self):
+ self.assert_deprecated(np.sum, args=((i for i in range(5)),))
+
+
+class TestFromstring(_DeprecationTestCase):
+ # 2017-10-19, 1.14
+ def test_fromstring(self):
+ self.assert_deprecated(np.fromstring, args=('\x00'*80,))
import pickle
import sys
import operator
+import pytest
+import ctypes
import numpy as np
from numpy.core._rational_tests import rational
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises,
- dec
-)
+from numpy.testing import assert_, assert_equal, assert_raises
def assert_dtype_equal(a, b):
assert_equal(a, b)
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
+ # Array of subtype should preserve alignment
+ dt1 = np.dtype([('a', '|i1'),
+ ('b', [('f0', '<i2'),
+ ('f1', '<f4')], 2)], align=True)
+ assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
+ ('b', [('f0', '<i2'), ('', '|V2'),
+ ('f1', '<f4')], (2,))])
+
def test_union_struct(self):
# Should be able to create union dtypes
assert_equal(repr(dt),
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
- @dec.skipif(sys.version_info[0] >= 3)
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
def test_dtype_str_with_long_in_shape(self):
# Pull request #376, should not error
np.dtype('(1L,)i4')
assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
-if __name__ == "__main__":
- run_module_suite()
+class TestFromCTypes(object):
+
+ @staticmethod
+ def check(ctype, dtype):
+ dtype = np.dtype(dtype)
+ assert_equal(np.dtype(ctype), dtype)
+ assert_equal(np.dtype(ctype()), dtype)
+
+ def test_array(self):
+ c8 = ctypes.c_uint8
+ self.check( 3 * c8, (np.uint8, (3,)))
+ self.check( 1 * c8, (np.uint8, (1,)))
+ self.check( 0 * c8, (np.uint8, (0,)))
+ self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
+ self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
+
+ def test_padded_structure(self):
+ class PaddedStruct(ctypes.Structure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', np.uint8),
+ ('b', np.uint16)
+ ], align=True)
+ self.check(PaddedStruct, expected)
+
+ @pytest.mark.xfail(reason="_pack_ is ignored - see gh-11651")
+ def test_packed_structure(self):
+ class PackedStructure(ctypes.Structure):
+ _pack_ = 1
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', np.uint8),
+ ('b', np.uint16)
+ ])
+ self.check(PackedStructure, expected)
+
+ @pytest.mark.xfail(sys.byteorder != 'little',
+ reason="non-native endianness does not work - see gh-10533")
+ def test_little_endian_structure(self):
+ class PaddedStruct(ctypes.LittleEndianStructure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', '<B'),
+ ('b', '<H')
+ ], align=True)
+ self.check(PaddedStruct, expected)
+
+ @pytest.mark.xfail(sys.byteorder != 'big',
+ reason="non-native endianness does not work - see gh-10533")
+ def test_big_endian_structure(self):
+ class PaddedStruct(ctypes.BigEndianStructure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', '>B'),
+ ('b', '>H')
+ ], align=True)
+ self.check(PaddedStruct, expected)
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_raises, suppress_warnings
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_raises, suppress_warnings
)
# Setup for optimize einsum
global_size_dict[char] = size
-class TestEinSum(object):
+class TestEinsum(object):
def test_einsum_errors(self):
for do_opt in [True, False]:
# Need enough arguments
assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
[10.] * 2)
- p = np.ones((1, 5))
- q = np.ones((5, 5))
+ # a blas-compatible contraction broadcasting case which was failing
+ # for optimize=True (ticket #10930)
+ x = np.array([2., 3.])
+ y = np.array([4.])
+ assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
+ assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)
+
+ # an all-ones array was hiding the bug (ticket #10930)
+ p = np.ones((1, 5)) / 2
+ q = np.ones((5, 5)) / 2
for optimize in (True, False):
assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
optimize=optimize),
optimize=optimize))
assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
optimize=optimize),
- np.full((1, 5), 5))
+ np.full((1, 5), 1.25))
+
+ # Cases which were failing (gh-10899)
+ x = np.eye(2, dtype=dtype)
+ y = np.ones(2, dtype=dtype)
+ assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
+ [2.]) # contig_contig_outstride0_two
+ assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
+ [2.]) # stride0_contig_outstride0_two
+ assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
+ [2.]) # contig_stride0_outstride0_two
def test_einsum_sums_int8(self):
self.check_einsum_sums('i1')
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
+ def test_subscript_range(self):
+ # Issue #7741: make sure all letters of the Latin alphabet (both
+ # uppercase and lowercase) can be used when creating a subscript
+ # from arrays
+ a = np.ones((2, 3))
+ b = np.ones((3, 4))
+ np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
+ np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
+ np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
+ assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
+ assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
+
def test_einsum_broadcast(self):
# Issue #2455 change in handling ellipsis
# remove the 'middle broadcast' error
res = np.einsum('...ij,...jk->...ik', a, a, out=out)
assert_equal(res, tgt)
- def optimize_compare(self, string):
+ def test_out_is_res(self):
+ a = np.arange(9).reshape(3, 3)
+ res = np.einsum('...ij,...jk->...ik', a, a, out=a)
+ assert res is a
+
+ def optimize_compare(self, subscripts, operands=None):
# Tests all paths of the optimization function against
# conventional einsum
- operands = [string]
- terms = string.split('->')[0].split(',')
- for term in terms:
- dims = [global_size_dict[x] for x in term]
- operands.append(np.random.rand(*dims))
-
- noopt = np.einsum(*operands, optimize=False)
- opt = np.einsum(*operands, optimize='greedy')
+ if operands is None:
+ args = [subscripts]
+ terms = subscripts.split('->')[0].split(',')
+ for term in terms:
+ dims = [global_size_dict[x] for x in term]
+ args.append(np.random.rand(*dims))
+ else:
+ args = [subscripts] + operands
+
+ noopt = np.einsum(*args, optimize=False)
+ opt = np.einsum(*args, optimize='greedy')
assert_almost_equal(opt, noopt)
- opt = np.einsum(*operands, optimize='optimal')
+ opt = np.einsum(*args, optimize='optimal')
assert_almost_equal(opt, noopt)
def test_hadamard_like_products(self):
b = np.einsum('bbcdc->d', a)
assert_equal(b, [12])
+ def test_broadcasting_dot_cases(self):
+ # Ensures broadcasting cases are not mistaken for GEMM
+ a = np.random.rand(1, 5, 4)
+ b = np.random.rand(4, 6)
+ c = np.random.rand(5, 6)
+ d = np.random.rand(10)
+
+ self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
+ self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
+
+ e = np.random.rand(1, 1, 5, 4)
+ f = np.random.rand(7, 7)
+ self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
+ self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
+
+ # Edge case found in gh-11308
+ g = np.arange(64).reshape(2, 4, 8)
+ self.optimize_compare('obk,ijk->ioj', operands=[g, g])
+
+
-class TestEinSumPath(object):
+class TestEinsumPath(object):
def build_operands(self, string, size_dict=global_size_dict):
# Builds views based off initial operands
long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
path, path_str = np.einsum_path(*long_test1, optimize='greedy')
self.assert_path_equal(path, ['einsum_path',
- (1, 4), (2, 4), (1, 4), (1, 3), (1, 2), (0, 1)])
+ (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
path, path_str = np.einsum_path(*long_test1, optimize='optimal')
self.assert_path_equal(path, ['einsum_path',
# Long test 2
long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
path, path_str = np.einsum_path(*long_test2, optimize='greedy')
self.assert_path_equal(path, ['einsum_path',
(3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
path, path_str = np.einsum_path(*long_test2, optimize='optimal')
self.assert_path_equal(path, ['einsum_path',
(0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
# Edge test4
edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
- self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 2), (0, 1)])
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
path, path_str = np.einsum_path(*path_test, optimize=True)
- self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 2), (0, 1)])
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
# no error for any spacing
np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
-
-if __name__ == "__main__":
- run_module_suite()
+def test_overlap():
+ a = np.arange(9, dtype=int).reshape(3, 3)
+ b = np.arange(9, dtype=int).reshape(3, 3)
+ d = np.dot(a, b)
+ # sanity check
+ c = np.einsum('ij,jk->ik', a, b)
+ assert_equal(c, d)
+ # gh-10080: out overlaps one of the operands
+ c = np.einsum('ij,jk->ik', a, b, out=b)
+ assert_equal(c, d)
from __future__ import division, absolute_import, print_function
import platform
+import pytest
import numpy as np
-from numpy.testing import assert_, run_module_suite, dec
+from numpy.testing import assert_
class TestErrstate(object):
- @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_invalid(self):
with np.errstate(all='raise', under='ignore'):
a = -np.arange(3)
with np.errstate(call=None):
assert_(np.geterrcall() is None, 'call is not None')
assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')
-
-
-if __name__ == "__main__":
- run_module_suite()
import itertools
import contextlib
import operator
+import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
from numpy.compat import long
-from numpy.testing import assert_raises, assert_equal, dec
+from numpy.testing import assert_raises, assert_equal
INT64_MAX = np.iinfo(np.int64).max
assert_equal(d, c)
-@dec.slow
+@pytest.mark.slow
def test_divmod_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
if c != d:
assert_equal(d, c)
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
-from numpy import (logspace, linspace, geomspace, dtype, array, sctypes,
- arange, isnan, ndarray, sqrt, nextafter)
+from numpy import (
+ logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
+ ndarray, sqrt, nextafter
+ )
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises,
- assert_array_equal, assert_allclose, suppress_warnings
-)
+ assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
+ suppress_warnings
+ )
class PhysicalQuantity(float):
assert_(isinstance(y, tuple) and len(y) == 2 and
len(y[0]) == num and isnan(y[1]),
'num={0}, endpoint={1}'.format(num, ept))
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
-from numpy.testing import (
- run_module_suite, assert_equal, assert_, assert_raises
-)
-from numpy.core.getlimits import (_discovered_machar, _float16_ma, _float32_ma,
- _float64_ma, _float128_ma, _float80_ma)
+from numpy.testing import assert_equal, assert_, assert_raises
+from numpy.core.getlimits import (
+ _discovered_machar, _float16_ma, _float32_ma, _float64_ma, _float128_ma,
+ _float80_ma
+ )
##################################################
assert_(info.nmant > 1)
assert_(info.minexp < -1)
assert_(info.maxexp > 1)
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
import platform
+import pytest
import numpy as np
from numpy import uint16, float16, float32, float64
-from numpy.testing import run_module_suite, assert_, assert_equal, dec
+from numpy.testing import assert_, assert_equal
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
assert_equal(np.power(b32, a16).dtype, float16)
assert_equal(np.power(b32, b16).dtype, float32)
- @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
+ @pytest.mark.skipif(platform.machine() == "armv5tel",
+ reason="See gh-413.")
def test_half_fpe(self):
with np.errstate(all='raise'):
sx16 = np.array((1e-4,), dtype=float16)
c = np.array(b)
assert_(c.dtype == float16)
assert_equal(a, c)
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import run_module_suite, assert_raises
+from numpy.testing import assert_raises
class TestIndexErrors(object):
'''Tests to exercise indexerrors not covered by other tests.'''
a = np.zeros((0, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
-
-if __name__ == "__main__":
- run_module_suite()
import warnings
import functools
import operator
+import pytest
import numpy as np
from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises,
- assert_array_equal, assert_warns, dec, HAS_REFCOUNT, suppress_warnings,
-)
+ assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,
+ HAS_REFCOUNT, suppress_warnings,
+ )
class TestIndexing(object):
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
+ def test_trivial_fancy_not_possible(self):
+ # Test that the fast path for trivial assignment is not incorrectly
+ # used when the index is not contiguous or not 1D; see also gh-11467.
+ a = np.arange(6)
+ idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
+ assert_array_equal(a[idx], idx)
+
+ # This case must not go into the fast path; note that idx is
+ # a non-contiguous, non-1D array here.
+ a[idx] = -1
+ res = np.arange(6)
+ res[0] = -1
+ res[3] = -1
+ assert_array_equal(a, res)
+
def test_nonbaseclass_values(self):
class SubClass(np.ndarray):
def __array_finalize__(self, old):
assert_(isinstance(s[[0, 1, 2]], SubClass))
assert_(isinstance(s[s > 0], SubClass))
- def test_matrix_fancy(self):
- # The matrix class messes with the shape. While this is always
- # weird (getitem is not used, it does not have setitem nor knows
- # about fancy indexing), this tests gh-3110
- m = np.matrix([[1, 2], [3, 4]])
-
- assert_(isinstance(m[[0,1,0], :], np.matrix))
-
- # gh-3110. Note the transpose currently because matrices do *not*
- # support dimension fixing for fancy indexing correctly.
- x = np.asmatrix(np.arange(50).reshape(5,10))
- assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
-
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
- @dec._needs_refcount
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_slice_decref_getsetslice(self):
# See gh-10066, a temporary slice object should be discarded.
# This test is only really interesting on Python 2 since
# is not safe. It rejects np.array([1., 2.]) but not
# [1., 2.] as index (same for ie. np.take).
# (Note the importance of empty lists if changing this here)
- indx = np.array(indx, dtype=np.intp)
+ try:
+ indx = np.array(indx, dtype=np.intp)
+ except ValueError:
+ raise IndexError
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
raise IndexError('arrays used as indices must be of '
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
- arr = arr.reshape((arr.shape[:ax]
- + mi.shape
- + arr.shape[ax+1:]))
+ try:
+ arr = arr.reshape((arr.shape[:ax]
+ + mi.shape
+ + arr.shape[ax+1:]))
+ except ValueError:
+ # too many dimensions, probably
+ raise IndexError
ax += mi.ndim
continue
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
- assert_raises(Exception, arr.__getitem__, index)
- assert_raises(Exception, arr.__setitem__, index, 0)
+ assert_raises(type(e), arr.__getitem__, index)
+ assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
- assert_raises(Exception, arr.__getitem__, index)
- assert_raises(Exception, arr.__setitem__, index, 0)
+ assert_raises(type(e), arr.__getitem__, index)
+ assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
def test_1d(self):
a = np.arange(10)
- with warnings.catch_warnings():
- warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
- for index in self.complex_indices:
- self._check_single_index(a, index)
+ for index in self.complex_indices:
+ self._check_single_index(a, index)
class TestFloatNonIntegerArgument(object):
"""
a = a.reshape(5, 2)
assign(a, 4, 10)
assert_array_equal(a[-1], [10, 10])
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_raises,
- assert_array_equal, HAS_REFCOUNT
-)
+ assert_, assert_raises, assert_array_equal, HAS_REFCOUNT
+ )
class TestTake(object):
b = np.array([0, 1, 2, 3, 4, 5])
assert_array_equal(a, b)
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
-import locale
+import pytest
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, dec, assert_raises,
- assert_array_equal, temppath,
-)
-from ._locales import CommaDecimalPointLocale
+ assert_, assert_equal, assert_raises, assert_array_equal, temppath,
+ )
+from numpy.core.tests._locales import CommaDecimalPointLocale
LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
# 0.1 not exactly representable in base 2 floating point.
repr_precision = len(repr(np.longdouble(0.1)))
# +2 from macro block starting around line 842 in scalartypes.c.src.
-@dec.skipif(LD_INFO.precision + 2 >= repr_precision,
- "repr precision not enough to show eps")
+@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
+ reason="repr precision not enough to show eps")
def test_repr_roundtrip():
# We will only see eps in repr if within printing precision.
o = 1 + LD_INFO.eps
np.longdouble(b"1.2")
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_repr_roundtrip_bytes():
o = 1 + LD_INFO.eps
assert_equal(np.longdouble(repr(o).encode("ascii")), o)
assert_raises(ValueError, np.longdouble, "1.0 flub")
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_fromstring():
o = 1 + LD_INFO.eps
s = (" " + repr(o))*5
res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
- @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_fromfile(self):
with temppath() as path:
with open(path, 'wt') as f:
res = np.fromfile(path, dtype=np.longdouble, sep="\n")
assert_equal(res, self.tgt)
- @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_genfromtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
res = np.genfromtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
- @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_loadtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
res = np.loadtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
- @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_tofile_roundtrip(self):
with temppath() as path:
self.tgt.tofile(path, sep=" ")
assert_(repr(o) != '1')
-@dec.knownfailureif(longdouble_longer_than_double, "BUG #2376")
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_format():
o = 1 + LD_INFO.eps
assert_("{0:.40g}".format(o) != '1')
-@dec.knownfailureif(longdouble_longer_than_double, "BUG #2376")
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_percent():
o = 1 + LD_INFO.eps
assert_("%.40g" % o != '1')
-@dec.knownfailureif(longdouble_longer_than_double, "array repr problem")
-@dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l")
+@pytest.mark.skipif(longdouble_longer_than_double,
+ reason="array repr problem")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
def test_array_repr():
o = 1 + LD_INFO.eps
a = np.array([o])
def test_fromstring_foreign_value(self):
b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
assert_array_equal(b[0], 1)
-
-
-if __name__ == "__main__":
- run_module_suite()
from numpy.core.machar import MachAr
import numpy.core.numerictypes as ntypes
from numpy import errstate, array
-from numpy.testing import run_module_suite
+
class TestMachAr(object):
def _run_machar_highprec(self):
except FloatingPointError as e:
msg = "Caught %s exception, should not have been raised." % e
raise AssertionError(msg)
-
-
-if __name__ == "__main__":
- run_module_suite()
import sys
import itertools
+import pytest
import numpy as np
-from numpy.testing import (run_module_suite, assert_, assert_raises, assert_equal,
- assert_array_equal, assert_allclose, dec)
-
from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
from numpy.core import _umath_tests
from numpy.lib.stride_tricks import as_strided
from numpy.compat import long
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_array_equal, assert_allclose
+ )
if sys.version_info[0] >= 3:
xrange = range
_check_assignment(srcidx, dstidx)
-@dec.slow
+@pytest.mark.slow
def test_diophantine_fuzz():
# Fuzz test the diophantine solver
rng = np.random.RandomState(1234)
infeasible += 1
-@dec.slow
+@pytest.mark.slow
def test_may_share_memory_easy_fuzz():
# Check that overlap problems with common strides are always
# solved with little work.
min_count=2000)
-@dec.slow
+@pytest.mark.slow
def test_may_share_memory_harder_fuzz():
# Overlap problems with not necessarily common strides take more
# work.
# Check result
assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
- @dec.slow
+ @pytest.mark.slow
def test_unary_ufunc_call_fuzz(self):
self.check_unary_fuzz(np.invert, None, np.int16)
check(x, x.copy(), x)
check(x, x, x.copy())
- @dec.slow
+ @pytest.mark.slow
def test_binary_ufunc_1d_manual(self):
ufunc = np.add
x += x.T
assert_array_equal(x - x.T, 0)
-
-
-if __name__ == "__main__":
- run_module_suite()
import sys
import os
import shutil
-from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp
import mmap
+import pytest
+from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp
from numpy import (
memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
from numpy import arange, allclose, asarray
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- dec, suppress_warnings
-)
+ assert_, assert_equal, assert_array_equal, suppress_warnings
+ )
class TestMemmap(object):
def setup(self):
del b
del fp
- @dec.skipif(Path is None, "No pathlib.Path")
+ @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
def test_path(self):
tmpname = mktemp('', 'mmap', dir=self.tempdir)
fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
shape=self.shape)
assert_equal(fp.filename, self.tmpfp.name)
- @dec.knownfailureif(sys.platform == 'gnu0', "This test is known to fail on hurd")
+ @pytest.mark.skipif(sys.platform == 'gnu0',
+ reason="Known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
assert_(fp.offset == offset)
-if __name__ == "__main__":
- run_module_suite()
+ def test_no_shape(self):
+ self.tmpfp.write(b'a'*16)
+ mm = memmap(self.tmpfp, dtype='float64')
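+        # shape was omitted, so memmap infers it from the file size:
+        # 16 bytes / 8-byte float64 itemsize -> shape (2,)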
+ assert_equal(mm.shape, (2,))
from __future__ import division, absolute_import, print_function
try:
- # Accessing collections abstact classes from collections
+    # Accessing collections' abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import ctypes
import os
import gc
+import weakref
+import pytest
from contextlib import contextmanager
if sys.version_info[0] >= 3:
import builtins
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
- run_module_suite, assert_, assert_raises, assert_warns,
- assert_equal, assert_almost_equal, assert_array_equal, assert_raises_regex,
- assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
- assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
+ assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
+ assert_array_equal, assert_raises_regex, assert_array_almost_equal,
+ assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
+ SkipTest, temppath, suppress_warnings
)
-from ._locales import CommaDecimalPointLocale
+from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
+ def test_real_imag(self):
+ # contiguity checks are for gh-11245
+ x = np.array(1j)
+ xr = x.real
+ xi = x.imag
+
+ assert_equal(xr, np.array(0))
+ assert_(type(xr) is np.ndarray)
+ assert_equal(xr.flags.contiguous, True)
+ assert_equal(xr.flags.f_contiguous, True)
+
+ assert_equal(xi, np.array(1))
+ assert_(type(xi) is np.ndarray)
+ assert_equal(xi.flags.contiguous, True)
+ assert_equal(xi.flags.f_contiguous, True)
+
class TestScalarIndexing(object):
def setup(self):
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
- @dec.slow
+ @pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
- @dec.skipif(sys.version_info[0] >= 3)
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
- @dec.slow
+ @pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
- @dec.knownfailureif(True, "See gh-9847")
+ @pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.unicode_)
- @dec.knownfailureif(True, "See gh-9847")
+ @pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
- arr = np.empty(1000, 'V4')
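+        # sort initialized random bytes rather than the uninitialized
+        # memory that np.empty returned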
+ rand = np.random.randint(256, size=4000, dtype=np.uint8)
+ arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
- arr = np.empty(1000, dt)
+ rand = np.random.randint(256, size=4000, dtype=np.uint8)
+ arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
- def test_sort_matrix_none(self):
- a = np.matrix([[2, 1, 0]])
- actual = np.sort(a, axis=None)
- expected = np.matrix([[0, 1, 2]])
- assert_equal(actual, expected)
- assert_(type(expected) is np.matrix)
-
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
+ # Test empty array, use a fresh array to get warnings in
+ # valgrind if access happens.
+ e = np.ndarray(shape=0, buffer=b'', dtype=dt)
+ b = e.searchsorted(a, 'l')
+ assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
+ b = a.searchsorted(e, 'l')
+ assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
+ # Test empty array, use a fresh array to get warnings in
+ # valgrind if access happens.
+ e = np.ndarray(shape=0, buffer=b'', dtype=dt)
+ b = e.searchsorted(a, 'l', s[:0])
+ assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
+ b = a.searchsorted(e, 'l', s)
+ assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
- def test_partition_matrix_none(self):
- # gh-4301
- a = np.matrix([[2, 1, 0]])
- actual = np.partition(a, 1, axis=None)
- expected = np.matrix([[0, 1, 2]])
- assert_equal(actual, expected)
- assert_(type(expected) is np.matrix)
-
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
with assert_raises(NotImplementedError):
a ** 2
+ def test_pow_array_object_dtype(self):
+ # test pow on arrays of object dtype
+ class SomeClass(object):
+ def __init__(self, num=None):
+ self.num = num
+
+ # want to ensure a fast pow path is not taken
+ def __mul__(self, other):
+ raise AssertionError('__mul__ should not be called')
+
+ def __div__(self, other):
+ raise AssertionError('__div__ should not be called')
+
+ def __pow__(self, exp):
+ return SomeClass(num=self.num ** exp)
+
+ def __eq__(self, other):
+ if isinstance(other, SomeClass):
+ return self.num == other.num
+
+ __rpow__ = __pow__
+
+ def pow_for(exp, arr):
+ return np.array([x ** exp for x in arr])
+
+ obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
+
+ assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
+ assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
+ assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
+ assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
+ assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
np.array([1, 2, 3, 4]),
dtype='<f4')
- @dec.slow # takes > 1 minute on mechanical hard drive
+ @pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
- if sys.version_info[0] >= 3:
- def test_bytes_fields(self):
- # Bytes are not allowed in field names and not recognized in titles
- # on Py3
- assert_raises(TypeError, np.dtype, [(b'a', int)])
- assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
-
- dt = np.dtype([((b'a', 'b'), int)])
- assert_raises(TypeError, dt.__getitem__, b'a')
-
- x = np.array([(1,), (2,), (3,)], dtype=dt)
- assert_raises(IndexError, x.__getitem__, b'a')
-
- y = x[0]
- assert_raises(IndexError, y.__getitem__, b'a')
-
- def test_multiple_field_name_unicode(self):
- def test_assign_unicode():
- dt = np.dtype([("\u20B9", "f8"),
- ("B", "f8"),
- ("\u20B9", "f8")])
-
- # Error raised when multiple fields have the same name(unicode included)
- assert_raises(ValueError, test_assign_unicode)
-
- else:
- def test_unicode_field_titles(self):
- # Unicode field titles are added to field dict on Py2
- title = u'b'
- dt = np.dtype([((title, 'a'), int)])
- dt[title]
- dt['a']
- x = np.array([(1,), (2,), (3,)], dtype=dt)
- x[title]
- x['a']
- y = x[0]
- y[title]
- y['a']
-
- def test_unicode_field_names(self):
- # Unicode field names are converted to ascii on Python 2:
- encodable_name = u'b'
- assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
- assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
-
- # But raises UnicodeEncodeError if it can't be encoded:
- nonencodable_name = u'\uc3bc'
- assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
- assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
+ def test_bytes_fields(self):
+ # Bytes are not allowed in field names and not recognized in titles
+ # on Py3
+ assert_raises(TypeError, np.dtype, [(b'a', int)])
+ assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
+
+ dt = np.dtype([((b'a', 'b'), int)])
+ assert_raises(TypeError, dt.__getitem__, b'a')
+
+ x = np.array([(1,), (2,), (3,)], dtype=dt)
+ assert_raises(IndexError, x.__getitem__, b'a')
+
+ y = x[0]
+ assert_raises(IndexError, y.__getitem__, b'a')
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
+ def test_multiple_field_name_unicode(self):
+ def test_assign_unicode():
+ dt = np.dtype([("\u20B9", "f8"),
+ ("B", "f8"),
+ ("\u20B9", "f8")])
+
+        # Error raised when multiple fields have the same name (unicode included)
+ assert_raises(ValueError, test_assign_unicode)
+
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+ def test_unicode_field_titles(self):
+ # Unicode field titles are added to field dict on Py2
+ title = u'b'
+ dt = np.dtype([((title, 'a'), int)])
+ dt[title]
+ dt['a']
+ x = np.array([(1,), (2,), (3,)], dtype=dt)
+ x[title]
+ x['a']
+ y = x[0]
+ y[title]
+ y['a']
+
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+ def test_unicode_field_names(self):
+ # Unicode field names are converted to ascii on Python 2:
+ encodable_name = u'b'
+ assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
+ assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
+
+ # But raises UnicodeEncodeError if it can't be encoded:
+ nonencodable_name = u'\uc3bc'
+ assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
+ assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
+
+ def test_fromarrays_unicode(self):
+ # A single name string provided to fromarrays() is allowed to be unicode
+ # on both Python 2 and 3:
+ x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
+ assert_equal(x['a'][0], 0)
+ assert_equal(x['b'][0], 1)
+
+ def test_unicode_order(self):
+ # Test that we can sort with order as a unicode field name in both Python 2 and
+ # 3:
+ name = u'b'
+ x = np.array([1, 3, 2], dtype=[(name, int)])
+ x.sort(order=name)
+ assert_equal(x[u'b'], np.array([1, 2, 3]))
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
fn2 = func('f2')
b[fn2] = 3
- assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
- assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
- assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+        # In 1.16 the code below can be replaced by:
+ # assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ # assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ # assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning,
+ ".* selecting multiple fields .*")
+
+ assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+ # view of subfield view/copy
+ assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
+ (2, 3))
+ assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
+ (3, 2))
+ view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
+ assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
+ (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
+ # can be removed in 1.16
+ def test_field_names_deprecation(self):
+
+ def collect_warnings(f, *args, **kwargs):
+ with warnings.catch_warnings(record=True) as log:
+ warnings.simplefilter("always")
+ f(*args, **kwargs)
+ return [w.category for w in log]
+
+ a = np.zeros((1,), dtype=[('f1', 'i4'),
+ ('f2', 'i4'),
+ ('f3', [('sf1', 'i4')])])
+ a['f1'][0] = 1
+ a['f2'][0] = 2
+ a['f3'][0] = (3,)
+ b = np.zeros((1,), dtype=[('f1', 'i4'),
+ ('f2', 'i4'),
+ ('f3', [('sf1', 'i4')])])
+ b['f1'][0] = 1
+ b['f2'][0] = 2
+ b['f3'][0] = (3,)
+
+ # All the different functions raise a warning, but not an error
+ assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
+ [FutureWarning])
+ # For <=1.12 a is not modified, but it will be in 1.13
+ assert_equal(a, b)
+
+ # Views also warn
+ subset = a[['f1', 'f2']]
+ subset_view = subset.view()
+ assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
+ [FutureWarning])
+ # But the write goes through:
+ assert_equal(subset['f1'][0], 10)
+ # Only one warning per multiple field indexing, though (even if there
+ # are multiple views involved):
+ assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
+
+ # make sure views of a multi-field index warn too
+ c = np.zeros(3, dtype='i8,i8,i8')
+ assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
+ [FutureWarning])
+
+
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
- def test_dot_scalar_and_matrix_of_objects(self):
- # Ticket #2469
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.dot(arr, 3), desired)
- assert_equal(np.dot(3, arr), desired)
-
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
- def test_inner_scalar_and_matrix(self):
- for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
- sca = np.array(3, dtype=dt)[()]
- arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
- desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
- assert_equal(np.inner(arr, sca), desired)
- assert_equal(np.inner(sca, arr), desired)
-
- def test_inner_scalar_and_matrix_of_objects(self):
- # Ticket #4482
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.inner(arr, 3), desired)
- assert_equal(np.inner(3, arr), desired)
-
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or give
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
+
class TestNewBufferProtocol(object):
+ """ Test PEP3118 buffers """
+
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
# Issue #4015.
self._check_roundtrip(0)
+ def test_invalid_buffer_format(self):
+ # datetime64 cannot be used fully in a buffer yet
+ # Should be fixed in the next Numpy major release
+ dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+ a = np.empty(3, dt)
+ assert_raises((ValueError, BufferError), memoryview, a)
+ assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
+
+
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
with assert_raises(ValueError):
memoryview(arr)
+ def test_max_dims(self):
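+        # 32 is NumPy's dimension limit; the buffer round-trip should
+        # still work at that limit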
+ a = np.empty((1,) * 32)
+ self._check_roundtrip(a)
+
+ @pytest.mark.skipif(sys.version_info < (2, 7, 7), reason="See gh-11115")
+ def test_error_too_many_dims(self):
+ def make_ctype(shape, scalar_type):
+ t = scalar_type
+ for dim in shape[::-1]:
+ t = dim * t
+ return t
+
+ # construct a memoryview with 33 dimensions
+ c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
+ m = memoryview(c_u8_33d())
+ assert_equal(m.ndim, 33)
+
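+        # NumPy supports at most 32 dimensions, so conversion must fail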
+ assert_raises_regex(
+ RuntimeError, "ndim",
+ np.array, m)
+
+ def test_error_pointer_type(self):
+ # gh-6741
+ m = memoryview(ctypes.pointer(ctypes.c_uint8()))
+ assert_('&' in m.format)
+
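+        # '&' marks a pointer in a PEP 3118 format string, which NumPy
+        # cannot convert to an array element type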
+ assert_raises_regex(
+ ValueError, "format string",
+ np.array, m)
+
+ def test_ctypes_integer_via_memoryview(self):
+ # gh-11150, due to bpo-10746
+ for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
+ value = c_integer(42)
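+            # bpo-10746: ctypes may expose a non-standard PEP 3118 format;
+            # the conversion should still succeed, possibly with a
+            # RuntimeWarning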
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
+ np.asarray(value)
+
+ def test_ctypes_struct_via_memoryview(self):
+ # gh-10528
+ class foo(ctypes.Structure):
+ _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
+ f = foo(a=1, b=2)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
+ arr = np.asarray(f)
+
+ assert_equal(arr['a'], 1)
+ assert_equal(arr['b'], 2)
+ f.a = 3
+ assert_equal(arr['a'], 3)
+
class TestArrayAttributeDeletion(object):
def test_view_assign(self):
from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
+
arr = np.arange(9).reshape(3, 3).T
arr_wb = npy_create_writebackifcopy(arr)
assert_(arr_wb.flags.writebackifcopy)
assert_(arr_wb.base is arr)
- arr_wb[:] = -100
+ arr_wb[...] = -100
npy_resolve(arr_wb)
+ # arr changes after resolve, even though we assigned to arr_wb
assert_equal(arr, -100)
# after resolve, the two arrays no longer reference each other
- assert_(not arr_wb.ctypes.data == 0)
- arr_wb[:] = 100
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
assert_equal(arr, -100)
+ def test_dealloc_warning(self):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ arr = np.arange(9).reshape(3, 3)
+ v = arr.T
+ _multiarray_tests.npy_abuse_writebackifcopy(v)
+ assert len(sup.log) == 1
+
+ def test_view_discard_refcount(self):
+ from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
+
+ arr = np.arange(9).reshape(3, 3).T
+ orig = arr.copy()
+ if HAS_REFCOUNT:
+ arr_cnt = sys.getrefcount(arr)
+ arr_wb = npy_create_writebackifcopy(arr)
+ assert_(arr_wb.flags.writebackifcopy)
+ assert_(arr_wb.base is arr)
+ arr_wb[...] = -100
+ npy_discard(arr_wb)
+ # arr remains unchanged after discard
+ assert_equal(arr, orig)
+ # after discard, the two arrays no longer reference each other
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ if HAS_REFCOUNT:
+ assert_equal(arr_cnt, sys.getrefcount(arr))
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
+ assert_equal(arr, orig)
+
class TestArange(object):
def test_infinite(self):
assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
+class TestArrayFinalize(object):
+ """ Tests __array_finalize__ """
+
+ def test_receives_base(self):
+ # gh-11237
+ class SavesBase(np.ndarray):
+ def __array_finalize__(self, obj):
+ self.saved_base = self.base
+
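+        # .base must already be set by the time __array_finalize__ runs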
+ a = np.array(1).view(SavesBase)
+ assert_(a.saved_base is a.base)
+
+ def test_lifetime_on_error(self):
+ # gh-11237
+ class RaisesInFinalize(np.ndarray):
+ def __array_finalize__(self, obj):
+ # crash, but keep this object alive
+ raise Exception(self)
+
+ # a plain object can't be weakref'd
+ class Dummy(object): pass
+
+ # get a weak reference to an object within an array
+ obj_arr = np.array(Dummy())
+ obj_ref = weakref.ref(obj_arr[()])
+
+ # get an array that crashed in __array_finalize__
+ with assert_raises(Exception) as e:
+ obj_arr.view(RaisesInFinalize)
+ if sys.version_info.major == 2:
+ # prevent an extra reference being kept
+ sys.exc_clear()
+
+ obj_subarray = e.exception.args[0]
+ del e
+ assert_(isinstance(obj_subarray, RaisesInFinalize))
+
+ # reference should still be held by obj_arr
+ gc.collect()
+ assert_(obj_ref() is not None, "object should not already be dead")
+
+ del obj_arr
+ gc.collect()
+ assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
+
+ del obj_subarray
+ gc.collect()
+ assert_(obj_ref() is None, "no references should remain")
+
+
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
-
-
-if __name__ == "__main__":
- run_module_suite()
import sys
import warnings
+import pytest
import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy import array, arange, nditer, all
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_raises, assert_warns, dec, HAS_REFCOUNT, suppress_warnings
+ assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
+ HAS_REFCOUNT, suppress_warnings
)
i.iternext()
return ret
-@dec._needs_refcount
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_iter_refcount():
# Make sure the iterator doesn't leak
dt = np.dtype('f4').newbyteorder()
rc_a = sys.getrefcount(a)
rc_dt = sys.getrefcount(dt)
- it = nditer(a, [],
+ with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='unsafe',
- op_dtypes=[dt])
- assert_(not it.iterationneedsapi)
- assert_(sys.getrefcount(a) > rc_a)
- assert_(sys.getrefcount(dt) > rc_dt)
+ op_dtypes=[dt]) as it:
+ assert_(not it.iterationneedsapi)
+ assert_(sys.getrefcount(a) > rc_a)
+ assert_(sys.getrefcount(dt) > rc_dt)
+    # dropping the iterator below releases its references to 'a' and 'dt'
it = None
assert_equal(sys.getrefcount(a), rc_a)
assert_equal(sys.getrefcount(dt), rc_dt)
def test_iter_slice():
a, b, c = np.arange(3), np.arange(3), np.arange(3.)
i = nditer([a, b, c], [], ['readwrite'])
- i[0:2] = (3, 3)
- assert_equal(a, [3, 1, 2])
- assert_equal(b, [3, 1, 2])
- assert_equal(c, [0, 1, 2])
- i[1] = 12
- assert_equal(i[0:2], [3, 12])
+ with i:
+ i[0:2] = (3, 3)
+ assert_equal(a, [3, 1, 2])
+ assert_equal(b, [3, 1, 2])
+ assert_equal(c, [0, 1, 2])
+ i[1] = 12
+ assert_equal(i[0:2], [3, 12])
+
+def test_iter_assign_mapping():
+ a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][...] = 3
+ it.operands[0][...] = 14
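+    # writeback at context exit copies the last value written into 'a'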
+ assert_equal(a, 14)
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0][-1:1]
+ x[...] = 14
+ it.operands[0][...] = -1234
+ assert_equal(a, -1234)
+ # check for no warnings on dealloc
+ x = None
+ it = None
def test_iter_nbo_align_contig():
# Check that byte order, alignment, and contig changes work
i = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('f4')])
- assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 2
- i = None
+ with i:
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 2
assert_equal(au, [2]*6)
-
+ del i # should not raise a warning
# Byte order change by requesting NBO
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
- i = nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], casting='equiv')
- assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 2
- i = None
+ with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
+ casting='equiv') as i:
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 12345
+ i.operands[0][:] = 2
assert_equal(au, [2]*6)
# Unaligned input
assert_(not i.operands[0].flags.aligned)
assert_equal(i.operands[0], a)
# With 'aligned', should make a copy
- i = nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']])
- assert_(i.operands[0].flags.aligned)
- assert_equal(i.operands[0], a)
- i.operands[0][:] = 3
- i = None
+ with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
+ assert_(i.operands[0].flags.aligned)
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 3
assert_equal(a, [3]*6)
# Discontiguous input
# No cast 'f4' -> 'f4'
a = np.arange(6, dtype='f4').reshape(2, 3)
i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ with i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
# Byte-order cast '<f4' -> '>f4'
a = np.arange(6, dtype='<f4').reshape(2, 3)
- i = nditer(a, [], [['readwrite', 'updateifcopy']],
+ with nditer(a, [], [['readwrite', 'updateifcopy']],
casting='equiv',
- op_dtypes=[np.dtype('>f4')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('>f4'))
+ op_dtypes=[np.dtype('>f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('>f4'))
# Safe case 'f4' -> 'f8'
a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
# Same-kind cast 'f8' -> 'f4' -> 'f8'
a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
- i = nditer(a, [],
+ with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='same_kind',
- op_dtypes=[np.dtype('f4')])
- assert_equal(i.operands[0], a)
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
- assert_equal(i.operands[0].strides, (4, 16, 48))
- # Check that UPDATEIFCOPY is activated
- i.operands[0][2, 1, 1] = -12.5
- assert_(a[2, 1, 1] != -12.5)
- i = None
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ assert_equal(i.operands[0].strides, (4, 16, 48))
+ # Check that WRITEBACKIFCOPY is activated at exit
+ i.operands[0][2, 1, 1] = -12.5
+ assert_(a[2, 1, 1] != -12.5)
assert_equal(a[2, 1, 1], -12.5)
a = np.arange(6, dtype='i4')[::-2]
- i = nditer(a, [],
+ with nditer(a, [],
[['writeonly', 'updateifcopy']],
casting='unsafe',
- op_dtypes=[np.dtype('f4')])
- assert_equal(i.operands[0].dtype, np.dtype('f4'))
- # Even though the stride was negative in 'a', it
- # becomes positive in the temporary
- assert_equal(i.operands[0].strides, (4,))
- i.operands[0][:] = [1, 2, 3]
- i = None
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ # Even though the stride was negative in 'a', it
+ # becomes positive in the temporary
+ assert_equal(i.operands[0].strides, (4,))
+ i.operands[0][:] = [1, 2, 3]
assert_equal(a, [1, 2, 3])
def test_iter_array_cast_errors():
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readwrite'], order='C')
- for x in i:
- x[...] = None
- vals, i, x = [None]*3
+ with i:
+ for x in i:
+ x[...] = None
+ vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_(sys.getrefcount(obj) == rc-1)
assert_equal(a, np.array([None]*4, dtype='O'))
a = np.arange(6, dtype='O')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
- for x in i:
- x[...] += 1
+ with i:
+ for x in i:
+ x[...] += 1
assert_equal(a, np.arange(6)+1)
a = np.arange(6, dtype='i4')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
- for x in i:
- x[...] += 1
+ with i:
+ for x in i:
+ x[...] += 1
assert_equal(a, np.arange(6)+1)
# Non-contiguous object array
a[:] = np.arange(6)
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
- for x in i:
- x[...] += 1
+ with i:
+ for x in i:
+ x[...] += 1
assert_equal(a, np.arange(6)+1)
# Non-contiguous value array
a[:] = np.arange(6) + 98172488
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
- ob = i[0][()]
- if HAS_REFCOUNT:
- rc = sys.getrefcount(ob)
- for x in i:
- x[...] += 1
+ with i:
+ ob = i[0][()]
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(ob)
+ for x in i:
+ x[...] += 1
if HAS_REFCOUNT:
assert_(sys.getrefcount(ob) == rc-1)
assert_equal(a, np.arange(6)+98172489)
for flag in ['readonly', 'writeonly', 'readwrite']:
a = arange(10)
i = nditer([a], ['copy_if_overlap'], [[flag]])
- assert_(i.operands[0] is a)
+ with i:
+ assert_(i.operands[0] is a)
# Copy needed, 2 ops, read-write overlap
x = arange(10)
a = x[1:]
b = x[:-1]
- i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']])
- assert_(not np.shares_memory(*i.operands))
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(not np.shares_memory(*i.operands))
# Copy not needed with elementwise, 2 ops, exactly same arrays
x = arange(10)
b = x
i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
['readwrite', 'overlap_assume_elementwise']])
- assert_(i.operands[0] is a and i.operands[1] is b)
- i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']])
- assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
+ with i:
+ assert_(i.operands[0] is a and i.operands[1] is b)
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
# Copy not needed, 2 ops, no overlap
x = arange(10)
x = arange(4, dtype=np.int8)
a = x[3:]
b = x.view(np.int32)[:1]
- i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
- assert_(not np.shares_memory(*i.operands))
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
+ assert_(not np.shares_memory(*i.operands))
# Copy needed, 3 ops, read-write overlap
for flag in ['writeonly', 'readwrite']:
a = x
b = x.T
c = x
- i = nditer([a, b, c], ['copy_if_overlap'],
- [['readonly'], ['readonly'], [flag]])
- a2, b2, c2 = i.operands
- assert_(not np.shares_memory(a2, c2))
- assert_(not np.shares_memory(b2, c2))
+ with nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['readonly'], [flag]]) as i:
+ a2, b2, c2 = i.operands
+ assert_(not np.shares_memory(a2, c2))
+ assert_(not np.shares_memory(b2, c2))
# Copy not needed, 3 ops, read-only overlap
x = np.ones([10, 10])
assert_equal([x[()] for x in i], [x[()] for x in j])
# Casting iterator
- i = nditer(a, ['buffered'], order='F', casting='unsafe',
- op_dtypes='f8', buffersize=5)
- j = i.copy()
- i = None
+ with nditer(a, ['buffered'], order='F', casting='unsafe',
+ op_dtypes='f8', buffersize=5) as i:
+ j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
a = arange(24, dtype='<i4').reshape(2, 3, 4)
- i = nditer(a, ['buffered'], order='F', casting='unsafe',
- op_dtypes='>f8', buffersize=5)
- j = i.copy()
- i = None
+ with nditer(a, ['buffered'], order='F', casting='unsafe',
+ op_dtypes='>f8', buffersize=5) as i:
+ j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
def test_iter_allocate_output_simple():
a = arange(6)
i = nditer([a, None], ['buffered', 'delay_bufalloc'],
[['readonly'], ['allocate', 'readwrite']])
- i.operands[1][:] = 1
- i.reset()
- for x in i:
- x[1][...] += x[0][...]
- assert_equal(i.operands[1], a+1)
+ with i:
+ i.operands[1][:] = 1
+ i.reset()
+ for x in i:
+ x[1][...] += x[0][...]
+ assert_equal(i.operands[1], a+1)
def test_iter_allocate_output_itorder():
# The allocated output should match the iteration order
def test_iter_allocate_output_subtype():
# Make sure that the subtype with priority wins
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 15
- # matrix vs ndarray
- a = np.matrix([[1, 2], [3, 4]])
+ # subclass vs ndarray
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = np.arange(4).reshape(2, 2).T
i = nditer([a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_equal(type(a), type(i.operands[2]))
- assert_(type(b) != type(i.operands[2]))
+ assert_(type(b) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
- # matrix always wants things to be 2D
- b = np.arange(4).reshape(1, 2, 2)
- assert_raises(RuntimeError, nditer, [a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']])
- # but if subtypes are disabled, the result can still work
+ # If subtypes are disabled, we should get back an ndarray.
i = nditer([a, b, None], [],
- [['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']])
+ [['readonly'], ['readonly'],
+ ['writeonly', 'allocate', 'no_subtype']])
assert_equal(type(b), type(i.operands[2]))
- assert_(type(a) != type(i.operands[2]))
- assert_equal(i.operands[2].shape, (1, 2, 2))
+ assert_(type(a) is not type(i.operands[2]))
+ assert_equal(i.operands[2].shape, (2, 2))
def test_iter_allocate_output_errors():
# Check that the iterator will throw errors for bad output allocations
order='C',
buffersize=16)
x = 0
- while not i.finished:
- i[0] = x
- x += 1
- i.iternext()
+ with i:
+ while not i.finished:
+ i[0] = x
+ x += 1
+ i.iternext()
assert_equal(a.ravel(order='C'), np.arange(24))
def test_iter_buffering_delayed_alloc():
i.reset()
assert_(not i.has_delayed_bufalloc)
assert_equal(i.multi_index, (0,))
- assert_equal(i[0], 0)
- i[1] = 1
- assert_equal(i[0:2], [0, 1])
- assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
+ with i:
+ assert_equal(i[0], 0)
+ i[1] = 1
+ assert_equal(i[0:2], [0, 1])
+ assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
def test_iter_buffered_cast_simple():
# Test that buffering can handle a simple cast
casting='same_kind',
op_dtypes=[np.dtype('f8')],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
casting='same_kind',
op_dtypes=[np.dtype('f8').newbyteorder()],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
casting='unsafe',
op_dtypes=[np.dtype('c8').newbyteorder()],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f8'))
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype='c8')
casting='same_kind',
op_dtypes=[np.dtype('c16').newbyteorder()],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
casting='same_kind',
op_dtypes=[np.dtype('f4')],
buffersize=7)
- for v in i:
- v[...] *= 2
+ with i:
+ for v in i:
+ v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
def test_iter_buffered_cast_structured_type():
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_(np.all(x['a'] == count))
- x['a'][0] += 2
- count += 1
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_(np.all(x['a'] == count))
+ x['a'][0] += 2
+ count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
# many -> one element -> back (copies just element 0)
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- count = 0
- for x in i:
- assert_equal(x['a'], count)
- x['a'] += 2
- count += 1
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], count)
+ x['a'] += 2
+ count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
# many -> one element -> back (copies just element 0)
assert_equal(i[0].size, a.size)
-@dec.slow
+@pytest.mark.slow
def test_iter_buffered_reduce_reuse():
# large enough array for all views, including negative strides.
a = np.arange(2*3**5)[3**5:3**5+1]
nditer2 = np.nditer([arr.copy(), None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
op_dtypes=op_dtypes)
- nditer2.operands[-1][...] = 0
- nditer2.reset()
- nditer2.iterindex = skip
+ with nditer2:
+ nditer2.operands[-1][...] = 0
+ nditer2.reset()
+ nditer2.iterindex = skip
- for (a2_in, b2_in) in nditer2:
- b2_in += a2_in.astype(np.int_)
+ for (a2_in, b2_in) in nditer2:
+ b2_in += a2_in.astype(np.int_)
- comp_res = nditer2.operands[-1]
+ comp_res = nditer2.operands[-1]
for bufsize in range(0, 3**3):
nditer1 = np.nditer([arr, None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
buffersize=bufsize, op_dtypes=op_dtypes)
- nditer1.operands[-1][...] = 0
- nditer1.reset()
- nditer1.iterindex = skip
+ with nditer1:
+ nditer1.operands[-1][...] = 0
+ nditer1.reset()
+ nditer1.iterindex = skip
- for (a1_in, b1_in) in nditer1:
- b1_in += a1_in.astype(np.int_)
+ for (a1_in, b1_in) in nditer1:
+ b1_in += a1_in.astype(np.int_)
- res = nditer1.operands[-1]
+ res = nditer1.operands[-1]
assert_array_equal(res, comp_res)
assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
vals = None
- # updateifcopy
+    # writebackifcopy - using a context manager
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readwrite', 'updateifcopy'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ # writebackifcopy - using close()
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
for y in j:
y[...] += 1
assert_equal(a, [[0, 1, 2], [3, 4, 5]])
- i, j, x, y = (None,)*4 # force the updateifcopy
+ i.close()
+ j.close()
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
def test_dtype_buffered(self):
# Test nested iteration with buffering to change dtype
vals.append([z for z in k])
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+ def test_iter_nested_iters_dtype_buffered(self):
+ # Test nested iteration with buffering to change dtype
+
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ flags=['buffered'],
+ op_flags=['readwrite'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
def test_iter_reduction_error():
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0], [-1]])
# Need to initialize the output operand to the addition unit
- i.operands[1][...] = 0
- # Do the reduction
- for x, y in i:
- y[...] += x
- # Since no axes were specified, should have allocated a scalar
- assert_equal(i.operands[1].ndim, 0)
- assert_equal(i.operands[1], np.sum(a))
+ with i:
+ i.operands[1][...] = 0
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
a = np.arange(6).reshape(2, 3)
i = nditer([a, None], ['reduce_ok', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0, 1], [-1, -1]])
# Need to initialize the output operand to the addition unit
- i.operands[1][...] = 0
- # Reduction shape/strides for the output
- assert_equal(i[1].shape, (6,))
- assert_equal(i[1].strides, (0,))
- # Do the reduction
- for x, y in i:
- # Use a for loop instead of ``y[...] += x``
- # (equivalent to ``y[...] = y[...].copy() + x``),
- # because y has zero strides we use for the reduction
- for j in range(len(y)):
- y[j] += x[j]
- # Since no axes were specified, should have allocated a scalar
- assert_equal(i.operands[1].ndim, 0)
- assert_equal(i.operands[1], np.sum(a))
+ with i:
+ i.operands[1][...] = 0
+ # Reduction shape/strides for the output
+ assert_equal(i[1].shape, (6,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+            # because y has zero strides, which we rely on for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
# This is a tricky reduction case for the buffering double loop
# to handle
'buffered', 'delay_bufalloc'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, [0, -1, 1]], buffersize=10)
- it1.operands[1].fill(0)
- it2.operands[1].fill(0)
- it2.reset()
- for x in it1:
- x[1][...] += x[0]
- for x in it2:
- x[1][...] += x[0]
- assert_equal(it1.operands[1], it2.operands[1])
- assert_equal(it2.operands[1].sum(), a.size)
+ with it1, it2:
+ it1.operands[1].fill(0)
+ it2.operands[1].fill(0)
+ it2.reset()
+ for x in it1:
+ x[1][...] += x[0]
+ for x in it2:
+ x[1][...] += x[0]
+ assert_equal(it1.operands[1], it2.operands[1])
+ assert_equal(it2.operands[1].sum(), a.size)
def test_iter_buffering_reduction():
# Test doing buffered reductions with the iterator
i = nditer([a, b], ['reduce_ok', 'buffered'],
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0], [-1]])
- assert_equal(i[1].dtype, np.dtype('f8'))
- assert_(i[1].dtype != b.dtype)
- # Do the reduction
- for x, y in i:
- y[...] += x
+ with i:
+ assert_equal(i[1].dtype, np.dtype('f8'))
+ assert_(i[1].dtype != b.dtype)
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
# Since no axes were specified, should have allocated a scalar
assert_equal(b, np.sum(a))
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0, 1], [0, -1]])
# Reduction shape/strides for the output
- assert_equal(i[1].shape, (3,))
- assert_equal(i[1].strides, (0,))
- # Do the reduction
- for x, y in i:
- # Use a for loop instead of ``y[...] += x``
- # (equivalent to ``y[...] = y[...].copy() + x``),
- # because y has zero strides we use for the reduction
- for j in range(len(y)):
- y[j] += x[j]
+ with i:
+ assert_equal(i[1].shape, (3,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+            # because y has zero strides, which we rely on for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
assert_equal(b, np.sum(a, axis=1))
# Iterator inner double loop was wrong on this one
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[-1, 0], [-1, -1]],
itershape=(2, 2))
- it.operands[1].fill(0)
- it.reset()
- assert_equal(it[0], [1, 2, 1, 2])
+ with it:
+ it.operands[1].fill(0)
+ it.reset()
+ assert_equal(it[0], [1, 2, 1, 2])
# Iterator inner loop should take argument contiguity into account
x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0)
it = np.nditer([y, x],
['buffered', 'external_loop', 'reduce_ok'],
[['readwrite'], ['readonly']])
- for a, b in it:
- a.fill(2)
+ with it:
+ for a, b in it:
+ a.fill(2)
assert_equal(y_base[1::2], y_base_copy[1::2])
assert_equal(y_base[::2], 2)
buffersize=5)
bufsizes = []
- for x, y in it:
- bufsizes.append(x.shape[0])
+ with it:
+ for x, y in it:
+ bufsizes.append(x.shape[0])
assert_equal(bufsizes, [5, 2, 5, 2])
assert_equal(sum(bufsizes), a.size)
it = np.nditer([a, msk], [],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
- for x, m in it:
- x[...] = 1
+ with it:
+ for x, m in it:
+ x[...] = 1
# Because we violated the semantics, all the values became 1
assert_equal(a, [1, 1, 1])
it = np.nditer([a, msk], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
- for x, m in it:
- x[...] = 2.5
+ with it:
+ for x, m in it:
+ x[...] = 2.5
# Because we violated the semantics, all the values became 2.5
assert_equal(a, [2.5, 2.5, 2.5])
['readonly', 'arraymask']],
op_dtypes=['i8', None],
casting='unsafe')
- for x, m in it:
- x[...] = 3
+ with it:
+ for x, m in it:
+ x[...] = 3
# Even though we violated the semantics, only the selected values
# were copied back
assert_equal(a, [3, 3, 2.5])
with assert_raises(ValueError):
_multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)
-
-if __name__ == "__main__":
- run_module_suite()
+def test_writebacks():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ assert_(a.dtype.byteorder != au.dtype.byteorder)
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][:] = 100
+ assert_equal(au, 100)
+    # do it again, this time raising an error to exit the context manager
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ try:
+ with it:
+ assert_equal(au.flags.writeable, False)
+ it.operands[0][:] = 0
+ raise ValueError('exit context manager on exception')
+    except ValueError:
+ pass
+ assert_equal(au, 0)
+ assert_equal(au.flags.writeable, True)
+    # cannot reuse 'it' outside the context manager
+ assert_raises(ValueError, getattr, it, 'operands')
+
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0]
+ x[:] = 6
+ assert_(x.flags.writebackifcopy)
+ assert_equal(au, 6)
+ assert_(not x.flags.writebackifcopy)
+ x[:] = 123 # x.data still valid
+ assert_equal(au, 6) # but not connected to au
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # reentering works
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # make sure exiting the inner context manager closes the iterator
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+ assert_raises(ValueError, getattr, it, 'operands')
+ # do not crash if original data array is decrefed
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del au
+ with it:
+ for x in it:
+ x[...] = 123
+ # make sure we cannot reenter the closed iterator
+ enter = it.__enter__
+ assert_raises(RuntimeError, enter)
+
+def test_close_equivalent():
+    '''Using a context manager and calling nditer.close() are equivalent.
+ '''
+ def add_close(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ ret = it.operands[2]
+ it.close()
+ return ret
+
+ def add_context(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ return it.operands[2]
+ z = add_close(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+ z = add_context(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+
+def test_close_raises():
+ it = np.nditer(np.arange(3))
+    assert_equal(next(it), 0)
+ it.close()
+ assert_raises(StopIteration, next, it)
+ assert_raises(ValueError, getattr, it, 'operands')
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_warn_noclose():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del it
+ assert len(sup.log) == 1
import warnings
import itertools
import platform
+import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises,
- assert_raises_regex, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, dec, suppress_warnings
-)
+ assert_, assert_equal, assert_raises, assert_raises_regex,
+ assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+ suppress_warnings, HAS_REFCOUNT
+ )
class TestResize(object):
np.seterr(**old)
assert_(np.geterr() == old)
- @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
try:
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
- @dec.knownfailureif(True, "See ticket #2350")
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
fi = np.finfo(dt)
assert_(np.can_cast(fi.min, dt))
assert_(np.can_cast(fi.max, dt))
-
+
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
self.check_function(np.full, 0)
self.check_function(np.full, 1)
- @dec._needs_refcount
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_for_reference_leak(self):
# Make sure we have an object for reference
dim = 1
self.compare_array_value(dz, value, fill_value)
# Test the 'subok' parameter
- a = np.matrix([[1, 2], [3, 4]])
+ class MyNDArray(np.ndarray):
+ pass
+
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = like_function(a, **fill_kwarg)
- assert_(type(b) is np.matrix)
+ assert_(type(b) is MyNDArray)
b = like_function(a, subok=False, **fill_kwarg)
- assert_(type(b) is not np.matrix)
+ assert_(type(b) is not MyNDArray)
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
td = np.tensordot(a, b, (1, 0))
assert_array_equal(td, np.dot(a, b))
assert_array_equal(td, np.einsum('ij,jk', a, b))
-
-
-if __name__ == "__main__":
- run_module_suite()
import itertools
import numpy as np
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises
-)
+from numpy.testing import assert_, assert_equal, assert_raises
# This is the structure of the table used for plain objects:
#
for w1, w2 in itertools.product(self.wrappers, repeat=2):
assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
import sys
-import locale
-import contextlib
-import nose
import numpy as np
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, SkipTest, dec
-)
-from ._locales import CommaDecimalPointLocale
+from numpy.testing import assert_, assert_equal, SkipTest
+from numpy.core.tests._locales import CommaDecimalPointLocale
if sys.version_info[0] >= 3:
def test_locale_longdouble(self):
assert_equal(str(np.longdouble('1.2')), str(float(1.2)))
-
-
-if __name__ == "__main__":
- run_module_suite()
import sys
try:
- # Accessing collections abstact classes from collections
+    # Accessing collections' abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import warnings
import textwrap
from os import path
+import pytest
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_array_almost_equal, assert_raises, assert_warns
+ assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+ assert_raises, assert_warns
)
with assert_raises(ValueError):
r.setfield([2,3], *r.dtype.fields['f'])
+ @pytest.mark.xfail(reason="See gh-10411, becomes real error in 1.16")
def test_out_of_order_fields(self):
# names in the same order, padding added to descr
x = self.data[['col1', 'col2']]
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
assert_raises(ValueError, lambda: arr[['nofield']])
+
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
assert_(np.rec.find_duplicate(l1) == [])
l3 = [2, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [2, 1])
-
-if __name__ == "__main__":
- run_module_suite()
import gc
import warnings
import tempfile
+import pytest
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, IS_PYPY,
- assert_almost_equal, assert_array_equal, assert_array_almost_equal,
- assert_raises, assert_warns, dec, suppress_warnings,
- _assert_valid_refcount, HAS_REFCOUNT,
+ assert_, assert_equal, IS_PYPY, assert_almost_equal,
+ assert_array_equal, assert_array_almost_equal, assert_raises,
+ assert_warns, suppress_warnings, _assert_valid_refcount, HAS_REFCOUNT,
)
from numpy.compat import asbytes, asunicode, long
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
+ def test_arange_inf_step(self):
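+        # an infinite step overshoots 'stop' immediately, so the result
+        # holds only 'start', matching the overshooting finite steps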
+ ref = np.arange(0, 1, 10)
+ x = np.arange(0, 1, np.inf)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, 1, -10)
+ x = np.arange(0, 1, -np.inf)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -1, -10)
+ x = np.arange(0, -1, -np.inf)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -1, 10)
+ x = np.arange(0, -1, np.inf)
+ assert_array_equal(ref, x)
+
+ def test_arange_underflow_stop_and_step(self):
+ finfo = np.finfo(np.float64)
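+        # a step of finfo.max makes (stop - start)/step underflow to zero;
+        # the result should still be the single element [start]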
+
+ ref = np.arange(0, finfo.eps, 2 * finfo.eps)
+ x = np.arange(0, finfo.eps, finfo.max)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, finfo.eps, -2 * finfo.eps)
+ x = np.arange(0, finfo.eps, -finfo.max)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -finfo.eps, -2 * finfo.eps)
+ x = np.arange(0, -finfo.eps, -finfo.max)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -finfo.eps, 2 * finfo.eps)
+ x = np.arange(0, -finfo.eps, finfo.max)
+ assert_array_equal(ref, x)
+
def test_argmax(self):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
- @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
+ @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+ reason="Using relaxed stride checking")
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
- @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
+ @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+ reason="Using relaxed stride checking")
def test_copy_detection_corner_case2(self):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
- @dec._needs_refcount
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_structured_arrays_with_objects2(self):
# Ticket #1299 second test
stra = 'aaaa'
y = np.add(x, x, x)
assert_equal(id(x), id(y))
- @dec._needs_refcount
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=float)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
+ def test_squeeze_axis_handling(self):
+ # Issue #10779
+ # Ensure proper handling of objects
+ # that don't support axis specification
+ # when squeezing
+
+ class OldSqueeze(np.ndarray):
+
+ def __new__(cls,
+ input_array):
+ obj = np.asarray(input_array).view(cls)
+ return obj
+
+            # Prior to NumPy 1.7.0 it was perfectly reasonable for an
+            # ndarray subclass to define squeeze() without an axis
+            # argument.
+            # NOTE: this example is somewhat artificial; it is designed
+            # to simulate an old API expectation to guard against
+            # regression
+ def squeeze(self):
+ return super(OldSqueeze, self).squeeze()
+
+ oldsqueeze = OldSqueeze(np.array([[1],[2],[3]]))
+
+ # if no axis argument is specified the old API
+ # expectation should give the correct result
+ assert_equal(np.squeeze(oldsqueeze),
+ np.array([1,2,3]))
+
+ # likewise, axis=None should work perfectly well
+ # with the old API expectation
+ assert_equal(np.squeeze(oldsqueeze, axis=None),
+ np.array([1,2,3]))
+
+ # however, specification of any particular axis
+ # should raise a TypeError in the context of the
+ # old API specification, even when using a valid
+ # axis specification like 1 for this array
+ with assert_raises(TypeError):
+ # this would silently succeed for array
+ # subclasses / objects that did not support
+ # squeeze axis argument handling before fixing
+ # Issue #10779
+ np.squeeze(oldsqueeze, axis=1)
+
+ # check for the same behavior when using an invalid
+ # axis specification -- in this case axis=0 does not
+ # have size 1, but the priority should be to raise
+ # a TypeError for the axis argument and NOT a
+ # ValueError for squeezing a non-empty dimension
+ with assert_raises(TypeError):
+ np.squeeze(oldsqueeze, axis=0)
+
+ # the new API knows how to handle the axis
+ # argument and will return a ValueError if
+ # attempting to squeeze an axis that is not
+ # of length 1
+ with assert_raises(ValueError):
+ np.squeeze(np.array([[1],[2],[3]]), axis=0)
+
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
a = np.empty((100000000,), dtype='i1')
del a
- @dec._needs_refcount
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
assert_equal(uf(a), ())
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
- @dec._needs_refcount
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
def test_void_item_memview(self):
va = np.zeros(10, 'V4')
- # for now, there is just a futurewarning
- assert_warns(FutureWarning, va[:1].item)
- # in the future, test we got a bytes copy:
- #x = va[:1].item()
- #va[0] = b'\xff\xff\xff\xff'
- #del va
- #assert_equal(x, b'\x00\x00\x00\x00')
-
-if __name__ == "__main__":
- run_module_suite()
+ x = va[:1].item()
+ va[0] = b'\xff\xff\xff\xff'
+ del va
+ assert_equal(x, b'\x00\x00\x00\x00')
+
+ def test_void_getitem(self):
+ # Test fix for gh-11668.
+ assert_(np.array([b'a'], 'V1').astype('O') == b'a')
+ assert_(np.array([b'ab'], 'V2').astype('O') == b'ab')
+ assert_(np.array([b'abc'], 'V3').astype('O') == b'abc')
+ assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd')
+
+ def test_structarray_title(self):
+ # The following used to segfault on pypy, due to NPY_TITLE_KEY
+        # not working properly and resulting in a double-decref of the
+ # structured array field items:
+ # See: https://bitbucket.org/pypy/pypy/issues/2789
+ for j in range(5):
+ structure = np.array([1], dtype=[(('x', 'X'), np.object_)])
+ structure[0]['x'] = np.array([2])
+ gc.collect()
+
+ def test_dtype_scalar_squeeze(self):
+ # gh-11384
+ values = {
+ 'S': b"a",
+ 'M': "2018-06-20",
+ }
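+        # 'S' and 'M' need type-appropriate values; every other
+        # (non-object) typecode can be constructed from the default
+        # integer 3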
+ for ch in np.typecodes['All']:
+ if ch in 'O':
+ continue
+ sctype = np.dtype(ch).type
+ scvalue = sctype(values.get(ch, 3))
+ for axis in [None, ()]:
+ squeezed = scvalue.squeeze(axis=axis)
+ assert_equal(squeezed, scvalue)
+ assert_equal(type(squeezed), type(scvalue))
+
+ def test_field_access_by_title(self):
+ # gh-11507
+ s = 'Some long field name'
+ if HAS_REFCOUNT:
+ base = sys.getrefcount(s)
+ t = np.dtype([((s, 'f1'), np.float64)])
+ data = np.zeros(10, t)
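+        # repeated access by title must not over-decref the title string;
+        # a refcount below the baseline would indicate a bug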
+ for i in range(10):
+ v = str(data[['f1']])
+ if HAS_REFCOUNT:
+ assert_(base <= sys.getrefcount(s))
import sys
import platform
-import numpy as np
+import pytest
+import numpy as np
from numpy.testing import (
- run_module_suite,
assert_equal, assert_almost_equal, assert_raises, assert_warns,
- dec
-)
+ )
class TestFromString(object):
def test_floating(self):
flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
assert_equal(flongdouble, -np.inf)
- @dec.knownfailureif((sys.version_info[0] >= 3) or
- (sys.platform == "win32" and
- platform.architecture()[0] == "64bit"),
- "numpy.intp('0xff', 16) not supported on Py3, "
- "as it does not inherit from Python int")
+ @pytest.mark.skipif((sys.version_info[0] >= 3)
+ or (sys.platform == "win32"
+ and platform.architecture()[0] == "64bit"),
+ reason="numpy.intp('0xff', 16) not supported on Py3 "
+ "or 64 bit Windows")
def test_intp(self):
# Ticket #99
i_width = np.int_(0).nbytes*2 - 1
def test_uint64_from_negative(self):
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
-
-
-if __name__ == "__main__":
- run_module_suite()
"""
import sys
import numpy as np
-from numpy.testing import run_module_suite, assert_, assert_equal, dec
+import pytest
+
+from numpy.testing import assert_, assert_equal, assert_raises
# PEP3118 format strings for native (standard alignment and byteorder) types
scalars_and_codes = [
]
+@pytest.mark.skipif(sys.version_info.major < 3,
+ reason="Python 2 scalars lack a buffer interface")
class TestScalarPEP3118(object):
- skip_if_no_buffer_interface = dec.skipif(sys.version_info.major < 3,
- "scalars do not implement buffer interface in Python 2")
- @skip_if_no_buffer_interface
def test_scalar_match_array(self):
for scalar, _ in scalars_and_codes:
x = scalar()
mv_a = memoryview(a)
assert_equal(mv_x.format, mv_a.format)
- @skip_if_no_buffer_interface
def test_scalar_dim(self):
for scalar, _ in scalars_and_codes:
x = scalar()
assert_equal(mv_x.strides, ())
assert_equal(mv_x.suboffsets, ())
- @skip_if_no_buffer_interface
def test_scalar_known_code(self):
for scalar, code in scalars_and_codes:
x = scalar()
mv_x = memoryview(x)
assert_equal(mv_x.format, code)
- @skip_if_no_buffer_interface
def test_void_scalar_structured_data(self):
dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))])
x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
assert_equal(mv_x.itemsize, mv_a.itemsize)
assert_equal(mv_x.format, mv_a.format)
-if __name__ == "__main__":
- run_module_suite()
+ def test_datetime_memoryview(self):
+ # gh-11656
+ # Values verified with v1.13.3, shape is not () as in test_scalar_dim
+ def as_dict(m):
+ return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
+ ndim=m.ndim, format=m.format)
+
+ dt1 = np.datetime64('2016-01-01')
+ dt2 = np.datetime64('2017-01-01')
+ expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1,
+ 'shape': (8,), 'format': 'B'}
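+        # i.e. the 8 bytes of a datetime64 are exposed as a 1-d
+        # unsigned-byte buffer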
+ v = memoryview(dt1)
+ res = as_dict(v)
+ assert_equal(res, expected)
+
+ v = memoryview(dt2 - dt1)
+ res = as_dict(v)
+ assert_equal(res, expected)
+
+ dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+ a = np.empty(1, dt)
+ # Fails to create a PEP 3118 valid buffer
+ assert_raises((ValueError, BufferError), memoryview, a[0])
+
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import run_module_suite, assert_
+from numpy.testing import assert_
class A(object):
res_np = np_s * np_i
res_s = b'abc' * 5
assert_(res_np == res_s)
-
-
-if __name__ == "__main__":
- run_module_suite()
import itertools
import operator
import platform
+import pytest
import numpy as np
from numpy.testing import (
- run_module_suite,
- assert_, assert_equal, assert_raises,
- assert_almost_equal, assert_allclose, assert_array_equal,
- IS_PYPY, suppress_warnings, dec, _gen_alignment_data, assert_warns
-)
+ assert_, assert_equal, assert_raises, assert_almost_equal, assert_allclose,
+ assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
+ assert_warns
+ )
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
assert_raises(OverflowError, int, x)
assert_equal(len(sup.log), 1)
- @dec.knownfailureif(not IS_PYPY,
- "__int__ is not the same as int in cpython (gh-9972)")
+ @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)")
def test_int_from_infinite_longdouble___int__(self):
x = np.longdouble(np.inf)
assert_raises(OverflowError, x.__int__)
assert_raises(OverflowError, x.__int__)
assert_equal(len(sup.log), 1)
- @dec.knownfailureif(platform.machine().startswith("ppc64"))
- @dec.skipif(np.finfo(np.double) == np.finfo(np.longdouble))
+ @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+ @pytest.mark.skipif(platform.machine().startswith("ppc64"),
+ reason="IBM double double")
def test_int_from_huge_longdouble(self):
# Produce a longdouble that would overflow a double,
# use exponent that avoids bug in Darwin pow function.
def test_numpy_abs(self):
self._test_abs_func(np.abs)
-
-
-if __name__ == "__main__":
- run_module_suite()
"""
from __future__ import division, absolute_import, print_function
-import numpy as np
-from numpy.testing import assert_, assert_equal, run_module_suite
+import code, sys
+import platform
+import pytest
+from tempfile import TemporaryFile
+import numpy as np
+from numpy.testing import assert_, assert_equal, suppress_warnings
class TestRealScalars(object):
def test_str(self):
check(1e15)
check(1e16)
+ def test_py2_float_print(self):
+ # gh-10753
+        # In python2, the python float type implements an obsolete method
+ # tp_print, which overrides tp_repr and tp_str when using "print" to
+ # output to a "real file" (ie, not a StringIO). Make sure we don't
+ # inherit it.
+ x = np.double(0.1999999999999)
+ with TemporaryFile('r+t') as f:
+ print(x, file=f)
+ f.seek(0)
+ output = f.read()
+ assert_equal(output, str(x) + '\n')
+ # In python2 the value float('0.1999999999999') prints with reduced
+ # precision as '0.2', but we want numpy's np.double('0.1999999999999')
+ # to print the unique value, '0.1999999999999'.
+
+ # gh-11031
+        # In the python2 interactive shell, when stdout is a "real" file,
+        # the output of the last command is printed to stdout without
+        # Py_PRINT_RAW (unlike the print statement), so `>>> x` and
+        # `>>> print x` are potentially different. Make sure they are the
+        # same. The only way I found to get prompt-like output is using an
+        # actual prompt from the 'code' module. Again, we must use a
+        # tempfile to get a "real" file.
+
+        # dummy user input that enters one line and then sends ctrl-D (EOF)
+ def userinput():
+ yield 'np.sqrt(2)'
+ raise EOFError
+ gen = userinput()
+ input_func = lambda prompt="": next(gen)
+
+ with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
+ orig_stdout, orig_stderr = sys.stdout, sys.stderr
+ sys.stdout, sys.stderr = fo, fe
+
+ # py2 code.interact sends irrelevant internal DeprecationWarnings
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ code.interact(local={'np': np}, readfunc=input_func, banner='')
+
+ sys.stdout, sys.stderr = orig_stdout, orig_stderr
+
+ fo.seek(0)
+ capture = fo.read().strip()
+
+ assert_equal(capture, repr(np.sqrt(2)))
+
def test_dragon4(self):
# these tests are adapted from Ryan Juckett's dragon4 implementation,
# see dragon4.c for details.
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), trim='-'), "1")
+ @pytest.mark.skipif(not platform.machine().startswith("ppc64"),
+ reason="only applies to ppc float128 values")
+ def test_ppc64_ibm_double_double128(self):
+ # check that the precision decreases once we get into the subnormal
+ # range. Unlike float64, this starts around 1e-292 instead of 1e-308,
+ # which happens when the first double is normal and the second is
+ # subnormal.
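+        # (an IBM double-double value is the sum of two float64s, so
+        # precision degrades once the low double becomes subnormal)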
+ x = np.float128('2.123123123123123123123123123123123e-286')
+ got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
+ expected = [
+ "1.06156156156156156156156156156157e-286",
+ "1.06156156156156156156156156156158e-287",
+ "1.06156156156156156156156156156159e-288",
+ "1.0615615615615615615615615615616e-289",
+ "1.06156156156156156156156156156157e-290",
+ "1.06156156156156156156156156156156e-291",
+ "1.0615615615615615615615615615616e-292",
+ "1.0615615615615615615615615615615e-293",
+ "1.061561561561561561561561561562e-294",
+ "1.06156156156156156156156156155e-295",
+ "1.0615615615615615615615615616e-296",
+ "1.06156156156156156156156156e-297",
+ "1.06156156156156156156156157e-298",
+ "1.0615615615615615615615616e-299",
+ "1.06156156156156156156156e-300",
+ "1.06156156156156156156155e-301",
+ "1.0615615615615615615616e-302",
+ "1.061561561561561561562e-303",
+ "1.06156156156156156156e-304",
+ "1.0615615615615615618e-305",
+ "1.06156156156156156e-306",
+ "1.06156156156156157e-307",
+ "1.0615615615615616e-308",
+ "1.06156156156156e-309",
+ "1.06156156156157e-310",
+ "1.0615615615616e-311",
+ "1.06156156156e-312",
+ "1.06156156154e-313",
+ "1.0615615616e-314",
+ "1.06156156e-315",
+ "1.06156155e-316",
+ "1.061562e-317",
+ "1.06156e-318",
+ "1.06155e-319",
+ "1.0617e-320",
+ "1.06e-321",
+ "1.04e-322",
+ "1e-323",
+ "0.0",
+ "0.0"]
+ assert_equal(got, expected)
+
+ # Note: we follow glibc behavior, but it (or gcc) might not be right.
+ # In particular we can get two values that print the same but are not
+ # equal:
+ a = np.float128('2')/np.float128('3')
+ b = np.float128(str(a))
+ assert_equal(str(a), str(b))
+ assert_(a != b)
+
def float32_roundtrip(self):
# gh-9360
x = np.float32(1024 - 2**-14)
# gh-2643, gh-6136, gh-6908
assert_equal(repr(np.float64(0.1)), repr(0.1))
assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
-
-if __name__ == "__main__":
- run_module_suite()
import warnings
import numpy as np
-from numpy.core import (array, arange, atleast_1d, atleast_2d, atleast_3d,
- block, vstack, hstack, newaxis, concatenate, stack)
-from numpy.testing import (assert_, assert_raises,
- assert_array_equal, assert_equal, run_module_suite,
- assert_raises_regex, assert_almost_equal)
+from numpy.core import (
+ array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
+ newaxis, concatenate, stack
+ )
+from numpy.testing import (
+ assert_, assert_raises, assert_array_equal, assert_equal,
+ assert_raises_regex, assert_almost_equal
+ )
from numpy.compat import long
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
- # np.matrix
- m = np.matrix([[1, 2], [3, 4]])
- assert_raises_regex(ValueError, 'shape too large to be a matrix',
- stack, [m, m])
class TestBlock(object):
[3., 3., 3.]]])
assert_equal(result, expected)
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
import numpy.core._umath_tests as umt
+import numpy.linalg._umath_linalg as uml
import numpy.core._operand_flag_tests as opflag_tests
import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises,
- assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- assert_no_warnings, assert_allclose,
-)
+ assert_, assert_equal, assert_raises, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
+ assert_allclose,
+ )
class TestUfuncKwargs(object):
assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i',
dtype=int)
+ def test_extobj_refcount(self):
+ # Should not segfault with USE_DEBUG.
+ assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
+
class TestUfunc(object):
def test_pickle(self):
def test_signature(self):
# the arguments to test_signature are: nin, nout, core_signature
# pass
- assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1)
+ enabled, num_dims, ixs = umt.test_signature(2, 1, "(i),(i)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 1, 0))
+ assert_equal(ixs, (0, 0))
- # pass. empty core signature; treat as plain ufunc (with trivial core)
- assert_equal(umt.test_signature(2, 1, "(),()->()"), 0)
+ # empty core signature; treat as plain ufunc (with trivial core)
+ enabled, num_dims, ixs = umt.test_signature(2, 1, "(),()->()")
+ assert_equal(enabled, 0)
+ assert_equal(num_dims, (0, 0, 0))
+ assert_equal(ixs, ())
# in the following calls, a ValueError should be raised because
# of error in core signature
pass
# more complicated names for variables
- assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1)
+ enabled, num_dims, ixs = umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 1))
+ assert_equal(ixs, (0, 1, 2, 3))
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
d += d
assert_almost_equal(d, 2. + 2j)
+ def test_sum_initial(self):
+ # Integer, single axis
+ assert_equal(np.sum([3], initial=2), 5)
+
+ # Floating point
+ assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
+
+ # Multiple non-adjacent axes
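+        # each of the 3 remaining entries sums 2*5 ones, plus the initial
+        # value of 2, giving 12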
+ assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
+ [12, 12, 12])
+
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
def test_axes_argument(self):
# inner1d signature: '(i),(i)->()'
- in1d = umt.inner1d
+ inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
# basic tests on inputs (outputs tested below with matrix_multiply).
- c = in1d(a, b)
+ c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
# default
- c = in1d(a, b, axes=[(-1,), (-1,), ()])
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()])
assert_array_equal(c, (a * b).sum(-1))
# integers ok for single axis.
- c = in1d(a, b, axes=[-1, -1, ()])
+ c = inner1d(a, b, axes=[-1, -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# mix fine
- c = in1d(a, b, axes=[(-1,), -1, ()])
+ c = inner1d(a, b, axes=[(-1,), -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# can omit last axis.
- c = in1d(a, b, axes=[-1, -1])
+ c = inner1d(a, b, axes=[-1, -1])
assert_array_equal(c, (a * b).sum(-1))
# can pass in other types of integer (with __index__ protocol)
- c = in1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
+ c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
assert_array_equal(c, (a * b).sum(-1))
# swap some axes
- c = in1d(a, b, axes=[0, 0])
+ c = inner1d(a, b, axes=[0, 0])
assert_array_equal(c, (a * b).sum(0))
- c = in1d(a, b, axes=[0, 2])
+ c = inner1d(a, b, axes=[0, 2])
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
- # Check errors for inproperly constructed axes arguments.
+ # Check errors for improperly constructed axes arguments.
# should have list.
- assert_raises(TypeError, in1d, a, b, axes=-1)
+ assert_raises(TypeError, inner1d, a, b, axes=-1)
# needs enough elements
- assert_raises(ValueError, in1d, a, b, axes=[-1])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1])
# should pass in indices.
- assert_raises(TypeError, in1d, a, b, axes=[-1.0, -1.0])
- assert_raises(TypeError, in1d, a, b, axes=[(-1.0,), -1])
- assert_raises(TypeError, in1d, a, b, axes=[None, 1])
+ assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
+ assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
+ assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
# cannot pass an index unless there is only one dimension
# (output is wrong in this case)
- assert_raises(TypeError, in1d, a, b, axes=[-1, -1, -1])
+ assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1])
# or pass in generally the wrong number of axes
- assert_raises(ValueError, in1d, a, b, axes=[-1, -1, (-1,)])
- assert_raises(ValueError, in1d, a, b, axes=[-1, (-2, -1), ()])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()])
# axes need to have same length.
- assert_raises(ValueError, in1d, a, b, axes=[0, 1])
+ assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
# matrix_multiply signature: '(m,n),(n,p)->(m,p)'
mm = umt.matrix_multiply
d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)])
assert_(c is d)
assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2))
- # Check errors for inproperly constructed axes arguments.
+ # Check errors for improperly constructed axes arguments.
# wrong argument
assert_raises(TypeError, mm, a, b, axis=1)
# axes should be list
assert_raises(ValueError, mm, z, z, out=z[:, 0])
assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
+ # Regular ufuncs should not accept axes.
+ assert_raises(TypeError, np.add, 1., 1., axes=[0])
+ # should be able to deal with bad unrelated kwargs.
+ assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True)
+
+ def test_axis_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axis=-1)
+ assert_array_equal(c, (a * b).sum(-1))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, axis=-1, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ c = inner1d(a, b, axis=0)
+ assert_array_equal(c, (a * b).sum(0))
+ # Sanity checks on innerwt and cumsum.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, axis=0),
+ np.sum(a * b * w, axis=0))
+ assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0))
+ assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1))
+ out = np.empty_like(a)
+ b = umt.cumsum(a, out=out, axis=0)
+ assert_(out is b)
+ assert_array_equal(b, np.cumsum(a, axis=0))
+ b = umt.cumsum(a, out=out, axis=1)
+ assert_(out is b)
+ assert_array_equal(b, np.cumsum(a, axis=-1))
+ # Check errors.
+ # Cannot pass in both axis and axes.
+ assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0])
+ # Not an integer.
+ assert_raises(TypeError, inner1d, a, b, axis=[0])
+ # more than 1 core dimensions.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, axis=1)
+ # Output wrong size in axis.
+ out = np.empty((1, 2, 3), dtype=a.dtype)
+ assert_raises(ValueError, umt.cumsum, a, out=out, axis=0)
+ # Regular ufuncs should not accept axis.
+ assert_raises(TypeError, np.add, 1., 1., axis=0)
+
+ def test_keepdims_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, keepdims=True, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ # Now combined with axis and axes.
+ c = inner1d(a, b, axis=-1, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=False))
+ c = inner1d(a, b, axis=-1, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axis=0, keepdims=False)
+ assert_array_equal(c, (a * b).sum(0, keepdims=False))
+ c = inner1d(a, b, axis=0, keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axes=[0, 0], keepdims=False)
+ assert_array_equal(c, (a * b).sum(0))
+ c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[0, 2], keepdims=False)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+ c = inner1d(a, b, axes=[0, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
+ assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
+ # Hardly useful, but should work.
+ c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
+ .sum(1, keepdims=True))
+ # Check with two core dimensions.
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected = uml.det(a)
+ c = uml.det(a, keepdims=False)
+ assert_array_equal(c, expected)
+ c = uml.det(a, keepdims=True)
+ assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected_s, expected_l = uml.slogdet(a)
+ cs, cl = uml.slogdet(a, keepdims=False)
+ assert_array_equal(cs, expected_s)
+ assert_array_equal(cl, expected_l)
+ cs, cl = uml.slogdet(a, keepdims=True)
+ assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
+ assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
+ # Sanity check on innerwt.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
+ np.sum(a * b * w, axis=-1, keepdims=True))
+ assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True),
+ np.sum(a * b * w, axis=0, keepdims=True))
+ # Check errors.
+ # Not a boolean
+ assert_raises(TypeError, inner1d, a, b, keepdims='true')
+ # More than 1 core dimension, and core output dimensions.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, keepdims=True)
+ assert_raises(TypeError, mm, a, b, keepdims=False)
+ # Regular ufuncs should not accept keepdims.
+ assert_raises(TypeError, np.add, 1., 1., keepdims=False)
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
# An output array is required to determine p with signature (n,d)->(p)
assert_raises(ValueError, umt.euclidean_pdist, a)
+ def test_cumsum(self):
+ a = np.arange(10)
+ result = umt.cumsum(a)
+ assert_array_equal(result, a.cumsum())
+
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
+ def test_object_comparison(self):
+ class HasComparisons(object):
+ def __eq__(self, other):
+ return '=='
+
+ arr0d = np.array(HasComparisons())
+ assert_equal(arr0d == arr0d, True)
+ assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
+ assert_equal(np.equal(arr0d, arr0d, dtype=object), '==')
+
+ arr1d = np.array([HasComparisons()])
+ assert_equal(arr1d == arr1d, np.array([True]))
+ assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast
+ assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
+
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
+ assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
- def test_object_scalar_multiply(self):
- # Tickets #2469 and #4482
- arr = np.matrix([1, 2], dtype=object)
- desired = np.matrix([[3, 6]], dtype=object)
- assert_equal(np.multiply(arr, 3), desired)
- assert_equal(np.multiply(3, arr), desired)
-
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
assert_equal(np.sqrt(a, where=m), [1])
def check_identityless_reduction(self, a):
- # np.minimum.reduce is a identityless reduction
+ # np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
+ def test_initial_reduction(self):
+        # np.minimum.reduce is an identityless reduction, so reducing an
+        # empty array requires an explicit initial value
+
+ # For cases like np.maximum(np.abs(...), initial=0)
+ # More generally, a supremum over non-negative numbers.
+ assert_equal(np.maximum.reduce([], initial=0), 0)
+
+ # For cases like reduction of an empty array over the reals.
+ assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
+ assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
+
+ # Random tests
+ assert_equal(np.minimum.reduce([5], initial=4), 4)
+ assert_equal(np.maximum.reduce([4], initial=5), 5)
+ assert_equal(np.maximum.reduce([5], initial=4), 5)
+ assert_equal(np.minimum.reduce([4], initial=5), 4)
+
+ # Check initial=None raises ValueError for both types of ufunc reductions
+ assert_raises(ValueError, np.minimum.reduce, [], initial=None)
+ assert_raises(ValueError, np.add.reduce, [], initial=None)
+
+ # Check that np._NoValue gives default behavior.
+ assert_equal(np.add.reduce([], initial=np._NoValue), 0)
+
+ # Check that initial kwarg behaves as intended for dtype=object
+ a = np.array([10], dtype=object)
+ res = np.add.reduce(a, initial=5)
+ assert_equal(res, 15)
+
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
assert_equal(f(d, 0, None, None), r)
assert_equal(f(d, 0, None, None, keepdims=False), r)
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
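+        # ``initial`` is accepted both positionally (sixth argument) and
+        # by keyword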
+ assert_equal(f(d, 0, None, None, False, 0), r)
+ assert_equal(f(d, 0, None, None, False, initial=0), r)
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0), r)
# too little
assert_raises(TypeError, f)
# too much
- assert_raises(TypeError, f, d, 0, None, None, False, 1)
+ assert_raises(TypeError, f, d, 0, None, None, False, 0, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
def test_no_doc_string(self):
# gh-9337
assert_('\n' not in umt.inner1d_no_doc.__doc__)
-
-
-if __name__ == "__main__":
- run_module_suite()
import warnings
import fnmatch
import itertools
+import pytest
import numpy.core.umath as ncu
from numpy.core import _umath_tests as ncu_tests
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises,
- assert_raises_regex, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, dec, assert_allclose, assert_no_warnings,
- suppress_warnings, _gen_alignment_data,
-)
+ assert_, assert_equal, assert_raises, assert_raises_regex,
+ assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+ assert_allclose, assert_no_warnings, suppress_warnings,
+ _gen_alignment_data, assert_warns
+ )
def on_powerpc():
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
+ def test_reduce_warns(self):
+        # gh 10370, 11029: Some compilers reorder the call to
+        # npy_getfloatstatus and put it before the call to an intrinsic
+        # function that causes an invalid status to be set. Also make sure
+        # warnings are emitted
+ for n in (2, 4, 8, 16, 32):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ for r in np.diagflat([np.nan] * n):
+ assert_equal(np.min(r), np.nan)
+ assert_equal(len(sup.log), n)
+
+ def test_minimize_warns(self):
+ # gh 11589
+ assert_warns(RuntimeWarning, np.minimum, np.nan, 1)
+
class TestAbsoluteNegative(object):
def test_abs_neg_blocked(self):
assert_equal(args[1], a)
assert_equal(i, 0)
+ def test_wrap_and_prepare_out(self):
+ # Calling convention for out should not affect how special methods are
+ # called
+
+ class StoreArrayPrepareWrap(np.ndarray):
+ _wrap_args = None
+ _prepare_args = None
+ def __new__(cls):
+ return np.empty(()).view(cls)
+ def __array_wrap__(self, obj, context):
+ self._wrap_args = context[1]
+ return obj
+ def __array_prepare__(self, obj, context):
+ self._prepare_args = context[1]
+ return obj
+ @property
+ def args(self):
+ # We need to ensure these are fetched at the same time, before
+                # any other ufuncs are called by the assertions
+ return (self._prepare_args, self._wrap_args)
+ def __repr__(self):
+ return "a" # for short test output
+
+ def do_test(f_call, f_expected):
+ a = StoreArrayPrepareWrap()
+ f_call(a)
+ p, w = a.args
+ expected = f_expected(a)
+ try:
+ assert_equal(p, expected)
+ assert_equal(w, expected)
+ except AssertionError as e:
+ # assert_equal produces truly useless error messages
+ raise AssertionError("\n".join([
+ "Bad arguments passed in ufunc call",
+ " expected: {}".format(expected),
+ " __array_prepare__ got: {}".format(p),
+ " __array_wrap__ got: {}".format(w)
+ ]))
+
+ # method not on the out argument
+ do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
+
+ # method on the out argument
+ do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
+
def test_wrap_with_iterable(self):
# test fix for bug #1026:
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
+ def test_failing_out_wrap(self):
+
+ singleton = np.array([1.0])
+
+ class Ok(np.ndarray):
+ def __array_wrap__(self, obj):
+ return singleton
+
+ class Bad(np.ndarray):
+ def __array_wrap__(self, obj):
+ raise RuntimeError
+
+ ok = np.empty(1).view(Ok)
+ bad = np.empty(1).view(Bad)
+
+ # double-free (segfault) of "ok" if "bad" raises an exception
+ for i in range(10):
+ assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
+
def test_none_wrap(self):
# Tests that issue #8507 is resolved. Previously, this would segfault
assert_equal(ncu.maximum(a, C()), 0)
def test_ufunc_override(self):
-
+        # check that the override works even with an operand of high priority
class A(object):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return self, func, method, inputs, kwargs
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 100
+
a = A()
- b = np.matrix([1])
+ b = np.array([1]).view(MyNDArray)
res0 = np.multiply(a, b)
res1 = np.multiply(b, b, out=a)
assert_raises(TypeError, np.multiply, a)
assert_raises(TypeError, np.multiply, a, a, a, a)
assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a')
+ assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0])
# reduce, positional args
res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
- keepdims='keep0')
+ keepdims='keep0', initial='init0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'keepdims': 'keep0',
- 'axis': 'axis0'})
+ 'axis': 'axis0',
+ 'initial': 'init0'})
# reduce, output equal to None removed, but not other explicit ones,
# even if they are at their default value.
assert_equal(res[4], {'axis': 0, 'keepdims': True})
res = np.multiply.reduce(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
+ res = np.multiply.reduce(a, 0, None, None, False, 2)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': 2})
+ # np._NoValue ignored for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, np._NoValue)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ # None kept for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': None})
# reduce, wrong args
assert_raises(ValueError, np.multiply.reduce, a, out=())
# outer, wrong args
assert_raises(TypeError, np.multiply.outer, a)
assert_raises(TypeError, np.multiply.outer, a, a, a, a)
+ assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
# at
res = np.multiply.at(a, [4, 2], 'b0')
for dtype in [np.complex64, np.complex_]:
self.check_loss_of_precision(dtype)
- @dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo")
+ @pytest.mark.skipif(is_longdouble_finfo_bogus(),
+ reason="Bogus long double finfo")
def test_loss_of_precision_longcomplex(self):
self.check_loss_of_precision(np.longcomplex)
def test_nextafter():
return _test_nextafter(np.float64)
+
def test_nextafterf():
return _test_nextafter(np.float32)
-@dec.knownfailureif(sys.platform == 'win32',
- "Long double support buggy on win32, ticket 1664.")
+
+@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
+ reason="IBM double double")
def test_nextafterl():
return _test_nextafter(np.longdouble)
+
def test_nextafter_0():
for t, direction in itertools.product(np.sctypes['float'], (1, -1)):
tiny = np.finfo(t).tiny
def test_spacingf():
return _test_spacing(np.float32)
-@dec.knownfailureif(sys.platform == 'win32',
- "Long double support buggy on win32, ticket 1664.")
+
+@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
+ reason="IBM double double")
def test_spacingl():
return _test_spacing(np.longdouble)
with assert_no_warnings():
a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff')
np.isnan(a)
-
-
-if __name__ == "__main__":
- run_module_suite()
import sys
import platform
+import pytest
import numpy as np
import numpy.core.umath as ncu
from numpy.testing import (
- run_module_suite, assert_raises, assert_equal, assert_array_equal,
- assert_almost_equal, dec
-)
+ assert_raises, assert_equal, assert_array_equal, assert_almost_equal
+ )
# TODO: branch cuts (use Pauli code)
# TODO: conj 'symmetry'
# At least on Windows the results of many complex functions are not conforming
# to the C99 standard. See ticket 1574.
# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
+# FIXME: this will probably change when we require full C99 compatibility
with np.errstate(all='ignore'):
functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
or (np.log(complex(np.NZERO, 0)).imag != np.pi))
# TODO: replace with a check on whether platform-provided C99 funcs are used
-skip_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
+xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
+
+# TODO: This can become xfail once the generator functions are removed.
+platform_skip = pytest.mark.skipif(xfail_complex_tests,
+ reason="Inadequate C99 complex support")
-def platform_skip(func):
- return dec.skipif(skip_complex_tests,
- "Numpy is using complex functions (e.g. sqrt) provided by your"
- "platform's C library. However, they do not seem to behave according"
- "to C99 -- so C99 tests are skipped.")(func)
class TestCexp(object):
check = check_complex_value
f = np.exp
- yield check, f, 1, 0, np.exp(1), 0, False
- yield check, f, 0, 1, np.cos(1), np.sin(1), False
+ check(f, 1, 0, np.exp(1), 0, False)
+ check(f, 0, 1, np.cos(1), np.sin(1), False)
ref = np.exp(1) * complex(np.cos(1), np.sin(1))
- yield check, f, 1, 1, ref.real, ref.imag, False
+ check(f, 1, 1, ref.real, ref.imag, False)
@platform_skip
def test_special_values(self):
f = np.exp
# cexp(+-0 + 0i) is 1 + 0i
- yield check, f, np.PZERO, 0, 1, 0, False
- yield check, f, np.NZERO, 0, 1, 0, False
+ check(f, np.PZERO, 0, 1, 0, False)
+ check(f, np.NZERO, 0, 1, 0, False)
# cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
# exception
- yield check, f, 1, np.inf, np.nan, np.nan
- yield check, f, -1, np.inf, np.nan, np.nan
- yield check, f, 0, np.inf, np.nan, np.nan
+ check(f, 1, np.inf, np.nan, np.nan)
+ check(f, -1, np.inf, np.nan, np.nan)
+ check(f, 0, np.inf, np.nan, np.nan)
# cexp(inf + 0i) is inf + 0i
- yield check, f, np.inf, 0, np.inf, 0
+ check(f, np.inf, 0, np.inf, 0)
# cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
- yield check, f, -np.inf, 1, np.PZERO, np.PZERO
- yield check, f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO
+ check(f, -np.inf, 1, np.PZERO, np.PZERO)
+ check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO)
# cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
- yield check, f, np.inf, 1, np.inf, np.inf
- yield check, f, np.inf, 0.75 * np.pi, -np.inf, np.inf
+ check(f, np.inf, 1, np.inf, np.inf)
+ check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf)
# cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
def _check_ninf_inf(dummy):
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_ninf_inf, None
+ _check_ninf_inf(None)
# cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
def _check_inf_inf(dummy):
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_inf_inf, None
+ _check_inf_inf(None)
# cexp(-inf + nan i) is +-0 +- 0i
def _check_ninf_nan(dummy):
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_ninf_nan, None
+ _check_ninf_nan(None)
# cexp(inf + nan i) is +-inf + nan
def _check_inf_nan(dummy):
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_inf_nan, None
+ _check_inf_nan(None)
# cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
# ex)
- yield check, f, np.nan, 1, np.nan, np.nan
- yield check, f, np.nan, -1, np.nan, np.nan
+ check(f, np.nan, 1, np.nan, np.nan)
+ check(f, np.nan, -1, np.nan, np.nan)
- yield check, f, np.nan, np.inf, np.nan, np.nan
- yield check, f, np.nan, -np.inf, np.nan, np.nan
+ check(f, np.nan, np.inf, np.nan, np.nan)
+ check(f, np.nan, -np.inf, np.nan, np.nan)
# cexp(nan + nani) is nan + nani
- yield check, f, np.nan, np.nan, np.nan, np.nan
+ check(f, np.nan, np.nan, np.nan, np.nan)
- @dec.knownfailureif(True, "cexp(nan + 0I) is wrong on most implementations")
+    # TODO: This can become xfail once the generator functions are removed.
+ @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms")
def test_special_values2(self):
# XXX: most implementations get it wrong here (including glibc <= 2.10)
# cexp(nan + 0i) is nan + 0i
check = check_complex_value
f = np.exp
- yield check, f, np.nan, 0, np.nan, 0
+ check(f, np.nan, 0, np.nan, 0)
class TestClog(object):
def test_simple(self):
assert_almost_equal(y[i], y_r[i])
@platform_skip
- @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_special_values(self):
xl = []
yl = []
for i in range(len(xa)):
assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
+
class TestCsqrt(object):
def test_simple(self):
# sqrt(1)
- yield check_complex_value, np.sqrt, 1, 0, 1, 0
+ check_complex_value(np.sqrt, 1, 0, 1, 0)
# sqrt(1i)
- yield check_complex_value, np.sqrt, 0, 1, 0.5*np.sqrt(2), 0.5*np.sqrt(2), False
+ rres = 0.5*np.sqrt(2)
+ ires = rres
+ check_complex_value(np.sqrt, 0, 1, rres, ires, False)
# sqrt(-1)
- yield check_complex_value, np.sqrt, -1, 0, 0, 1
+ check_complex_value(np.sqrt, -1, 0, 0, 1)
def test_simple_conjugate(self):
ref = np.conj(np.sqrt(complex(1, 1)))
def f(z):
return np.sqrt(np.conj(z))
- yield check_complex_value, f, 1, 1, ref.real, ref.imag, False
+
+ check_complex_value(f, 1, 1, ref.real, ref.imag, False)
#def test_branch_cut(self):
# _check_branch_cut(f, -1, 0, 1, -1)
f = np.sqrt
# csqrt(+-0 + 0i) is 0 + 0i
- yield check, f, np.PZERO, 0, 0, 0
- yield check, f, np.NZERO, 0, 0, 0
+ check(f, np.PZERO, 0, 0, 0)
+ check(f, np.NZERO, 0, 0, 0)
# csqrt(x + infi) is inf + infi for any x (including NaN)
- yield check, f, 1, np.inf, np.inf, np.inf
- yield check, f, -1, np.inf, np.inf, np.inf
+ check(f, 1, np.inf, np.inf, np.inf)
+ check(f, -1, np.inf, np.inf, np.inf)
- yield check, f, np.PZERO, np.inf, np.inf, np.inf
- yield check, f, np.NZERO, np.inf, np.inf, np.inf
- yield check, f, np.inf, np.inf, np.inf, np.inf
- yield check, f, -np.inf, np.inf, np.inf, np.inf
- yield check, f, -np.nan, np.inf, np.inf, np.inf
+ check(f, np.PZERO, np.inf, np.inf, np.inf)
+ check(f, np.NZERO, np.inf, np.inf, np.inf)
+ check(f, np.inf, np.inf, np.inf, np.inf)
+ check(f, -np.inf, np.inf, np.inf, np.inf)
+ check(f, -np.nan, np.inf, np.inf, np.inf)
# csqrt(x + nani) is nan + nani for any finite x
- yield check, f, 1, np.nan, np.nan, np.nan
- yield check, f, -1, np.nan, np.nan, np.nan
- yield check, f, 0, np.nan, np.nan, np.nan
+ check(f, 1, np.nan, np.nan, np.nan)
+ check(f, -1, np.nan, np.nan, np.nan)
+ check(f, 0, np.nan, np.nan, np.nan)
# csqrt(-inf + yi) is +0 + infi for any finite y > 0
- yield check, f, -np.inf, 1, np.PZERO, np.inf
+ check(f, -np.inf, 1, np.PZERO, np.inf)
# csqrt(inf + yi) is +inf + 0i for any finite y > 0
- yield check, f, np.inf, 1, np.inf, np.PZERO
+ check(f, np.inf, 1, np.inf, np.PZERO)
# csqrt(-inf + nani) is nan +- infi (both +i infi are valid)
def _check_ninf_nan(dummy):
if not (np.isnan(z.real) and np.isinf(z.imag)):
raise AssertionError(msgform % (z.real, z.imag))
- yield _check_ninf_nan, None
+ _check_ninf_nan(None)
# csqrt(+inf + nani) is inf + nani
- yield check, f, np.inf, np.nan, np.inf, np.nan
+ check(f, np.inf, np.nan, np.inf, np.nan)
# csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x
# + nani)
- yield check, f, np.nan, 0, np.nan, np.nan
- yield check, f, np.nan, 1, np.nan, np.nan
- yield check, f, np.nan, np.nan, np.nan, np.nan
+ check(f, np.nan, 0, np.nan, np.nan)
+ check(f, np.nan, 1, np.nan, np.nan)
+ check(f, np.nan, np.nan, np.nan, np.nan)
# XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
# cuts first)
# cabs(+-nan + nani) returns nan
x.append(np.nan)
y.append(np.nan)
- yield check_real_value, np.abs, np.nan, np.nan, np.nan
+ check_real_value(np.abs, np.nan, np.nan, np.nan)
x.append(np.nan)
y.append(-np.nan)
- yield check_real_value, np.abs, -np.nan, np.nan, np.nan
+ check_real_value(np.abs, -np.nan, np.nan, np.nan)
# According to C99 standard, if exactly one of the real/part is inf and
# the other nan, then cabs should return inf
x.append(np.inf)
y.append(np.nan)
- yield check_real_value, np.abs, np.inf, np.nan, np.inf
+ check_real_value(np.abs, np.inf, np.nan, np.inf)
x.append(-np.inf)
y.append(np.nan)
- yield check_real_value, np.abs, -np.inf, np.nan, np.inf
+ check_real_value(np.abs, -np.inf, np.nan, np.inf)
# cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
def f(a):
xa = np.array(x, dtype=complex)
for i in range(len(xa)):
ref = g(x[i], y[i])
- yield check_real_value, f, x[i], y[i], ref
+ check_real_value(f, x[i], y[i], ref)
class TestCarg(object):
def test_simple(self):
check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
- @dec.knownfailureif(True,
- "Complex arithmetic with signed zero is buggy on most implementation")
+    # TODO: This can become xfail once the generator functions are removed.
+ @pytest.mark.skip(
+ reason="Complex arithmetic with signed zero fails on most platforms")
def test_zero(self):
# carg(-0 +- 0i) returns +- pi
- yield check_real_value, ncu._arg, np.NZERO, np.PZERO, np.pi, False
- yield check_real_value, ncu._arg, np.NZERO, np.NZERO, -np.pi, False
+ check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False)
+ check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False)
# carg(+0 +- 0i) returns +- 0
- yield check_real_value, ncu._arg, np.PZERO, np.PZERO, np.PZERO
- yield check_real_value, ncu._arg, np.PZERO, np.NZERO, np.NZERO
+ check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+ check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO)
# carg(x +- 0i) returns +- 0 for x > 0
- yield check_real_value, ncu._arg, 1, np.PZERO, np.PZERO, False
- yield check_real_value, ncu._arg, 1, np.NZERO, np.NZERO, False
+ check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False)
+ check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False)
# carg(x +- 0i) returns +- pi for x < 0
- yield check_real_value, ncu._arg, -1, np.PZERO, np.pi, False
- yield check_real_value, ncu._arg, -1, np.NZERO, -np.pi, False
+ check_real_value(ncu._arg, -1, np.PZERO, np.pi, False)
+ check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False)
# carg(+- 0 + yi) returns pi/2 for y > 0
- yield check_real_value, ncu._arg, np.PZERO, 1, 0.5 * np.pi, False
- yield check_real_value, ncu._arg, np.NZERO, 1, 0.5 * np.pi, False
+ check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False)
+ check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False)
# carg(+- 0 + yi) returns -pi/2 for y < 0
- yield check_real_value, ncu._arg, np.PZERO, -1, 0.5 * np.pi, False
- yield check_real_value, ncu._arg, np.NZERO, -1, -0.5 * np.pi, False
+ check_real_value(ncu._arg, np.PZERO, -1, 0.5 * np.pi, False)
+ check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False)
#def test_branch_cuts(self):
# _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
def test_special_values(self):
# carg(-np.inf +- yi) returns +-pi for finite y > 0
- yield check_real_value, ncu._arg, -np.inf, 1, np.pi, False
- yield check_real_value, ncu._arg, -np.inf, -1, -np.pi, False
+ check_real_value(ncu._arg, -np.inf, 1, np.pi, False)
+ check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
# carg(np.inf +- yi) returns +-0 for finite y > 0
- yield check_real_value, ncu._arg, np.inf, 1, np.PZERO, False
- yield check_real_value, ncu._arg, np.inf, -1, np.NZERO, False
+ check_real_value(ncu._arg, np.inf, 1, np.PZERO, False)
+ check_real_value(ncu._arg, np.inf, -1, np.NZERO, False)
# carg(x +- np.infi) returns +-pi/2 for finite x
- yield check_real_value, ncu._arg, 1, np.inf, 0.5 * np.pi, False
- yield check_real_value, ncu._arg, 1, -np.inf, -0.5 * np.pi, False
+ check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False)
+ check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
# carg(-np.inf +- np.infi) returns +-3pi/4
- yield check_real_value, ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False
- yield check_real_value, ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False
+ check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False)
+ check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
# carg(np.inf +- np.infi) returns +-pi/4
- yield check_real_value, ncu._arg, np.inf, np.inf, 0.25 * np.pi, False
- yield check_real_value, ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False
+ check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False)
+ check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
# carg(x + yi) returns np.nan if x or y is nan
- yield check_real_value, ncu._arg, np.nan, 0, np.nan, False
- yield check_real_value, ncu._arg, 0, np.nan, np.nan, False
+ check_real_value(ncu._arg, np.nan, 0, np.nan, False)
+ check_real_value(ncu._arg, 0, np.nan, np.nan, False)
+
+ check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
+ check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
- yield check_real_value, ncu._arg, np.nan, np.inf, np.nan, False
- yield check_real_value, ncu._arg, np.inf, np.nan, np.nan, False
def check_real_value(f, x1, y1, x, exact=True):
z1 = np.array([complex(x1, y1)])
else:
assert_almost_equal(f(z1), x)
+
def check_complex_value(f, x1, y1, x2, y2, exact=True):
z1 = np.array([complex(x1, y1)])
z2 = complex(x2, y2)
assert_equal(f(z1), z2)
else:
assert_almost_equal(f(z1), z2)
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.compat import unicode
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal)
+from numpy.testing import assert_, assert_equal, assert_array_equal
# Guess the UCS length for this python interpreter
if sys.version_info[:2] >= (3, 3):
"""Check the byteorder in unicode (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
-
-
-if __name__ == "__main__":
- run_module_suite()
--- /dev/null
+"""
+Shim for _umath_tests to allow a deprecation period for the new name.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
+
+# 2018-04-04, numpy 1.15.0
+warnings.warn(("numpy.core.umath_tests is an internal NumPy "
+ "module and should not be imported. It will "
+ "be removed in a future NumPy release."),
+ category=DeprecationWarning, stacklevel=2)
+
+from ._umath_tests import *
_pointer_type_cache[(dtype, shape, ndim, num)] = klass
return klass
-if ctypes is not None:
- ct = ctypes
- ################################################################
- # simple types
-
- # maps the numpy typecodes like '<f8' to simple ctypes types like
- # c_double. Filled in by prep_simple.
- _typecodes = {}
-
- def prep_simple(simple_type, dtype):
- """Given a ctypes simple type, construct and attach an
- __array_interface__ property to it if it does not yet have one.
- """
- try: simple_type.__array_interface__
- except AttributeError: pass
- else: return
-
- typestr = _dtype(dtype).str
- _typecodes[typestr] = simple_type
-
- def __array_interface__(self):
- return {'descr': [('', typestr)],
- '__ref': self,
- 'strides': None,
- 'shape': (),
- 'version': 3,
- 'typestr': typestr,
- 'data': (ct.addressof(self), False),
- }
-
- simple_type.__array_interface__ = property(__array_interface__)
+def _get_typecodes():
+ """ Return a dictionary mapping __array_interface__ formats to ctypes types """
+ ct = ctypes
simple_types = [
- ((ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong), "i"),
- ((ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong), "u"),
- ((ct.c_float, ct.c_double), "f"),
+ ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
+ ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
+ ct.c_float, ct.c_double,
]
- # Prep that numerical ctypes types:
- for types, code in simple_types:
- for tp in types:
- prep_simple(tp, "%c%d" % (code, ct.sizeof(tp)))
+ return {_dtype(ctype).str: ctype for ctype in simple_types}
- ################################################################
- # array types
- _ARRAY_TYPE = type(ct.c_int * 1)
+def _ctype_ndarray(element_type, shape):
+ """ Create an ndarray of the given element type and shape """
+ for dim in shape[::-1]:
+ element_type = element_type * dim
+ return element_type
- def prep_array(array_type):
- """Given a ctypes array type, construct and attach an
- __array_interface__ property to it if it does not yet have one.
- """
- try: array_type.__array_interface__
- except AttributeError: pass
- else: return
-
- shape = []
- ob = array_type
- while type(ob) is _ARRAY_TYPE:
- shape.append(ob._length_)
- ob = ob._type_
- shape = tuple(shape)
- ai = ob().__array_interface__
- descr = ai['descr']
- typestr = ai['typestr']
-
- def __array_interface__(self):
- return {'descr': descr,
- '__ref': self,
- 'strides': None,
- 'shape': shape,
- 'version': 3,
- 'typestr': typestr,
- 'data': (ct.addressof(self), False),
- }
-
- array_type.__array_interface__ = property(__array_interface__)
-
- def prep_pointer(pointer_obj, shape):
- """Given a ctypes pointer object, construct and
- attach an __array_interface__ property to it if it does not
- yet have one.
- """
- try: pointer_obj.__array_interface__
- except AttributeError: pass
- else: return
-
- contents = pointer_obj.contents
- dtype = _dtype(type(contents))
-
- inter = {'version': 3,
- 'typestr': dtype.str,
- 'data': (ct.addressof(contents), False),
- 'shape': shape}
-
- pointer_obj.__array_interface__ = inter
- ################################################################
- # public functions
+if ctypes is not None:
+ _typecodes = _get_typecodes()
def as_array(obj, shape=None):
- """Create a numpy array from a ctypes array or a ctypes POINTER.
+ """
+ Create a numpy array from a ctypes array or POINTER.
+
The numpy array shares the memory with the ctypes object.
- The size parameter must be given if converting from a ctypes POINTER.
- The size parameter is ignored if converting from a ctypes array
+ The shape parameter must be given if converting from a ctypes POINTER.
+    The shape parameter is ignored if converting from a ctypes array.
"""
- tp = type(obj)
- try: tp.__array_interface__
- except AttributeError:
- if hasattr(obj, 'contents'):
- prep_pointer(obj, shape)
- else:
- prep_array(tp)
+ if isinstance(obj, ctypes._Pointer):
+ # convert pointers to an array of the desired shape
+ if shape is None:
+ raise TypeError(
+ 'as_array() requires a shape argument when called on a '
+ 'pointer')
+ p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))
+ obj = ctypes.cast(obj, p_arr_type).contents
+
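+        # e.g. as_array(ctypes.pointer(ctypes.c_double(0.0)), shape=(1,))
+        # returns a one-element float64 array sharing the pointer's memory.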
return array(obj, copy=False)
def as_ctypes(obj):
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
- tp = _typecodes[ai["typestr"]]
- for dim in ai["shape"][::-1]:
- tp = tp * dim
+ tp = _ctype_ndarray(_typecodes[ai["typestr"]], ai["shape"])
result = tp.from_address(addr)
result.__keep = ai
return result
# Normally numpy is installed if the above import works, but an interrupted
# in-place build could also have left a __config__.py. In that case the
# next import may still fail, so keep it inside the try block.
- from numpy.testing.nosetester import _numpy_tester
- test = _numpy_tester().test
+ from numpy.testing._private.pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
except ImportError:
pass
def customized_fcompiler(plat=None, compiler=None):
from numpy.distutils.fcompiler import new_fcompiler
c = new_fcompiler(plat=plat, compiler=compiler)
- c.customize()
+ c.customize()
return c
def customized_ccompiler(plat=None, compiler=None):
return ret
def _compile (self, body, headers, include_dirs, lang):
- return self._wrap_method(old_config._compile, lang,
- (body, headers, include_dirs, lang))
+ src, obj = self._wrap_method(old_config._compile, lang,
+ (body, headers, include_dirs, lang))
+ # _compile in unixcompiler.py sometimes creates .d dependency files.
+ # Clean them up.
+ self.temp_files.append(obj + '.d')
+ return src, obj
def _link (self, body,
headers, include_dirs,
return ''
return os.sep.join(l)
+def sorted_glob(fileglob):
+ """sorts output of python glob for http://bugs.python.org/issue30461
+ to allow extensions to have reproducible build results"""
+ return sorted(glob.glob(fileglob))
+
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
- p = glob.glob(n)
- p2 = glob.glob(njoin(local_path, n))
+ p = sorted_glob(n)
+ p2 = sorted_glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
# get *.h files from list of directories
headers = []
for d in directory_list:
- head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
+ head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
- dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]
+ dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
- #. *.txt -> parent/a.txt, parent/b.txt
- #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
- #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
+ #. ``*``.txt -> parent/a.txt, parent/b.txt
+    #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
+ #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
- #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
- #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
- #. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+ #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
-if os.path.isdir(extra_dll_dir) and sys.platform == 'win32':
- try:
- from ctypes import windll, c_wchar_p
- _AddDllDirectory = windll.kernel32.AddDllDirectory
- _AddDllDirectory.argtypes = [c_wchar_p]
- # Needed to initialize AddDllDirectory modifications
- windll.kernel32.SetDefaultDllDirectories(0x1000)
- except AttributeError:
- def _AddDllDirectory(dll_directory):
- os.environ.setdefault('PATH', '')
- os.environ['PATH'] += os.pathsep + dll_directory
-
- _AddDllDirectory(extra_dll_dir)
+if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
+ os.environ.setdefault('PATH', '')
+ os.environ['PATH'] += os.pathsep + extra_dll_dir
""")
'blis': blis_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
+ 'accelerate': accelerate_info, # use blas_opt instead
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
if not atlas_info:
atlas_info = get_info('atlas')
- if sys.platform == 'darwin' \
- and not os.getenv('_PYTHON_HOST_PLATFORM', None) \
- and not (atlas_info or openblas_info or
- lapack_mkl_info):
- # Use the system lapack from Accelerate or vecLib under OSX
- args = []
- link_args = []
- if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
- 'x86_64' in get_platform() or \
- 'i386' in platform.platform():
- intel = 1
- else:
- intel = 0
- if os.path.exists('/System/Library/Frameworks'
- '/Accelerate.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
- elif os.path.exists('/System/Library/Frameworks'
- '/vecLib.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
- if args:
- self.set_info(extra_compile_args=args,
- extra_link_args=link_args,
- define_macros=[('NO_ATLAS_INFO', 3),
- ('HAVE_CBLAS', None)])
- return
+ accelerate_info = get_info('accelerate')
+ if accelerate_info and not atlas_info:
+ self.set_info(**accelerate_info)
+ return
need_lapack = 0
need_blas = 0
if not atlas_info:
atlas_info = get_info('atlas_blas')
- if sys.platform == 'darwin' \
- and not os.getenv('_PYTHON_HOST_PLATFORM', None) \
- and not (atlas_info or openblas_info or
- blas_mkl_info or blis_info):
- # Use the system BLAS from Accelerate or vecLib under OSX
- args = []
- link_args = []
- if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
- 'x86_64' in get_platform() or \
- 'i386' in platform.platform():
- intel = 1
- else:
- intel = 0
- if os.path.exists('/System/Library/Frameworks'
- '/Accelerate.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- args.extend([
- '-I/System/Library/Frameworks/vecLib.framework/Headers'])
- link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
- elif os.path.exists('/System/Library/Frameworks'
- '/vecLib.framework/'):
- if intel:
- args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
- args.extend([
- '-I/System/Library/Frameworks/vecLib.framework/Headers'])
- link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
- if args:
- self.set_info(extra_compile_args=args,
- extra_link_args=link_args,
- define_macros=[('NO_ATLAS_INFO', 3),
- ('HAVE_CBLAS', None)])
- return
+ accelerate_info = get_info('accelerate')
+ if accelerate_info and not atlas_info:
+ self.set_info(**accelerate_info)
+ return
need_blas = 0
info = {}
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
- s = """void zungqr();
+ s = """void zungqr_();
int main(int argc, const char *argv[])
{
zungqr_();
include_dirs=incl_dirs)
self.set_info(**info)
+class accelerate_info(system_info):
+ section = 'accelerate'
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+        # Make it possible to enable/disable this via a config file or env var
+ libraries = os.environ.get('ACCELERATE')
+ if libraries:
+ libraries = [libraries]
+ else:
+ libraries = self.get_libs('libraries', ['accelerate', 'veclib'])
+ libraries = [lib.strip().lower() for lib in libraries]
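+        # e.g. ACCELERATE=accelerate in the environment restricts the
+        # search below to the Accelerate framework only.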
+
+ if (sys.platform == 'darwin' and
+ not os.getenv('_PYTHON_HOST_PLATFORM', None)):
+ # Use the system BLAS from Accelerate or vecLib under OSX
+ args = []
+ link_args = []
+ if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
+ 'x86_64' in get_platform() or \
+ 'i386' in platform.platform():
+ intel = 1
+ else:
+ intel = 0
+ if (os.path.exists('/System/Library/Frameworks'
+ '/Accelerate.framework/') and
+ 'accelerate' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ else:
+ args.extend(['-faltivec'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
+ elif (os.path.exists('/System/Library/Frameworks'
+ '/vecLib.framework/') and
+ 'veclib' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ else:
+ args.extend(['-faltivec'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
+
+ if args:
+ self.set_info(extra_compile_args=args,
+ extra_link_args=link_args,
+ define_macros=[('NO_ATLAS_INFO', 3),
+ ('HAVE_CBLAS', None)])
+
+ return
class blas_src_info(system_info):
section = 'blas_src'
from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
-from numpy.testing import run_module_suite, tempdir, assert_
+from numpy.testing import tempdir, assert_
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
self.check_nt(use_tee=1)
self.check_execute_in(use_tee=0)
self.check_execute_in(use_tee=1)
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
-from numpy.testing import assert_, run_module_suite
+from numpy.testing import assert_
import numpy.distutils.fcompiler
for vs, _ in g77_version_strings:
v = fc.version_match(vs)
assert_(v is None, (vs, v))
-
-
-if __name__ == '__main__':
- run_module_suite()
from __future__ import division, absolute_import, print_function
import numpy.distutils.fcompiler
-from numpy.testing import run_module_suite, assert_
+from numpy.testing import assert_
intel_32bit_version_strings = [
for vs, version in intel_64bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
-
-
-if __name__ == '__main__':
- run_module_suite()
from __future__ import division, absolute_import, print_function
-from numpy.testing import assert_, run_module_suite
-
+from numpy.testing import assert_
import numpy.distutils.fcompiler
nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release '
fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp)
v = fc.version_match(vs)
assert_(v == version)
-
-
-if __name__ == '__main__':
- run_module_suite()
from numpy.distutils.from_template import process_str
-from numpy.testing import assert_equal, run_module_suite
+from numpy.testing import assert_equal
pyf_src = """
normalized_pyf = normalize_whitespace(pyf)
normalized_expected_pyf = normalize_whitespace(expected_pyf)
assert_equal(normalized_pyf, normalized_expected_pyf)
-
-
-if __name__ == "__main__":
- run_module_suite()
from numpy.distutils.misc_util import (
appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
-)
+ )
from numpy.testing import (
- run_module_suite, assert_, assert_equal
-)
+ assert_, assert_equal
+ )
ajoin = lambda *paths: join(*((sep,)+paths))
# Regression test for gh-7707. If npymath.ini wasn't installed, then this
# will give an error.
info = get_info('npymath')
-
-
-if __name__ == "__main__":
- run_module_suite()
import os
from numpy.distutils.npy_pkg_config import read_config, parse_flags
-from numpy.testing import run_module_suite, temppath, assert_
+from numpy.testing import temppath, assert_
simple = """\
[meta]
d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
assert_(d['libraries'] == ['foo', 'bar'])
-
-
-if __name__ == '__main__':
- run_module_suite()
import os
import shutil
+import pytest
from tempfile import mkstemp, mkdtemp
from subprocess import Popen, PIPE
from distutils.errors import DistutilsError
from numpy.distutils import ccompiler, customized_ccompiler
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, dec
- )
+from numpy.testing import assert_, assert_equal
from numpy.distutils.system_info import system_info, ConfigParser
from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
extra = tsi.calc_extra_info()
assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])
- @dec.skipif(not HAVE_COMPILER)
+ @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
def test_compile1(self):
# Compile source and link the first source
c = customized_ccompiler()
finally:
os.chdir(previousDir)
- @dec.skipif(not HAVE_COMPILER)
- @dec.skipif('msvc' in repr(ccompiler.new_compiler()))
+ @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
+ @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),
+ reason="Fails with MSVC compiler ")
def test_compile2(self):
# Compile source and link the second source
tsi = self.c_temp2
assert_(os.path.isfile(self._src2.replace('.c', '.o')))
finally:
os.chdir(previousDir)
-
-
-if __name__ == '__main__':
- run_module_suite()
raise CompileError(msg)
# add commandline flags to dependency file
- with open(obj + '.d', 'a') as f:
- f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts))
+ if deps:
+ with open(obj + '.d', 'a') as f:
+ f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts))
replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile)
See Also
--------
isnan : Shows which elements are Not a Number.
+
isfinite : Shows which elements are finite (not one of
- Not a Number, positive infinity and negative infinity)
+ Not a Number, positive infinity and negative infinity)
Notes
-----
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
-2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros,
+2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
NumPy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
-shape. The default dtype is float64.
+shape. The default dtype is float64. ::
-``>>> np.zeros((2, 3))
-array([[ 0., 0., 0.], [ 0., 0., 0.]])``
+ >>> np.zeros((2, 3))
+ array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
- Fast element-wise operations, called :term:`ufuncs`, operate on arrays.
+    Fast element-wise operations, each called a :term:`ufunc`, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
>>> x.shape
(3,)
+ big-endian
+ When storing a multi-byte value in memory as a sequence of bytes, the
+ sequence addresses/sends/stores the most significant byte first (lowest
+ address) and the least significant byte last (highest address). Common in
+ micro-processors and used for transmission of data over network protocols.
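+
+      For example, ``np.dtype('>i4')`` is a big-endian 4-byte integer; the
+      value 1 is stored as the bytes ``00 00 00 01``.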
+
BLAS
`Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
For more information on dictionaries, read the
`Python tutorial <http://docs.python.org/tut>`_.
+ field
+ In a :term:`structured data type`, each sub-type is called a `field`.
+      The `field` has a name (a string), a type (any valid :term:`dtype`), and
+      an optional `title`. See :ref:`arrays.dtypes`.
+
Fortran order
See `column-major`
Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
for details.
+ homogenous
+      Describes a block of memory made up of blocks, all of the same size and
+      all interpreted in exactly the same way. In the simplest case each block
+      contains a single item, for instance int32 or float64.
+
immutable
An object that cannot be modified after execution is called
immutable. Two common examples are strings and tuples.
tutorial <http://docs.python.org/tut>`_. For a mapping
type (key-value), see *dictionary*.
+ little-endian
+ When storing a multi-byte value in memory as a sequence of bytes, the
+ sequence addresses/sends/stores the least significant byte first (lowest
+ address) and the most significant byte last (highest address). Common in
+ x86 processors.
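+
+      For example, ``np.dtype('<i4')`` is a little-endian 4-byte integer; the
+      value 1 is stored as the bytes ``01 00 00 00``.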
+
mask
A boolean array, used to select only certain elements for an operation::
See *array*.
record array
- An :term:`ndarray` with :term:`structured data type`_ which has been
+ An :term:`ndarray` with :term:`structured data type` which has been
subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
making the fields of its data type to be accessible by attribute.
>>> x[:, 1]
array([2, 4])
+ structure
+ See :term:`structured data type`
+
structured data type
A data type composed of other datatypes
Interfacing to Fortran:
-----------------------
The clear choice to wrap Fortran code is
-`f2py <http://docs.scipy.org/doc/numpy-dev/f2py/>`_.
+`f2py <http://docs.scipy.org/doc/numpy/f2py/>`_.
Pyfort is an older alternative, but not supported any longer.
Fwrap is a newer project that looked promising but isn't being developed any
Offsets may be chosen such that the fields overlap, though this will mean
that assigning to one field may clobber any overlapping field's data. As
- an exception, fields of :class:`numpy.object` type .. (see
- :ref:`object arrays <arrays.object>`) cannot overlap with other fields,
- because of the risk of clobbering the internal object pointer and then
- dereferencing it.
+ an exception, fields of :class:`numpy.object` type cannot overlap with
+ other fields, because of the risk of clobbering the internal object
+ pointer and then dereferencing it.
The optional 'aligned' value can be set to ``True`` to make the automatic
offset computation use aligned offsets (see :ref:`offsets-and-alignment`),
alignment conditions, the array will have the ``ALIGNED`` :ref:`flag
<numpy.ndarray.flags>` set.
+A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an
+aligned dtype or array to a packed one and vice versa. It takes either a dtype
+or structured ndarray as an argument, and returns a copy with fields re-packed,
+with or without padding bytes.
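+
+For example, assuming a platform where ``f4`` requires 4-byte alignment::
+
+    >>> from numpy.lib.recfunctions import repack_fields
+    >>> dt = np.dtype('u1, f4', align=True)
+    >>> dt.itemsize, repack_fields(dt).itemsize
+    (8, 5)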
+
.. _titles:
Field Titles
``dtype``. This dtype is similar to a 'union' in C.
Indexing and Assignment to Structured arrays
-=============================================
+============================================
Assigning data to a Structured Array
------------------------------------
tuples, using scalar values, or using other structured arrays.
Assignment from Python Native Types (Tuples)
-```````````````````````````````````````````
+````````````````````````````````````````````
The simplest way to assign values to a structured array is using python tuples.
Each assigned value should be a tuple of length equal to the number of fields
Accessing Multiple Fields
```````````````````````````
-One can index a structured array with a multi-field index, where the index is a
-list of field names::
+One can index and assign to a structured array with a multi-field index, where
+the index is a list of field names.
+
+.. warning::
+ The behavior of multi-field indexes will change from Numpy 1.15 to Numpy
+ 1.16.
- >>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'i4'), ('c', 'f8')])
+In Numpy 1.16, the result of indexing with a multi-field index will be a view
+into the original array, as follows::
+
+ >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
>>> a[['a', 'c']]
- array([(0, 0.0), (0, 0.0), (0, 0.0)],
- dtype={'names':['a','c'], 'formats':['<i8','<f8'], 'offsets':[0,11], 'itemsize':19})
+ array([(0, 0.), (0, 0.), (0, 0.)],
+ dtype={'names':['a','c'], 'formats':['<i4','<f4'], 'offsets':[0,8], 'itemsize':12})
+
+Assignment to the view modifies the original array. The view's fields will be
+in the order they were indexed. Note that unlike for single-field indexing, the
+view's dtype has the same itemsize as the original array, with fields at the
+same offsets as in the original array; unindexed fields are merely missing.
+
+In Numpy 1.15, indexing an array with a multi-field index returns a copy of
+the result above for 1.16, but with fields packed together in memory as if
+passed through :func:`numpy.lib.recfunctions.repack_fields`. This has been
+the behavior since Numpy 1.7.
+
+.. warning::
+ The new behavior in Numpy 1.16 leads to extra "padding" bytes at the
+ location of unindexed fields. You will need to update any code which depends
+   on the data having a "packed" layout. For instance, code such as::
+
+ >>> a[['a','c']].view('i8') # will fail in Numpy 1.16
+ ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
+
+ will need to be changed. This code has raised a ``FutureWarning`` since
+ Numpy 1.12.
+
+ The following is a recommended fix, which will behave identically in Numpy
+ 1.15 and Numpy 1.16::
+
+ >>> from numpy.lib.recfunctions import repack_fields
+ >>> repack_fields(a[['a','c']]).view('i8') # supported 1.15 and 1.16
+ array([0, 0, 0])
+
+Assigning to an array with a multi-field index will behave the same in Numpy
+1.15 and Numpy 1.16. In both versions the assignment will modify the original
+array::
+
>>> a[['a', 'c']] = (2, 3)
>>> a
array([(2, 0, 3.0), (2, 0, 3.0), (2, 0, 3.0)],
dtype=[('a', '<i8'), ('b', '<i4'), ('c', '<f8')])
-The resulting array is a view into the original array, such that assignment to
-the view modifies the original array. The view's fields will be in the order
-they were indexed. Note that unlike for single-field indexing, the view's dtype
-has the same itemsize as the original array, and has fields at the same offsets
-as in the original array, and unindexed fields are merely missing.
-
-Since the view is a structured array itself, it obeys the assignment rules
-described above. For example, this means that one can swap the values of two
-fields using appropriate multi-field indexes::
+This obeys the structured array assignment rules described above. For example,
+this means that one can swap the values of two fields using appropriate
+multi-field indexes::
>>> a[['a', 'c']] = a[['c', 'a']]
Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using
``__array_wrap__`` and ``__array_prepare__``. These two allowed one to
-change the output type of a ufunc, but, in constrast to
+change the output type of a ufunc, but, in contrast to
``__array_ufunc__``, did not allow one to make any changes to the inputs.
It is hoped to eventually deprecate these, but ``__array_wrap__`` is also
used by other numpy functions and methods, such as ``squeeze``, so at the
f.close()
return status
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
cont = 0
finalline = ''
ll = ''
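+    # commentline splits a source line at the first '!' that is not inside
+    # a quoted string: group 'line' is the code, group 'rest' the comment.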
+ commentline = re.compile(
+ r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)')
includeline = re.compile(
r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
break
l = l[:-1]
if not strictf77:
- (l, rl) = split_by_unquoted(l, '!')
- l += ' '
- if rl[:5].lower() == '!f2py': # f2py directive
- l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
+ r = commentline.match(l)
+ if r:
+ l = r.group('line') + ' ' # Strip comments starting with `!'
+ rl = r.group('rest')
+ if rl[:4].lower() == 'f2py': # f2py directive
+ l = l + 4 * ' '
+ r = commentline.match(rl[4:])
+ if r:
+ l = l + r.group('line')
+ else:
+ l = l + rl[4:]
if l.strip() == '': # Skip empty line
cont = 0
continue
r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
-def split_by_unquoted(line, characters):
- """
- Splits the line into (line[:i], line[i:]),
- where i is the index of first occurence of one of the characters
- not within quotes, or len(line) if no such index exists
- """
- assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
- r = re.compile(
- r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
- r"(?P<after>{char}.*)\Z".format(
- not_quoted="[^\"'{}]".format(re.escape(characters)),
- char="[{}]".format(re.escape(characters)),
- single_quoted=r"('([^'\\]|(\\.))*')",
- double_quoted=r'("([^"\\]|(\\.))*")'))
- m = r.match(line)
- if m:
- d = m.groupdict()
- return (d["before"], d["after"])
- return (line, "")
def _simplifyargs(argsline):
a = []
global filepositiontext, currentfilename, neededmodule, expectbegin
global skipblocksuntil, skipemptyends, previous_context, gotnextfile
- _, has_semicolon = split_by_unquoted(line, ";")
- if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
- multilinepattern[0].match(line)):
- # XXX: non-zero reset values need testing
- assert reset == 0, repr(reset)
- # split line on unquoted semicolons
- line, semicolon_line = split_by_unquoted(line, ";")
- while semicolon_line:
- crackline(line, reset)
- line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
- crackline(line, reset)
+ if ';' in line and not (f2pyenhancementspattern[0].match(line) or
+ multilinepattern[0].match(line)):
+ for l in line.split(';'):
+ # XXX: non-zero reset values need testing
+ assert reset == 0, repr(reset)
+ crackline(l, reset)
return
if reset < 0:
groupcounter = 0
def markoutercomma(line, comma=','):
l = ''
f = 0
- before, after = split_by_unquoted(line, comma + '()')
- l += before
- while after:
- if (after[0] == comma) and (f == 0):
- l += '@' + comma + '@'
- else:
- l += after[0]
- if after[0] == '(':
- f += 1
- elif after[0] == ')':
- f -= 1
- before, after = split_by_unquoted(after[1:], comma + '()')
- l += before
- assert not f, repr((f, line, l))
+ cc = ''
+ for c in line:
+ if (not cc or cc == ')') and c == '(':
+ f = f + 1
+ cc = ')'
+ elif not cc and c == '\'' and (not l or l[-1] != '\\'):
+ f = f + 1
+ cc = '\''
+ elif c == cc:
+ f = f - 1
+ if f == 0:
+ cc = ''
+ elif c == comma and f == 0:
+ l = l + '@' + comma + '@'
+ continue
+ l = l + c
+ assert not f, repr((f, line, l, cc))
return l
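+# For example, markoutercomma("a,(b,c),d") returns 'a@,@(b,c)@,@d': only
+# commas outside parentheses and quotes receive the @,@ marker.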
+
def unmarkouterparen(line):
r = line.replace('@(@', '(').replace('@)@', ')')
return r
if p < 16:
return 8
machine = platform.machine().lower()
- if machine.startswith('power') or machine.startswith('ppc64'):
+ if machine.startswith(('aarch64', 'power', 'ppc64', 's390x')):
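+        # These platforms provide a 128-bit floating-point type, so
+        # precision requests beyond double can map to a 16-byte real.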
if p <= 20:
return 16
else:
fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n",
cb_passed_counter,cb_passed_time);
- fprintf(stderr,"(e) wrapped (Fortran/C) functions (acctual) : %8d msec\n\n",
+ fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n",
passed_call_time-cb_passed_call_time-cb_passed_time);
fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n");
fprintf(stderr,"Exit status: %d\n",exit_flag);
from numpy import (
array, alltrue, ndarray, zeros, dtype, intp, clongdouble
-)
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, SkipTest
-)
+ )
+from numpy.testing import assert_, assert_equal, SkipTest
from numpy.core.multiarray import typeinfo
from . import util
self.type = Type(%r)
array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj)
''' % (t, t, t))
-
-if __name__ == "__main__":
- setup_module()
- run_module_suite()
from __future__ import division, absolute_import, print_function
import os
+import pytest
-from numpy.testing import run_module_suite, assert_, dec
+from numpy.testing import assert_
from . import util
_path('src', 'assumed_shape', 'foo_mod.f90'),
]
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
r = self.module.fsum([1, 2])
assert_(r == 3, repr(r))
assert_(r == 3, repr(r))
r = self.module.mod.fsum([1, 2])
assert_(r == 3, repr(r))
-
-if __name__ == "__main__":
- run_module_suite()
import textwrap
import sys
+import pytest
from . import util
-from numpy.testing import run_module_suite, assert_equal, dec
+from numpy.testing import assert_equal
class TestBlockDocString(util.F2PyTest):
code = """
SUBROUTINE FOO()
INTEGER BAR(2, 3)
-
+
COMMON /BLOCK/ BAR
RETURN
END
"""
- @dec.knownfailureif(sys.platform=='win32', msg='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform=='win32',
+ reason='Fails with MinGW64 Gfortran (Issue #9673)')
def test_block_docstring(self):
expected = "'i'-array(2,3)\n"
assert_equal(self.module.block.__doc__, expected)
-
-if __name__ == "__main__":
- run_module_suite()
import math
import textwrap
import sys
+import pytest
import numpy as np
-from numpy.testing import run_module_suite, assert_, assert_equal, dec
+from numpy.testing import assert_, assert_equal
from . import util
end
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t,t2".split(","):
self.check_function(name)
- @dec.slow
+ @pytest.mark.slow
def test_docstring(self):
expected = """
a = t(fun,[fun_extra_args])
r = t(a.mth)
assert_(r == 9, repr(r))
- @dec.knownfailureif(sys.platform=='win32',
- msg='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform=='win32',
+ reason='Fails with MinGW64 Gfortran (Issue #9673)')
def test_string_callback(self):
def callback(code):
r = f(callback)
assert_(r == 0, repr(r))
- @dec.knownfailureif(sys.platform=='win32',
- msg='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform=='win32',
+ reason='Fails with MinGW64 Gfortran (Issue #9673)')
def test_string_callback_array(self):
# See gh-10027
cu = np.zeros((1, 8), 'S1')
f = getattr(self.module, 'string_callback_array')
res = f(callback, cu, len(cu))
assert_(res == 0, repr(res))
-
-
-if __name__ == "__main__":
- run_module_suite()
import os
import sys
+import pytest
+
import numpy as np
from . import util
-from numpy.testing import run_module_suite, assert_array_equal, dec
+from numpy.testing import assert_array_equal
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
class TestCommonBlock(util.F2PyTest):
sources = [_path('src', 'common', 'block.f')]
- @dec.knownfailureif(sys.platform=='win32', msg='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform=='win32',
+ reason='Fails with MinGW64 Gfortran (Issue #9673)')
def test_common_block(self):
self.module.initcb()
assert_array_equal(self.module.block.long_bn,
np.array('2', dtype='|S1'))
assert_array_equal(self.module.block.ok,
np.array(3, dtype=np.int32))
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
import os
+import pytest
-from numpy.testing import run_module_suite, assert_, dec
+from numpy.testing import assert_
from numpy.f2py.crackfortran import (
_selected_int_kind_func as selected_int_kind,
_selected_real_kind_func as selected_real_kind
-)
+ )
from . import util
class TestKind(util.F2PyTest):
sources = [_path('src', 'kind', 'foo.f90')]
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
selectedrealkind = self.module.selectedrealkind
selectedintkind = self.module.selectedintkind
assert_(selectedrealkind(i) in [selected_real_kind(i), -1],
'selectedrealkind(%s): expected %r but got %r' %
(i, selected_real_kind(i), selectedrealkind(i)))
-
-if __name__ == "__main__":
- run_module_suite()
import os
import textwrap
+import pytest
-from numpy.testing import run_module_suite, assert_, assert_equal, dec
+from numpy.testing import assert_, assert_equal
from . import util
_path('src', 'mixed', 'foo_fixed.f90'),
_path('src', 'mixed', 'foo_free.f90')]
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
assert_(self.module.bar11() == 11)
assert_(self.module.foo_fixed.bar12() == 12)
assert_(self.module.foo_free.bar13() == 13)
- @dec.slow
+ @pytest.mark.slow
def test_docstring(self):
expected = """
a = bar11()
"""
assert_equal(self.module.bar11.__doc__,
textwrap.dedent(expected).lstrip())
-
-if __name__ == "__main__":
- run_module_suite()
import os
import math
+import pytest
import numpy as np
-from numpy.testing import run_module_suite, dec, assert_raises, assert_equal
+from numpy.testing import assert_raises, assert_equal
from . import util
_path('src', 'parameter', 'constant_non_compound.f90'),
]
- @dec.slow
+ @pytest.mark.slow
def test_constant_real_single(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float32)[::2]
self.module.foo_single(x)
assert_equal(x, [0 + 1 + 2*3, 1, 2])
- @dec.slow
+ @pytest.mark.slow
def test_constant_real_double(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
self.module.foo_double(x)
assert_equal(x, [0 + 1 + 2*3, 1, 2])
- @dec.slow
+ @pytest.mark.slow
def test_constant_compound_int(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.int32)[::2]
self.module.foo_compound_int(x)
assert_equal(x, [0 + 1 + 2*6, 1, 2])
- @dec.slow
+ @pytest.mark.slow
def test_constant_non_compound_int(self):
# check values
x = np.arange(4, dtype=np.int32)
self.module.foo_non_compound_int(x)
assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3])
- @dec.slow
+ @pytest.mark.slow
def test_constant_integer_int(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.int32)[::2]
self.module.foo_int(x)
assert_equal(x, [0 + 1 + 2*3, 1, 2])
- @dec.slow
+ @pytest.mark.slow
def test_constant_integer_long(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.int64)[::2]
self.module.foo_long(x)
assert_equal(x, [0 + 1 + 2*3, 1, 2])
- @dec.slow
+ @pytest.mark.slow
def test_constant_both(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
self.module.foo(x)
assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
- @dec.slow
+ @pytest.mark.slow
def test_constant_no(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
self.module.foo_no(x)
assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
- @dec.slow
+ @pytest.mark.slow
def test_constant_sum(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
x = np.arange(3, dtype=np.float64)
self.module.foo_sum(x)
assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
-
-
-if __name__ == "__main__":
- run_module_suite()
+++ /dev/null
-from __future__ import division, absolute_import, print_function
-
-from . import util
-
-from numpy.testing import run_module_suite, assert_equal, dec
-
-import sys
-
-class TestQuotedCharacter(util.F2PyTest):
- code = """
- SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6)
- CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR
- PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!",
- 1 OPENPAR="(", CLOSEPAR=")")
- CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
-Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
- OUT1 = SINGLE
- OUT2 = DOUBLE
- OUT3 = SEMICOL
- OUT4 = EXCLA
- OUT5 = OPENPAR
- OUT6 = CLOSEPAR
- RETURN
- END
- """
-
- @dec.knownfailureif(sys.platform=='win32', msg='Fails with MinGW64 Gfortran (Issue #9673)')
- def test_quoted_character(self):
- assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')'))
-
-if __name__ == "__main__":
- run_module_suite()
import os
import math
+import pytest
import numpy as np
-from numpy.testing import run_module_suite, dec, assert_raises, assert_equal
+from numpy.testing import assert_raises, assert_equal
from . import util
# Check that intent(in out) translates as intent(inout)
sources = [_path('src', 'regression', 'inout.f90')]
- @dec.slow
+ @pytest.mark.slow
def test_inout(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float32)[::2]
x = np.arange(3, dtype=np.float32)
self.module.foo(x)
assert_equal(x, [3, 1, 2])
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
+import pytest
+
from numpy import array
-from numpy.testing import run_module_suite, assert_, dec
+from numpy.testing import assert_
from . import util
end
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t1,t5,s0,s1,s5,ss".split(","):
self.check_function(getattr(self.module, name))
end module f90_return_char
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t1,t5,ts,s0,s1,s5,ss".split(","):
self.check_function(getattr(self.module.f90_return_char, name))
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
+import pytest
+
from numpy import array
from numpy.compat import long
-from numpy.testing import run_module_suite, assert_, assert_raises, dec
+from numpy.testing import assert_, assert_raises
from . import util
end
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","):
self.check_function(getattr(self.module, name))
end module f90_return_complex
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","):
self.check_function(getattr(self.module.f90_return_complex, name))
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
+import pytest
+
from numpy import array
from numpy.compat import long
-from numpy.testing import run_module_suite, assert_, assert_raises, dec
+from numpy.testing import assert_, assert_raises
from . import util
end
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","):
self.check_function(getattr(self.module, name))
end module f90_return_integer
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","):
self.check_function(getattr(self.module.f90_return_integer, name))
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
+import pytest
+
from numpy import array
from numpy.compat import long
-from numpy.testing import run_module_suite, assert_, assert_raises, dec
+from numpy.testing import assert_, assert_raises
from . import util
c end
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t1,t2,t4,s0,s1,s2,s4".split(","):
self.check_function(getattr(self.module, name))
end module f90_return_logical
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","):
self.check_function(getattr(self.module.f90_return_logical, name))
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
+import pytest
+
from numpy import array
from numpy.compat import long
-from numpy.testing import run_module_suite, assert_, assert_raises, dec
+from numpy.testing import assert_, assert_raises
from . import util
end python module c_ext_return_real
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t4,t8,s4,s8".split(","):
self.check_function(getattr(self.module, name))
end
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","):
self.check_function(getattr(self.module, name))
end module f90_return_real
"""
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","):
self.check_function(getattr(self.module.f90_return_real, name))
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
import os
+import pytest
-from numpy.testing import run_module_suite, assert_equal, dec
+from numpy.testing import assert_equal
from . import util
class TestSizeSumExample(util.F2PyTest):
sources = [_path('src', 'size', 'foo.f90')]
- @dec.slow
+ @pytest.mark.slow
def test_all(self):
r = self.module.foo([[]])
assert_equal(r, [0], repr(r))
r = self.module.foo([[1, 2], [3, 4], [5, 6]])
assert_equal(r, [3, 7, 11], repr(r))
- @dec.slow
+ @pytest.mark.slow
def test_transpose(self):
r = self.module.trans([[]])
assert_equal(r.T, [[]], repr(r))
r = self.module.trans([[1, 2, 3], [4, 5, 6]])
assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r))
- @dec.slow
+ @pytest.mark.slow
def test_flatten(self):
r = self.module.flatten([[]])
assert_equal(r, [], repr(r))
r = self.module.flatten([[1, 2, 3], [4, 5, 6]])
assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r))
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
import os
+import pytest
-from numpy.testing import run_module_suite, assert_array_equal, dec
+from numpy.testing import assert_array_equal
import numpy as np
from . import util
class TestString(util.F2PyTest):
sources = [_path('src', 'string', 'char.f90')]
- @dec.slow
+ @pytest.mark.slow
def test_char(self):
strings = np.array(['ab', 'cd', 'ef'], dtype='c').T
inp, out = self.module.char_test.change_strings(strings, strings.shape[1])
expected = strings.copy()
expected[1, :] = 'AAA'
assert_array_equal(out, expected)
-
-if __name__ == "__main__":
- run_module_suite()
import textwrap
import re
import random
+import pytest
import numpy.f2py
from numpy.compat import asbytes, asstr
-from numpy.testing import SkipTest, temppath, dec
+from numpy.testing import SkipTest, temppath
from importlib import import_module
try:
module = None
module_name = None
- @dec.knownfailureif(sys.platform=='win32', msg='Fails with MinGW64 Gfortran (Issue #9673)')
def setup(self):
+ if sys.platform == 'win32':
+ raise SkipTest('Fails with MinGW64 Gfortran (Issue #9673)')
+
if self.module is not None:
return
from .fftpack import *
from .helper import *
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
/* Initialization function for the module */
#if PY_MAJOR_VERSION >= 3
-#define RETVAL m
+#define RETVAL(x) x
PyMODINIT_FUNC PyInit_fftpack_lite(void)
#else
-#define RETVAL
+#define RETVAL(x)
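+/* RETVAL(x) expands to its argument on Python 3, where the init function
+   returns the module object, and to nothing on Python 2, where it returns
+   void, so `return RETVAL(m);` compiles under both. */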
PyMODINIT_FUNC
initfftpack_lite(void)
#endif
fftpack_module_documentation,
(PyObject*)NULL,PYTHON_API_VERSION);
#endif
+ if (m == NULL) {
+ return RETVAL(NULL);
+ }
/* Import the array object */
import_array();
/* XXXX Add constants here */
- return RETVAL;
+ return RETVAL(m);
}
import numpy as np
from numpy.random import random
from numpy.testing import (
- run_module_suite, assert_array_almost_equal, assert_array_equal,
- assert_raises,
+ assert_array_almost_equal, assert_array_equal, assert_raises,
)
import threading
import sys
def test_irfft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.irfft, a)
-
-
-if __name__ == "__main__":
- run_module_suite()
"""
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import run_module_suite, assert_array_almost_equal, assert_equal
+from numpy.testing import assert_array_almost_equal, assert_equal
from numpy import fft, pi
from numpy.fft.helper import _FFTCache
# Another big item - should now be the only item in the cache.
c.put_twiddle_factors(6, np.ones(4000, dtype=np.float32))
assert_equal(list(c._dict.keys()), [6])
-
-
-if __name__ == "__main__":
- run_module_suite()
__all__ += nanfunctions.__all__
__all__ += histograms.__all__
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
import os
import sys
+import warnings
import shutil
import io
if "t" in mode:
# BZ2File is missing necessary functions for TextIOWrapper
- raise ValueError("bz2 text files not supported in python2")
- else:
- return bz2.BZ2File(fn, mode)
+ warnings.warn("Assuming latin1 encoding for bz2 text file in Python2",
+ RuntimeWarning, stacklevel=5)
+ mode = mode.replace("t", "")
+ return bz2.BZ2File(fn, mode)
def _python2_gzipopen(fn, mode, encoding, newline):
""" Wrapper to open gzip in text mode.
#
def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None):
+ delimiter = _decode_line(delimiter)
+ comments = _decode_line(comments)
+
self.comments = comments
+
# Delimiter is a character
if (delimiter is None) or isinstance(delimiter, basestring):
delimiter = delimiter or None
Examples
--------
>>> from numpy.lib import NumpyVersion
- >>> if NumpyVersion(np.__version__) < '1.7.0'):
+ >>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
arr.round(out=arr)
+def _slice_at_axis(shape, sl, axis):
+ """
+    Construct a slice tuple of the same length as shape, with sl at the given axis.
+ """
+ slice_tup = (slice(None),)
+ return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1)
+
+
+def _slice_first(shape, n, axis):
+ """ Construct a slice tuple to take the first n elements along axis """
+ return _slice_at_axis(shape, slice(0, n), axis=axis)
+
+
+def _slice_last(shape, n, axis):
+ """ Construct a slice tuple to take the last n elements along axis """
+ dim = shape[axis] # doing this explicitly makes n=0 work
+ return _slice_at_axis(shape, slice(dim - n, dim), axis=axis)
+
+
+def _do_prepend(arr, pad_chunk, axis):
+ return np.concatenate(
+ (pad_chunk.astype(arr.dtype, copy=False), arr), axis=axis)
+
+
+def _do_append(arr, pad_chunk, axis):
+ return np.concatenate(
+ (arr, pad_chunk.astype(arr.dtype, copy=False)), axis=axis)
+
+
def _prepend_const(arr, pad_amt, val, axis=-1):
"""
Prepend constant `val` along `axis` of `arr`.
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),
- axis=axis)
- else:
- return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),
- arr), axis=axis)
+ return _do_prepend(arr, np.full(padshape, val, dtype=arr.dtype), axis)
def _append_const(arr, pad_amt, val, axis=-1):
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
- if val == 0:
- return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),
- axis=axis)
- else:
- return np.concatenate(
- (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, np.full(padshape, val, dtype=arr.dtype), axis)
+
def _prepend_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
- return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
+ edge_arr = arr[edge_slice]
+ return _do_prepend(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _append_edge(arr, pad_amt, axis=-1):
if pad_amt == 0:
return arr
- edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- edge_arr = arr[edge_slice].reshape(pad_singleton)
- return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
- axis=axis)
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
+ edge_arr = arr[edge_slice]
+ return _do_append(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
def _prepend_ramp(arr, pad_amt, end, axis=-1):
reverse=True).astype(np.float64)
# Appropriate slicing to extract n-dimensional edge along `axis`
- edge_slice = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_first(arr.shape, 1, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, ramp_arr, axis)
def _append_ramp(arr, pad_amt, end, axis=-1):
reverse=False).astype(np.float64)
# Slice a chunk from the edge to calculate stats on
- edge_slice = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ edge_slice = _slice_last(arr.shape, 1, axis=axis)
- # Extract edge, reshape to original rank, and extend along `axis`
- edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+ # Extract edge, and extend along `axis`
+ edge_pad = arr[edge_slice].repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
- return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)
+ return _do_append(arr, ramp_arr, axis)
def _prepend_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- max_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_first(arr.shape, num, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
def _append_max(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- max_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ max_slice = _slice_last(arr.shape, num, axis=axis)
else:
max_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate max, reshape to add singleton dimension back
- max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate max
+ max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
def _prepend_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- mean_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_first(arr.shape, num, axis=axis)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),
- arr), axis=axis)
+ return _do_prepend(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _append_mean(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- mean_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ mean_slice = _slice_last(arr.shape, num, axis=axis)
else:
mean_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate mean, reshape to add singleton dimension back
- mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate mean
+ mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- med_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_first(arr.shape, num, axis=axis)
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)
+ return _do_prepend(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _append_med(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- med_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ med_slice = _slice_last(arr.shape, num, axis=axis)
else:
med_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate median, reshape to add singleton dimension back
- med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate median
+ med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate(
- (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+ return _do_append(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
def _prepend_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- min_slice = tuple(slice(None) if i != axis else slice(num)
- for (i, x) in enumerate(arr.shape))
-
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_first(arr.shape, num, axis=axis)
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
- axis=axis)
+ return _do_prepend(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _append_min(arr, pad_amt, num, axis=-1):
num = None
# Slice a chunk from the edge to calculate stats on
- end = arr.shape[axis] - 1
if num is not None:
- min_slice = tuple(
- slice(None) if i != axis else slice(end, end - num, -1)
- for (i, x) in enumerate(arr.shape))
+ min_slice = _slice_last(arr.shape, num, axis=axis)
else:
min_slice = tuple(slice(None) for x in arr.shape)
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
-
- # Extract slice, calculate min, reshape to add singleton dimension back
- min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+ # Extract slice, calculate min
+ min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
- axis=axis)
+ return _do_append(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
def _pad_ref(arr, pad_amt, method, axis=-1):
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(pad_amt[0], 0, -1), axis=axis)
ref_chunk1 = arr[ref_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- ref_chunk1 = ref_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice1]
ref_chunk1 = 2 * edge_chunk - ref_chunk1
del edge_chunk
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1] - 1
end = arr.shape[axis] - 1
- ref_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ ref_slice = _slice_at_axis(arr.shape, slice(start, end), axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
ref_chunk2 = arr[ref_slice][rev_idx]
- if pad_amt[1] == 1:
- ref_chunk2 = ref_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice2]
ref_chunk2 = 2 * edge_chunk - ref_chunk2
del edge_chunk
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
- for (i, x) in enumerate(arr.shape))
- rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_first(arr.shape, pad_amt[0], axis=axis)
+ rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
sym_chunk1 = arr[sym_slice][rev_idx]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- sym_chunk1 = sym_chunk1.reshape(pad_singleton)
-
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = tuple(slice(None) if i != axis else 0
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice1]
sym_chunk1 = 2 * edge_chunk - sym_chunk1
del edge_chunk
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- start = arr.shape[axis] - pad_amt[1]
- end = arr.shape[axis]
- sym_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ sym_slice = _slice_last(arr.shape, pad_amt[1], axis=axis)
sym_chunk2 = arr[sym_slice][rev_idx]
- if pad_amt[1] == 1:
- sym_chunk2 = sym_chunk2.reshape(pad_singleton)
-
if 'odd' in method:
- edge_slice2 = tuple(slice(None) if i != axis else -1
- for (i, x) in enumerate(arr.shape))
- edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
+ edge_chunk = arr[edge_slice2]
sym_chunk2 = 2 * edge_chunk - sym_chunk2
del edge_chunk
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
- start = arr.shape[axis] - pad_amt[0]
- end = arr.shape[axis]
- wrap_slice = tuple(slice(None) if i != axis else slice(start, end)
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_last(arr.shape, pad_amt[0], axis=axis)
wrap_chunk1 = arr[wrap_slice]
- # Shape to restore singleton dimension after slicing
- pad_singleton = tuple(x if i != axis else 1
- for (i, x) in enumerate(arr.shape))
- if pad_amt[0] == 1:
- wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)
-
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
- wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])
- for (i, x) in enumerate(arr.shape))
+ wrap_slice = _slice_first(arr.shape, pad_amt[1], axis=axis)
wrap_chunk2 = arr[wrap_slice]
- if pad_amt[1] == 1:
- wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)
-
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
return ret
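
The `_slice_first`, `_slice_last`, and `_slice_at_axis` helpers used throughout the arraypad hunks above are introduced elsewhere in this patch; here is a minimal sketch of how they are assumed to behave (names suffixed `_sketch` are illustrative only, not part of the patch):

import numpy as np

def _slice_at_axis_sketch(shape, sl, axis):
    # Full slices on every axis except `axis`, where `sl` is used.
    return tuple(sl if i == axis else slice(None) for i in range(len(shape)))

def _slice_first_sketch(shape, n, axis):
    # Select the first `n` elements along `axis`.
    return _slice_at_axis_sketch(shape, slice(None, n), axis)

def _slice_last_sketch(shape, n, axis):
    # Select the last `n` elements along `axis`.
    return _slice_at_axis_sketch(shape, slice(shape[axis] - n, None), axis)

a = np.arange(12).reshape(3, 4)
assert a[_slice_first_sketch(a.shape, 1, axis=0)].shape == (1, 4)
assert a[_slice_last_sketch(a.shape, 2, axis=1)].shape == (3, 2)
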
-def intersect1d(ar1, ar2, assume_unique=False):
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
Parameters
----------
ar1, ar2 : array_like
- Input arrays.
+ Input arrays. Will be flattened if not already 1D.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
+ return_indices : bool
+ If True, the indices which correspond to the intersection of the two
+ arrays are returned. The first instance of a value is used if there are
+ multiple. Default is False.
+
+ .. versionadded:: 1.15.0
Returns
-------
intersect1d : ndarray
Sorted 1D array of common and unique elements.
+ comm1 : ndarray
+ The indices of the first occurrences of the common values in `ar1`.
+ Only provided if `return_indices` is True.
+ comm2 : ndarray
+ The indices of the first occurrences of the common values in `ar2`.
+ Only provided if `return_indices` is True.
+
See Also
--------
>>> from functools import reduce
>>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([3])
+
+ To return the indices of the values common to the input arrays
+ along with the intersected values:
+ >>> x = np.array([1, 1, 2, 3, 4])
+ >>> y = np.array([2, 1, 4, 6])
+ >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
+ >>> x_ind, y_ind
+ (array([0, 2, 4]), array([1, 0, 2]))
+ >>> xy, x[x_ind], y[y_ind]
+ (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
+
"""
+ ar1 = np.asanyarray(ar1)
+ ar2 = np.asanyarray(ar2)
+
if not assume_unique:
- # Might be faster than unique( intersect1d( ar1, ar2 ) )?
- ar1 = unique(ar1)
- ar2 = unique(ar2)
+ if return_indices:
+ ar1, ind1 = unique(ar1, return_index=True)
+ ar2, ind2 = unique(ar2, return_index=True)
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ else:
+ ar1 = ar1.ravel()
+ ar2 = ar2.ravel()
+
aux = np.concatenate((ar1, ar2))
- aux.sort()
- return aux[:-1][aux[1:] == aux[:-1]]
+ if return_indices:
+ aux_sort_indices = np.argsort(aux, kind='mergesort')
+ aux = aux[aux_sort_indices]
+ else:
+ aux.sort()
+
+ mask = aux[1:] == aux[:-1]
+ int1d = aux[:-1][mask]
+
+ if return_indices:
+ ar1_indices = aux_sort_indices[:-1][mask]
+ ar2_indices = aux_sort_indices[1:][mask] - ar1.size
+ if not assume_unique:
+ ar1_indices = ind1[ar1_indices]
+ ar2_indices = ind2[ar2_indices]
+
+ return int1d, ar1_indices, ar2_indices
+ else:
+ return int1d
+
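
Why ``kind='mergesort'`` matters in the `return_indices` path above: a stable sort keeps each matching element drawn from `ar1` ahead of its duplicate from `ar2`, so adjacent equal values in `aux` always pair an `ar1` index with an `ar2` index. A small sketch of the mechanics on already-unique inputs:

import numpy as np

ar1 = np.array([1, 2, 4])
ar2 = np.array([2, 1, 4, 6])
aux = np.concatenate((ar1, ar2))
order = np.argsort(aux, kind='mergesort')   # stable sort of the indices
aux = aux[order]
mask = aux[1:] == aux[:-1]                  # adjacent duplicates = matches
print(aux[:-1][mask])                       # [1 2 4], the intersection
print(order[:-1][mask])                     # [0 1 2], indices into ar1
print(order[1:][mask] - ar1.size)           # [1 0 2], indices into ar2
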
def setxor1d(ar1, ar2, assume_unique=False):
"""
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
+
"""
-Define a simple format for saving numpy arrays to disk with the full
+Binary serialization
+
+NPY format
+==========
+
+A simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
Notes
-----
-The ``.npy`` format, including reasons for creating it and a comparison of
-alternatives, is described fully in the "npy-format" NEP.
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the `"npy-format" NEP
+<http://www.numpy.org/neps/nep-0001-npy-format.html>`_; however, details have
+evolved over time and this document is more current.
"""
from __future__ import division, absolute_import, print_function
'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
- 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
+ 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc',
+ 'quantile'
]
>>> np.rot90(m, 1, (1,2))
array([[[1, 3],
[0, 2]],
-
- [[5, 7],
- [4, 6]]])
+ [[5, 7],
+ [4, 6]]])
"""
axes = tuple(axes)
return flip(transpose(m, axes_list), axes[1])
-def flip(m, axis):
+def flip(m, axis=None):
"""
Reverse the order of elements in an array along the given axis.
----------
m : array_like
Input array.
- axis : integer
- Axis in array, which entries are reversed.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to flip over. The default,
+ axis=None, will flip over all of the axes of the input array.
+ If axis is negative it counts from the last to the first axis.
+ If axis is a tuple of ints, flipping is performed on all of the axes
+ specified in the tuple.
+
+ .. versionchanged:: 1.15.0
+ None and tuples of axes are supported
Returns
-------
Notes
-----
flip(m, 0) is equivalent to flipud(m).
+
flip(m, 1) is equivalent to fliplr(m).
+
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
+ flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all
+ positions.
+
+ flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at
+ position 0 and position 1.
+
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
-
[[4, 5],
[6, 7]]])
-
>>> flip(A, 0)
array([[[4, 5],
[6, 7]],
-
[[0, 1],
[2, 3]]])
-
>>> flip(A, 1)
array([[[2, 3],
[0, 1]],
-
[[6, 7],
[4, 5]]])
-
+ >>> np.flip(A)
+ array([[[7, 6],
+ [5, 4]],
+ [[3, 2],
+ [1, 0]]])
+ >>> np.flip(A, (0, 2))
+ array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
>>> A = np.random.randn(3,4,5)
>>> np.all(flip(A,2) == A[:,:,::-1,...])
True
"""
if not hasattr(m, 'ndim'):
m = asarray(m)
- indexer = [slice(None)] * m.ndim
- try:
- indexer[axis] = slice(None, None, -1)
- except IndexError:
- raise ValueError("axis=%i is invalid for the %i-dimensional input array"
- % (axis, m.ndim))
- return m[tuple(indexer)]
+ if axis is None:
+ indexer = (np.s_[::-1],) * m.ndim
+ else:
+ axis = _nx.normalize_axis_tuple(axis, m.ndim)
+ indexer = [np.s_[:]] * m.ndim
+ for ax in axis:
+ indexer[ax] = np.s_[::-1]
+ indexer = tuple(indexer)
+ return m[indexer]
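
For illustration, the indexer built by the new `flip` is nothing more than a tuple of slices with ``::-1`` at the flipped positions; e.g. for ``axis=(0, 2)`` on a 3-d array:

import numpy as np

A = np.arange(8).reshape(2, 2, 2)
indexer = (np.s_[::-1], np.s_[:], np.s_[::-1])   # flip axes 0 and 2
assert np.array_equal(A[indexer], A[::-1, :, ::-1])
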
def iterable(y):
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
- with given values at discrete data-points.
+ with given discrete data points (`xp`, `fp`), evaluated at `x`.
Parameters
----------
x : array_like
- The x-coordinates of the interpolated values.
+ The x-coordinates at which to evaluate the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
- >>> from StringIO import StringIO
+ >>> from io import StringIO
>>> buf = StringIO()
- >>> np.disp('"Display" in a file', device=buf)
+ >>> np.disp(u'"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
If True, then allow the input array `a` to be modified by intermediate
calculations, to save memory. In this case, the contents of the input
`a` after this function completes is undefined.
+
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
- use when the desired quantile lies between two data points
+ use when the desired percentile lies between two data points
``i < j``:
- * linear: ``i + (j - i) * fraction``, where ``fraction``
- is the fractional part of the index surrounded by ``i``
- and ``j``.
- * lower: ``i``.
- * higher: ``j``.
- * nearest: ``i`` or ``j``, whichever is nearest.
- * midpoint: ``(i + j) / 2``.
+
+ * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * 'lower': ``i``.
+ * 'higher': ``j``.
+ * 'nearest': ``i`` or ``j``, whichever is nearest.
+ * 'midpoint': ``(i + j) / 2``.
.. versionadded:: 1.9.0
keepdims : bool, optional
mean
median : equivalent to ``percentile(..., 50)``
nanpercentile
+ quantile : equivalent to percentile, except with q in the range [0, 1].
Notes
-----
The different types of interpolation can be visualized graphically:
- ..plot::
+ .. plot::
+
import matplotlib.pyplot as plt
a = np.arange(4)
p = np.linspace(0, 100, 6001)
ax = plt.gca()
lines = [
- ('linear', None)
- ('higher', '--')
- ('lower', '--')
- ('nearest', '-.')
- ('midpoint', '-.')
+ ('linear', None),
+ ('higher', '--'),
+ ('lower', '--'),
+ ('nearest', '-.'),
+ ('midpoint', '-.'),
]
for interpolation, style in lines:
ax.plot(
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def quantile(a, q, axis=None, out=None,
+ overwrite_input=False, interpolation='linear', keepdims=False):
+ """
+ Compute the `q`th quantile of the data along the specified axis.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+
+ * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * 'lower': ``i``.
+ * 'higher': ``j``.
+ * 'nearest': ``i`` or ``j``, whichever is nearest.
+ * 'midpoint': ``(i + j) / 2``.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single quantile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean
+ percentile : equivalent to quantile, but with q in the range [0, 100].
+ median : equivalent to ``quantile(..., 0.5)``
+ nanquantile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``N``, the ``q``-th quantile of
+ ``V`` is the value ``q`` of the way from the minimum to the
+ maximum in a sorted copy of ``V``. The values and distances of
+ the two nearest neighbors as well as the `interpolation` parameter
+ will determine the quantile if the normalized ranking does not
+ match the location of ``q`` exactly. This function is the same as
+ the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the
+ same as the maximum if ``q=1.0``.
+
+ Examples
+ --------
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.quantile(a, 0.5)
+ 3.5
+ >>> np.quantile(a, 0.5, axis=0)
+ array([ 6.5, 4.5, 2.5])
+ >>> np.quantile(a, 0.5, axis=1)
+ array([ 7., 2.])
+ >>> np.quantile(a, 0.5, axis=1, keepdims=True)
+ array([[ 7.],
+ [ 2.]])
+ >>> m = np.quantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.quantile(a, 0.5, axis=0, out=out)
+ array([ 6.5, 4.5, 2.5])
+ >>> m
+ array([ 6.5, 4.5, 2.5])
+ >>> b = a.copy()
+ >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
+ array([ 7., 2.])
+ >>> assert not np.all(a == b)
+ """
+ q = np.asanyarray(q)
+ if not _quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _quantile_unchecked(
+ a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
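
The new `quantile` is a thin validated wrapper over the same machinery as `percentile`; once this patch (NumPy 1.15) is applied, the documented equivalence can be checked directly:

import numpy as np

a = np.array([[10, 7, 4], [3, 2, 1]])
# np.quantile exists only once this patch is applied:
assert np.quantile(a, 0.5) == np.percentile(a, 50) == 3.5
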
def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
"""Assumes that q is in [0, 1], and is an ndarray"""
from __future__ import division, absolute_import, print_function
import operator
+import warnings
import numpy as np
from numpy.compat.py3k import basestring
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
+# range is a keyword argument to many functions, so save the builtin so they can
+# use it.
+_range = range
+
def _hist_bin_sqrt(x):
"""
def _hist_bin_auto(x):
"""
Histogram bin estimator that uses the minimum width of the
- Freedman-Diaconis and Sturges estimators.
+ Freedman-Diaconis and Sturges estimators if the FD bandwidth is non-zero,
+ and the Sturges estimator if the FD bandwidth is 0.
The FD estimator is usually the most robust method, but its width
- estimate tends to be too large for small `x`. The Sturges estimator
- is quite good for small (<1000) datasets and is the default in the R
- language. This method gives good off the shelf behaviour.
+ estimate tends to be too large for small `x` and bad for data with limited
+ variance. The Sturges estimator is quite good for small (<1000) datasets
+ and is the default in the R language. This method gives good off the shelf
+ behaviour.
+
+ .. versionchanged:: 1.15.0
+ If there is limited variance the IQR can be 0, which results in the
+ FD bin width being 0 too. This is not a valid bin width, so
+ ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
+ If the IQR is 0, it's unlikely any variance-based estimators will be of
+ use, so we revert to the Sturges estimator, which only uses the size of the
+ dataset in its calculation.
Parameters
----------
--------
_hist_bin_fd, _hist_bin_sturges
"""
- # There is no need to check for zero here. If ptp is, so is IQR and
- # vice versa. Either both are zero or neither one is.
- return min(_hist_bin_fd(x), _hist_bin_sturges(x))
-
+ fd_bw = _hist_bin_fd(x)
+ sturges_bw = _hist_bin_sturges(x)
+ if fd_bw:
+ return min(fd_bw, sturges_bw)
+ else:
+ # limited variance, so we return a length-dependent bw estimator
+ return sturges_bw
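
The case the new branch handles: data whose IQR is 0 makes the FD width 0, and before this change 'auto' would collapse to a single bin. A sketch of the fallback arithmetic:

import numpy as np

x = np.array([1, 1, 1, 1, 1, 1, 10])                  # IQR == 0
assert np.subtract(*np.percentile(x, [75, 25])) == 0  # FD width would be 0
# Sturges depends only on the sample size, so it still gives a usable
# width; the implied bin count here is ceil(log2(7) + 1) == 4.
print(int(np.ceil(np.log2(x.size) + 1)))              # 4
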
# Private dict initialized at module load time
_hist_bin_selectors = {'auto': _hist_bin_auto,
return first_edge, last_edge
+def _unsigned_subtract(a, b):
+ """
+ Subtract two values where a >= b, and produce an unsigned result
+
+ This is needed when finding the difference between the upper and lower
+ bound of an int16 histogram
+ """
+ # coerce to a single type
+ signed_to_unsigned = {
+ np.byte: np.ubyte,
+ np.short: np.ushort,
+ np.intc: np.uintc,
+ np.int_: np.uint,
+ np.longlong: np.ulonglong
+ }
+ dt = np.result_type(a, b)
+ try:
+ dt = signed_to_unsigned[dt.type]
+ except KeyError:
+ return np.subtract(a, b, dtype=dt)
+ else:
+ # we know the inputs are integers, and we are deliberately casting
+ # signed to unsigned
+ return np.subtract(a, b, casting='unsafe', dtype=dt)
+
+
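
A sketch of the overflow this guards against: subtracting int16 histogram edges in the signed type wraps around, while the deliberate unsigned cast recovers the true span:

import numpy as np

first_edge = np.int16(-32000)
last_edge = np.int16(32000)
# Signed subtraction wraps (and may emit a RuntimeWarning): 64000 does
# not fit in int16, so the result comes out as -1536.
print(np.subtract(last_edge, first_edge))
# The deliberate unsigned cast, as in _unsigned_subtract above:
print(np.subtract(last_edge, first_edge, casting='unsafe', dtype=np.uint16))
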
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a)
if width:
- n_equal_bins = int(np.ceil((last_edge - first_edge) / width))
+ n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
- ``np.round(np.ceil(range / h))`.
+ ``np.round(np.ceil(range / h))``.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
return bin_edges
-def histogram(a, bins=10, range=None, normed=False, weights=None,
+def histogram(a, bins=10, range=None, normed=None, weights=None,
density=None):
r"""
Compute the histogram of a set of data.
.. deprecated:: 1.6.0
- This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
- behavior. It will be removed in NumPy 2.0.0. Use the ``density``
- keyword instead. If ``False``, the result will contain the
- number of samples in each bin. If ``True``, the result is the
- value of the probability *density* function at the bin,
- normalized such that the *integral* over the range is 1. Note
- that this latter behavior is known to be buggy with unequal bin
- widths; use ``density`` instead.
+ This is equivalent to the `density` argument, but produces incorrect
+ results for unequal bin widths. It should not be used.
+
+ .. versionchanged:: 1.15.0
+ DeprecationWarnings are actually emitted.
+
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
- norm = n_equal_bins / (last_edge - first_edge)
+ norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
- for i in np.arange(0, len(a), BLOCK):
+ for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
- f_indices = (tmp_a - first_edge) * norm
+ f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
indices = f_indices.astype(np.intp)
indices[indices == n_equal_bins] -= 1
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
- for i in np.arange(0, len(a), BLOCK):
+ for i in _range(0, len(a), BLOCK):
sa = np.sort(a[i:i+BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
- for i in np.arange(0, len(a), BLOCK):
+ for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
# density overrides the normed keyword
if density is not None:
- normed = False
+ if normed is not None:
+ # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
+ warnings.warn(
+ "The normed argument is ignored when density is provided. "
+ "In future passing both will result in an error.",
+ DeprecationWarning, stacklevel=2)
+ normed = None
if density:
db = np.array(np.diff(bin_edges), float)
return n/db/n.sum(), bin_edges
elif normed:
- # deprecated, buggy behavior. Remove for NumPy 2.0.0
+ # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
+ warnings.warn(
+ "Passing `normed=True` on non-uniform bins has always been "
+ "broken, and computes neither the probability density "
+ "function nor the probability mass function. "
+ "The result is only correct if the bins are uniform, when "
+ "density=True will produce the same result anyway. "
+ "The argument will be removed in a future version of "
+ "numpy.",
+ np.VisibleDeprecationWarning, stacklevel=2)
+
+ # this normalization is incorrect, but kept for backward compatibility
db = np.array(np.diff(bin_edges), float)
return n/(n*db).sum(), bin_edges
else:
+ if normed is not None:
+ # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
+ warnings.warn(
+ "Passing normed=False is deprecated, and has no effect. "
+ "Consider passing the density argument instead.",
+ DeprecationWarning, stacklevel=2)
return n, bin_edges
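
The upshot for callers: `density` is the supported way to normalize, as in this small example (the counts divided by ``n * bin_width`` integrate to 1):

import numpy as np

counts, edges = np.histogram([1, 2, 1], bins=[0, 1, 2, 3], density=True)
print(counts)                                # [0.  0.66666667  0.33333333]
assert np.isclose((counts * np.diff(edges)).sum(), 1.0)
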
-def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
+def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
+ density=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
- sample : array_like
- The data to be histogrammed. It must be an (N,D) array or data
- that can be converted to such. The rows of the resulting array
- are the coordinates of points in a D dimensional polytope.
+ sample : (N, D) array, or (D, N) array_like
+ The data to be histogrammed.
+
+ Note the unusual interpretation of sample when an array_like:
+
+ * When an array, each row is a coordinate in a D-dimensional space -
+ such as ``histogramdd(np.array([p1, p2, p3]))``.
+ * When an array_like, each element is the list of values for a single
+ coordinate - such as ``histogramdd((X, Y, Z))``.
+
+ The first form should be preferred.
+
bins : sequence or int, optional
The bin specification:
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
- A sequence of lower and upper bin edges to be used if the edges are
- not given explicitly in `bins`. Defaults to the minimum and maximum
- values along each dimension.
+ A sequence of length D, each an optional (lower, upper) tuple giving
+ the outer bin edges to be used if the edges are not given explicitly in
+ `bins`.
+ An entry of None in the sequence results in the minimum and maximum
+ values being used for the corresponding dimension.
+ The default, None, is equivalent to passing a tuple of D None values.
+ density : bool, optional
+ If False, the default, returns the number of samples in each bin.
+ If True, returns the probability *density* function at the bin,
+ ``bin_count / sample_count / bin_volume``.
normed : bool, optional
- If False, returns the number of samples in each bin. If True,
- returns the bin density ``bin_count / sample_count / bin_volume``.
+ An alias for the density argument that behaves identically. To avoid
+ confusion with the broken normed argument to `histogram`, `density`
+ should be preferred.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
# bins is an integer
bins = D*[bins]
- # Select range for each dimension
- # Used only if number of bins is given.
+ # normalize the range argument
if range is None:
- # Handle empty input. Range can't be determined in that case, use 0-1.
- if N == 0:
- smin = np.zeros(D)
- smax = np.ones(D)
- else:
- smin = np.atleast_1d(np.array(sample.min(0), float))
- smax = np.atleast_1d(np.array(sample.max(0), float))
- else:
- if not np.all(np.isfinite(range)):
- raise ValueError(
- 'range parameter must be finite.')
- smin = np.zeros(D)
- smax = np.zeros(D)
- for i in np.arange(D):
- smin[i], smax[i] = range[i]
-
- # Make sure the bins have a finite width.
- for i in np.arange(len(smin)):
- if smin[i] == smax[i]:
- smin[i] = smin[i] - .5
- smax[i] = smax[i] + .5
-
- # avoid rounding issues for comparisons when dealing with inexact types
- if np.issubdtype(sample.dtype, np.inexact):
- edge_dt = sample.dtype
- else:
- edge_dt = float
+ range = (None,) * D
+ elif len(range) != D:
+ raise ValueError('range argument must have one entry per dimension')
+
# Create edge arrays
- for i in np.arange(D):
- if np.isscalar(bins[i]):
+ for i in _range(D):
+ if np.ndim(bins[i]) == 0:
if bins[i] < 1:
raise ValueError(
- "Element at index %s in `bins` should be a positive "
- "integer." % i)
- nbin[i] = bins[i] + 2 # +2 for outlier bins
- edges[i] = np.linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
+ '`bins[{}]` must be positive, when an integer'.format(i))
+ smin, smax = _get_outer_edges(sample[:,i], range[i])
+ edges[i] = np.linspace(smin, smax, bins[i] + 1)
+ elif np.ndim(bins[i]) == 1:
+ edges[i] = np.asarray(bins[i])
+ if np.any(edges[i][:-1] > edges[i][1:]):
+ raise ValueError(
+ '`bins[{}]` must be monotonically increasing, when an array'
+ .format(i))
else:
- edges[i] = np.asarray(bins[i], edge_dt)
- nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
- dedges[i] = np.diff(edges[i])
- if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
- "Found bin edge of size <= 0. Did you specify `bins` with"
- "non-monotonic sequence?")
+ '`bins[{}]` must be a scalar or 1d array'.format(i))
- nbin = np.asarray(nbin)
-
- # Handle empty input.
- if N == 0:
- return np.zeros(nbin-2), edges
+ nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
+ dedges[i] = np.diff(edges[i])
# Compute the bin number each sample falls into.
- Ncount = {}
- for i in np.arange(D):
- Ncount[i] = np.digitize(sample[:, i], edges[i])
+ Ncount = tuple(
+ # avoid np.digitize to work around gh-11022
+ np.searchsorted(edges[i], sample[:, i], side='right')
+ for i in _range(D)
+ )
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
- for i in np.arange(D):
- # Rounding precision
- mindiff = dedges[i].min()
- if not np.isinf(mindiff):
- decimal = int(-np.log10(mindiff)) + 6
- # Find which points are on the rightmost edge.
- not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
- on_edge = (np.around(sample[:, i], decimal) ==
- np.around(edges[i][-1], decimal))
- # Shift these points one bin to the left.
- Ncount[i][np.nonzero(on_edge & not_smaller_than_edge)[0]] -= 1
-
- # Flattened histogram matrix (1D)
- # Reshape is used so that overlarge arrays
- # will raise an error.
- hist = np.zeros(nbin, float).reshape(-1)
+ for i in _range(D):
+ # Find which points are on the rightmost edge.
+ on_edge = (sample[:, i] == edges[i][-1])
+ # Shift these points one bin to the left.
+ Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
- ni = nbin.argsort()
- xy = np.zeros(N, int)
- for i in np.arange(0, D-1):
- xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
- xy += Ncount[ni[-1]]
+ # This raises an error if the array is too large.
+ xy = np.ravel_multi_index(Ncount, nbin)
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
- if len(xy) == 0:
- return np.zeros(nbin-2, int), edges
-
- flatcount = np.bincount(xy, weights)
- a = np.arange(len(flatcount))
- hist[a] = flatcount
+ hist = np.bincount(xy, weights, minlength=nbin.prod())
# Shape into a proper matrix
- hist = hist.reshape(np.sort(nbin))
- for i in np.arange(nbin.size):
- j = ni.argsort()[i]
- hist = hist.swapaxes(i, j)
- ni[i], ni[j] = ni[j], ni[i]
+ hist = hist.reshape(nbin)
+
+ # This preserves the (bad) behavior observed in gh-7845, for now.
+ hist = hist.astype(float, casting='safe')
# Remove outliers (indices 0 and -1 for each dimension).
core = D*(slice(1, -1),)
hist = hist[core]
- # Normalize if normed is True
- if normed:
+ # handle the aliasing normed argument
+ if normed is None:
+ if density is None:
+ density = False
+ elif density is None:
+ # an explicit normed argument was passed, alias it to the new name
+ density = normed
+ else:
+ raise TypeError("Cannot specify both 'normed' and 'density'")
+
+ if density:
+ # calculate the probability density function
s = hist.sum()
- for i in np.arange(D):
+ for i in _range(D):
shape = np.ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
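
For reference, `np.ravel_multi_index` used above replaces the old hand-rolled flattening of per-dimension bin numbers, and it raises on oversized arrays instead of silently wrapping; a sketch of the equivalence in 2-D:

import numpy as np

nbin = np.array([4, 5])                         # bins + 2 outlier slots per dim
Ncount = (np.array([1, 3]), np.array([2, 4]))   # per-dimension bin numbers
xy = np.ravel_multi_index(Ncount, nbin)
print(xy)                                       # [ 7 19]
print(Ncount[0] * nbin[1] + Ncount[1])          # [ 7 19], manual equivalent
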
slobj = [_nx.newaxis]*len(size)
for k in range(len(size)):
slobj[k] = slice(None, None)
- nn[k] = nn[k][slobj]
+ nn[k] = nn[k][tuple(slobj)]
slobj[k] = _nx.newaxis
return nn
except (IndexError, TypeError):
It is useful for writing classes that do not inherit from `numpy.ndarray`,
but that should support arithmetic and numpy universal functions like
- arrays as described in :ref:`A Mechanism for Overriding Ufuncs
- <neps.ufunc-overrides>`.
+ arrays as described in `A Mechanism for Overriding Ufuncs
+ <../../neps/nep-0013-ufunc-overrides.html>`_.
As a trivial example, consider this implementation of an ``ArrayLike``
class that simply wraps a NumPy array and ensures that the result of any
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
+- `nanquantile` -- qth quantile of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
- 'nancumsum', 'nancumprod'
+ 'nancumsum', 'nancumprod', 'nanquantile'
]
`a` after this function completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
- use when the desired quantile lies between two data points
+ use when the desired percentile lies between two data points
``i < j``:
- * linear: ``i + (j - i) * fraction``, where ``fraction``
- is the fractional part of the index surrounded by ``i``
- and ``j``.
- * lower: ``i``.
- * higher: ``j``.
- * nearest: ``i`` or ``j``, whichever is nearest.
- * midpoint: ``(i + j) / 2``.
+
+ * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * 'lower': ``i``.
+ * 'higher': ``j``.
+ * 'nearest': ``i`` or ``j``, whichever is nearest.
+ * 'midpoint': ``(i + j) / 2``.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
nanmean
nanmedian : equivalent to ``nanpercentile(..., 50)``
percentile, median, mean
+ nanquantile : equivalent to nanpercentile, but with q in the range [0, 1].
Notes
-----
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
+ interpolation='linear', keepdims=np._NoValue):
+ """
+ Compute the qth quantile of the data along the specified axis,
+ while ignoring nan values.
+ Returns the qth quantile(s) of the array elements.
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array, containing
+ nan values to be ignored.
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+
+ * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * 'lower': ``i``.
+ * 'higher': ``j``.
+ * 'nearest': ``i`` or ``j``, whichever is nearest.
+ * 'midpoint': ``(i + j) / 2``.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ If this is anything but the default value it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims` this
+ will raise a RuntimeError.
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single quantile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ quantile
+ nanmean, nanmedian
+ nanmedian : equivalent to ``nanquantile(..., 0.5)``
+ nanpercentile : same as nanquantile, but with q in the range [0, 100].
+
+ Examples
+ --------
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ >>> a[0][1] = np.nan
+ >>> a
+ array([[ 10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.quantile(a, 0.5)
+ nan
+ >>> np.nanquantile(a, 0.5)
+ 3.0
+ >>> np.nanquantile(a, 0.5, axis=0)
+ array([ 6.5, 2., 2.5])
+ >>> np.nanquantile(a, 0.5, axis=1, keepdims=True)
+ array([[ 7.],
+ [ 2.]])
+ >>> m = np.nanquantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.nanquantile(a, 0.5, axis=0, out=out)
+ array([ 6.5, 2., 2.5])
+ >>> m
+ array([ 6.5, 2. , 2.5])
+ >>> b = a.copy()
+ >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True)
+ array([ 7., 2.])
+ >>> assert not np.all(a==b)
+ """
+ a = np.asanyarray(a)
+ q = np.asanyarray(q)
+ if not function_base._quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _nanquantile_unchecked(
+ a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""Assumes that q is in [0, 1], and is an ndarray"""
if sys.version_info[0] >= 3:
import pickle
+ from collections.abc import Mapping
else:
import cPickle as pickle
from future_builtins import map
+ from collections import Mapping
def loads(*args, **kwargs):
This also enables tab-completion in an interpreter or IPython.
"""
- return object.__getattribute__(self, '_obj').keys()
+ return list(object.__getattribute__(self, '_obj').keys())
def zipfile_factory(file, *args, **kwargs):
return zipfile.ZipFile(file, *args, **kwargs)
-class NpzFile(object):
+class NpzFile(Mapping):
"""
NpzFile(fid)
def __del__(self):
self.close()
+ # Implement the Mapping ABC
+ def __iter__(self):
+ return iter(self.files)
+
+ def __len__(self):
+ return len(self.files)
+
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
- member = 0
+ member = False
if key in self._files:
- member = 1
+ member = True
elif key in self.files:
- member = 1
+ member = True
key += '.npy'
if member:
bytes = self.zip.open(key)
else:
raise KeyError("%s is not a file in the archive" % key)
- def __iter__(self):
- return iter(self.files)
-
- def items(self):
- """
- Return a list of tuples, with each tuple (filename, array in file).
-
- """
- return [(f, self[f]) for f in self.files]
-
- def iteritems(self):
- """Generator that returns tuples (filename, array in file)."""
- for f in self.files:
- yield (f, self[f])
- def keys(self):
- """Return files in the archive with a ``.npy`` extension."""
- return self.files
+ if sys.version_info.major == 3:
+ # deprecate the python 2 dict apis that we supported by accident in
+ # python 3. We forgot to implement itervalues() at all in earlier
+ # versions of numpy, so there is no need to deprecate it here.
- def iterkeys(self):
- """Return an iterator over the files in the archive."""
- return self.__iter__()
+ def iteritems(self):
+ # Numpy 1.15, 2018-02-20
+ warnings.warn(
+ "NpzFile.iteritems is deprecated in python 3, to match the "
+ "removal of dict.itertems. Use .items() instead.",
+ DeprecationWarning, stacklevel=2)
+ return self.items()
- def __contains__(self, key):
- return self.files.__contains__(key)
+ def iterkeys(self):
+ # Numpy 1.15, 2018-02-20
+ warnings.warn(
+ "NpzFile.iterkeys is deprecated in python 3, to match the "
+ "removal of dict.iterkeys. Use .keys() instead.",
+ DeprecationWarning, stacklevel=2)
+ return self.keys()
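
Deriving from `collections.abc.Mapping` is what lets `NpzFile` drop its hand-written `items()`, `keys()`, and `__contains__`: the ABC supplies them from `__getitem__`, `__iter__`, and `__len__`. The same pattern in miniature (the class and data here are illustrative only):

from collections.abc import Mapping

class ArchiveSketch(Mapping):
    def __init__(self, arrays):
        self._arrays = arrays            # hypothetical {name: array} store
    def __getitem__(self, key):
        return self._arrays[key]
    def __iter__(self):
        return iter(self._arrays)
    def __len__(self):
        return len(self._arrays)

arc = ArchiveSketch({'x': [1, 2], 'y': [3]})
print(list(arc.keys()))      # ['x', 'y']   -- supplied by Mapping
print('x' in arc, len(arc))  # True 2       -- __contains__/__len__ too
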
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
Notes
-----
- For a description of the ``.npy`` format, see the module docstring
- of `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Examples
--------
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
- description of the ``.npy`` format, see `numpy.lib.format` or the
- NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
- in ``.npy`` format. For a description of the ``.npy`` format, see
- `numpy.lib.format` or the NumPy Enhancement Proposal
- http://numpy.github.io/neps/npy-format.html
+ in ``.npy`` format. For a description of the ``.npy`` format, see
+ :py:mod:`numpy.lib.format`.
+
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
- return lambda x: complex(asstr(x))
+ return lambda x: complex(asstr(x).replace('+-', '-'))
elif issubclass(typ, np.bytes_):
return asbytes
elif issubclass(typ, np.unicode_):
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
- comment. For backwards compatibility, byte strings will be decoded as
- 'latin1'. The default is '#'.
+ comment. None implies no comments. For backwards compatibility, byte
+ strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
- >>> c = StringIO("0 1\\n2 3")
+ >>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
- >>> d = StringIO("M 21 72\\nF 35 58")
+ >>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
- >>> c = StringIO("1,0,2\\n3,0,4")
+ >>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
if encoding is not None:
fencoding = encoding
# we must assume local encoding
- # TOOD emit portability warning?
+ # TODO emit portability warning?
elif fencoding is None:
import locale
fencoding = locale.getpreferredencoding()
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
- a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
- like `' (%s+%sj)' % (fmt, fmt)`
- b) a full string specifying every real and imaginary part, e.g.
- `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
- c) a list of specifiers, one per column - in this case, the real
- and imaginary part must have separate specifiers,
- e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
+
+ * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
+ like `' (%s+%sj)' % (fmt, fmt)`
+ * a full string specifying every real and imaginary part, e.g.
+ `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
+ * a list of specifiers, one per column - in this case, the real
+ and imaginary part must have separate specifiers,
+ e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
for number in row:
row2.append(number.real)
row2.append(number.imag)
- fh.write(format % tuple(row2) + newline)
+ s = format % tuple(row2) + newline
+ fh.write(s.replace('+-', '-'))
else:
for row in X:
try:
Comma delimited file with mixed dtype
- >>> s = StringIO("1,1.3,abcde")
+ >>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
An example with fixed-width columns
- >>> s = StringIO("11.3abcde")
+ >>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
try:
while not first_values:
first_line = _decode_line(next(fhd), encoding)
- if names is True:
+ if (names is True) and (comments is not None):
if comments in first_line:
first_line = (
''.join(first_line.split(comments)[1:]))
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
- if fval in comments:
- del first_values[0]
+ if comments is not None:
+ if fval in comments:
+ del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
- Or a square matrix object:
-
- >>> np.poly(np.matrix(P))
- array([ 1. , 0. , 0.16666667])
-
Note how in all cases the leading coefficient is always 1.
"""
Notes
-----
* Without a mask, the missing value will be filled with something,
- * depending on what its corresponding type:
- -1 for integers
- -1.0 for floating point numbers
- '-' for characters
- '-1' for strings
- True for boolean values
+ depending on what its corresponding type:
+
+ * ``-1`` for integers
+ * ``-1.0`` for floating point numbers
+ * ``'-'`` for characters
+ * ``'-1'`` for strings
+ * ``True`` for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
+def repack_fields(a, align=False, recurse=False):
+ """
+ Re-pack the fields of a structured array or dtype in memory.
+
+ The memory layout of structured datatypes allows fields at arbitrary
+ byte offsets. This means the fields can be separated by padding bytes,
+ their offsets can be non-monotonically increasing, and they can overlap.
+
+ This method removes any overlaps and reorders the fields in memory so they
+ have increasing byte offsets, and adds or removes padding bytes depending
+ on the `align` option, which behaves like the `align` option to `np.dtype`.
+
+ If `align=False`, this method produces a "packed" memory layout in which
+ each field starts at the byte where the previous field ended, and any
+ padding bytes are removed.
+
+ If `align=True`, this method produces an "aligned" memory layout in which
+ each field's offset is a multiple of its alignment, and the total itemsize
+ is a multiple of the largest alignment, by adding padding bytes as needed.
+
+ Parameters
+ ----------
+ a : ndarray or dtype
+ array or dtype for which to repack the fields.
+ align : boolean
+ If true, use an "aligned" memory layout, otherwise use a "packed" layout.
+ recurse : boolean
+ If True, also repack nested structures.
+
+ Returns
+ -------
+ repacked : ndarray or dtype
+ Copy of `a` with fields repacked, or `a` itself if no repacking was
+ needed.
+
+ Examples
+ --------
+
+ >>> def print_offsets(d):
+ ... print("offsets:", [d.fields[name][1] for name in d.names])
+ ... print("itemsize:", d.itemsize)
+ ...
+ >>> dt = np.dtype('u1,i4,f8', align=True)
+ >>> dt
+ dtype({'names':['f0','f1','f2'], 'formats':['u1','<i4','<f8'], 'offsets':[0,4,8], 'itemsize':16}, align=True)
+ >>> print_offsets(dt)
+ offsets: [0, 4, 8]
+ itemsize: 16
+ >>> packed_dt = repack_fields(dt)
+ >>> packed_dt
+ dtype([('f0', 'u1'), ('f1', '<i4'), ('f2', '<f8')])
+ >>> print_offsets(packed_dt)
+ offsets: [0, 1, 5]
+ itemsize: 13
+
+ """
+ if not isinstance(a, np.dtype):
+ dt = repack_fields(a.dtype, align=align, recurse=recurse)
+ return a.astype(dt, copy=False)
+
+ if a.names is None:
+ return a
+
+ fieldinfo = []
+ for name in a.names:
+ tup = a.fields[name]
+ if recurse:
+ fmt = repack_fields(tup[0], align=align, recurse=True)
+ else:
+ fmt = tup[0]
+
+ if len(tup) == 3:
+ name = (tup[2], name)
+
+ fieldinfo.append((name, fmt))
+
+ dt = np.dtype(fieldinfo, align=align)
+ return np.dtype((a.type, dt))
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
--------
>>> np.set_printoptions(precision=4)
- >>> np.emath.arctanh(np.matrix(np.eye(2)))
+ >>> np.emath.arctanh(np.eye(2))
array([[ Inf, 0.],
[ 0., Inf]])
>>> np.emath.arctanh([1j])
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
- 'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
+ 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',
+ 'put_along_axis'
]
+def _make_along_axis_idx(arr_shape, indices, axis):
+ # compute dimensions to iterate over
+ if not _nx.issubdtype(indices.dtype, _nx.integer):
+ raise IndexError('`indices` must be an integer array')
+ if len(arr_shape) != indices.ndim:
+ raise ValueError(
+ "`indices` and `arr` must have the same number of dimensions")
+ shape_ones = (1,) * indices.ndim
+ dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
+
+ # build a fancy index, consisting of orthogonal aranges, with the
+ # requested index inserted at the right location
+ fancy_index = []
+ for dim, n in zip(dest_dims, arr_shape):
+ if dim is None:
+ fancy_index.append(indices)
+ else:
+ ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
+ fancy_index.append(_nx.arange(n).reshape(ind_shape))
+
+ return tuple(fancy_index)
+
+
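
Concretely, for ``axis=1`` on a 2-d array the fancy index built above pairs a column vector of row numbers with the requested indices; a sketch:

import numpy as np

arr = np.array([[10, 30, 20], [60, 40, 50]])
indices = np.argsort(arr, axis=1)
rows = np.arange(2).reshape(2, 1)       # the orthogonal arange for axis 0
print(arr[rows, indices])               # [[10 20 30]
                                        #  [40 50 60]]
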
+def take_along_axis(arr, indices, axis):
+ """
+ Take values from the input array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to look up values in the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ arr: ndarray (Ni..., M, Nk...)
+ Source array
+ indices: ndarray (Ni..., J, Nk...)
+ Indices to take along each 1d slice of `arr`. This must match the
+ dimension of arr, but dimensions Ni and Nj only need to broadcast
+ against `arr`.
+ axis: int
+ The axis to take 1d slices along. If axis is None, the input array is
+ treated as if it had first been flattened to 1d, for consistency with
+ `sort` and `argsort`.
+
+ Returns
+ -------
+ out: ndarray (Ni..., J, Nk...)
+ The indexed result.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+ out = np.empty(Ni + (J,) + Nk)
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a [ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ out_1d = out [ii + s_[:,] + kk]
+ for j in range(J):
+ out_1d[j] = a_1d[indices_1d[j]]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ out_1d[:] = a_1d[indices_1d]
+
+ See Also
+ --------
+ take : Take along an axis, using the same indices for every 1d slice
+ put_along_axis :
+ Put values into the destination array by matching 1d index and data slices
+
+ Examples
+ --------
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can sort either by using sort directly, or argsort and this function
+
+ >>> np.sort(a, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+ >>> ai = np.argsort(a, axis=1); ai
+ array([[0, 2, 1],
+ [1, 2, 0]], dtype=int64)
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+
+ The same works for max and min, if you expand the dimensions:
+
+ >>> np.expand_dims(np.max(a, axis=1), axis=1)
+ array([[30],
+ [60]])
+ >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai
+ array([[1],
+ [0]], dtype=int64)
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[30],
+ [60]])
+
+ If we want to get the max and min at the same time, we can stack the
+ indices first
+
+ >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)
+ >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai = np.concatenate([ai_min, ai_max], axis=1)
+ >>> ai
+ array([[0, 1],
+ [1, 0]], dtype=int64)
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 30],
+ [40, 60]])
+ """
+ # normalize inputs
+ if axis is None:
+ arr = arr.flat
+ arr_shape = (len(arr),) # flatiter has no .shape
+ axis = 0
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+
+
+def put_along_axis(arr, indices, values, axis):
+ """
+ Put values into the destination array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to place values into the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ arr: ndarray (Ni..., M, Nk...)
+ Destination array.
+ indices: ndarray (Ni..., J, Nk...)
+ Indices to change along each 1d slice of `arr`. This must match the
+ dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast
+ against `arr`.
+ values: array_like (Ni..., J, Nk...)
+ values to insert at those indices. Its shape and dimension are
+ broadcast to match that of `indices`.
+ axis: int
+ The axis to take 1d slices along. If axis is None, the destination
+ array is treated as if a flattened 1d view of it had been created.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a [ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ values_1d = values [ii + s_[:,] + kk]
+ for j in range(J):
+ a_1d[indices_1d[j]] = values_1d[j]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ a_1d[indices_1d] = values_1d
+
+ See Also
+ --------
+ take_along_axis :
+ Take values from the input array by matching 1d index and data slices
+
+ Examples
+ --------
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can replace the maximum values with:
+
+ >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai
+ array([[1],
+ [0]], dtype=int64)
+ >>> np.put_along_axis(a, ai, 99, axis=1)
+ >>> a
+ array([[10, 99, 20],
+ [99, 40, 50]])
+
+ """
+ # normalize inputs
+ if axis is None:
+ arr = arr.flat
+ axis = 0
+ arr_shape = (len(arr),) # flatiter has no .shape
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
+
+
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
needs_writeable = not readonly and array.flags.writeable
extras = ['reduce_ok'] if needs_writeable else []
op_flag = 'readwrite' if needs_writeable else 'readonly'
- broadcast = np.nditer(
+ it = np.nditer(
(array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
- op_flags=[op_flag], itershape=shape, order='C').itviews[0]
+ op_flags=[op_flag], itershape=shape, order='C')
+ with it:
+ # never really has writebackifcopy semantics
+ broadcast = it.itviews[0]
result = _maybe_view_as_subclass(array, broadcast)
if needs_writeable and not result.flags.writeable:
result.flags.writeable = True
import os
import sys
+import pytest
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
+import numpy.lib._datasource as datasource
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises, SkipTest,
+ assert_, assert_equal, assert_raises, assert_warns, SkipTest
)
-import numpy.lib._datasource as datasource
if sys.version_info[0] >= 3:
import urllib.request as urllib_request
old_urlopen = None
-def setup():
+def setup_module():
global old_urlopen
old_urlopen = urllib_request.urlopen
urllib_request.urlopen = urlopen_stub
-def teardown():
+def teardown_module():
urllib_request.urlopen = old_urlopen
# A valid website for more robust testing
fp.close()
assert_equal(magic_line, result)
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
+ def test_Bz2File_text_mode_warning(self):
+ try:
+ import bz2
+ except ImportError:
+ # We don't have the bz2 capabilities to test.
+ raise SkipTest
+ # Test datasource's internal file_opener for BZip2 files.
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
+ fp = bz2.BZ2File(filepath, 'w')
+ fp.write(magic_line)
+ fp.close()
+ with assert_warns(RuntimeWarning):
+ fp = self.ds.open(filepath, 'rt')
+ result = fp.readline()
+ fp.close()
+ assert_equal(magic_line, result)
+
class TestDataSourceExists(object):
def setup(self):
fp = datasource.open(local_file)
assert_(fp)
fp.close()
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_allclose, assert_raises,
+ assert_, assert_equal, assert_allclose, assert_raises,
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
test = LineSplitter(',')(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5'])
+ # gh-11028 bytes comment/delimiters should get encoded
+ strg = b" 1,2,3,4,,5 % test"
+ test = LineSplitter(delimiter=b',', comments=b'%')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+
def test_constant_fixed_width(self):
"Test LineSplitter w/ fixed-width fields"
strg = " 1 2 3 4 5 # test"
dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
-
-if __name__ == "__main__":
- run_module_suite()
"""
from __future__ import division, absolute_import, print_function
-from numpy.testing import assert_, run_module_suite, assert_raises
+from numpy.testing import assert_, assert_raises
from numpy.lib import NumpyVersion
def test_raises():
for ver in ['1.9', '1,9.0', '1.7.x']:
assert_raises(ValueError, NumpyVersion, ver)
-
-
-if __name__ == "__main__":
- run_module_suite()
)
assert_allclose(test, expected)
+ def test_check_large_integers(self):
+ uint64_max = 2 ** 64 - 1
+ arr = np.full(5, uint64_max, dtype=np.uint64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, uint64_max, dtype=np.uint64)
+ assert_array_equal(test, expected)
+
+ int64_max = 2 ** 63 - 1
+ arr = np.full(5, int64_max, dtype=np.int64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, int64_max, dtype=np.int64)
+ assert_array_equal(test, expected)
+
class TestLinearRamp(object):
def test_check_simple(self):
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)),
**kwargs)
-
-
-if __name__ == "__main__":
- np.testing.run_module_suite()
import numpy as np
import sys
-from numpy.testing import (
- run_module_suite, assert_array_equal, assert_equal, assert_raises,
- )
+from numpy.testing import assert_array_equal, assert_equal, assert_raises
from numpy.lib.arraysetops import (
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
)
ed = np.array([1, 2, 5])
c = intersect1d(a, b)
assert_array_equal(c, ed)
-
assert_array_equal([], intersect1d([], []))
+ def test_intersect1d_array_like(self):
+ # See gh-11772
+ class Test(object):
+ def __array__(self):
+ return np.arange(3)
+
+ a = Test()
+ res = intersect1d(a, a)
+ assert_array_equal(res, a)
+ res = intersect1d([1, 2, 3], [1, 2, 3])
+ assert_array_equal(res, [1, 2, 3])
+
+ def test_intersect1d_indices(self):
+ # unique inputs
+ a = np.array([1, 2, 3, 4])
+ b = np.array([2, 1, 4, 6])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ee = np.array([1, 2, 4])
+ assert_array_equal(c, ee)
+ assert_array_equal(a[i1], ee)
+ assert_array_equal(b[i2], ee)
+
+ # non-unique inputs
+ a = np.array([1, 2, 2, 3, 4, 3, 2])
+ b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ef = np.array([1, 2, 3, 4])
+ assert_array_equal(c, ef)
+ assert_array_equal(a[i1], ef)
+ assert_array_equal(b[i2], ef)
+
+ # non-1d, unique inputs
+ a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+ b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 6, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
+ # non-1d, not assumed to be unique inputs
+ a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+ b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
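# NOTE (editor, illustration only, NumPy >= 1.15): return_indices yields
# i1, i2 into the *flattened* inputs, which is why the n-d tests above map
# them back with np.unravel_index. A minimal check of the invariant:
import numpy as np
a = np.array([[2, 4], [7, 8]])
b = np.array([[3, 2], [8, 9]])
c, i1, i2 = np.intersect1d(a, b, return_indices=True)
assert np.array_equal(a.ravel()[i1], c)  # indices address the flat view
assert np.array_equal(b.ravel()[i2], c)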
def test_setxor1d(self):
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8]))
assert_array_equal([7,1], ediff1d(two_elem, to_begin=7))
assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6]))
- assert(isinstance(ediff1d(np.matrix(1)), np.matrix))
- assert(isinstance(ediff1d(np.matrix(1), to_begin=1), np.matrix))
def test_isin(self):
# the tests for in1d cover most of isin's behavior
assert_array_equal(uniq[:, inv], data)
msg = "Unique's return_counts=True failed with axis=1"
assert_array_equal(cnt, np.array([2, 1, 1]), msg)
-
-
-if __name__ == "__main__":
- run_module_suite()
# Check that all elements are iterated correctly
assert_(list(c.flat) == list(d.flat))
-
-if __name__ == '__main__':
- from numpy.testing import run_module_suite
- run_module_suite()
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_almost_equal, assert_allclose,
- assert_equal, assert_raises
-)
+ assert_, assert_almost_equal, assert_allclose, assert_equal, assert_raises
+ )
class TestFinancial(object):
Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']),
[Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'),
Decimal('-76.88882405'), Decimal('-76.88882405')], 4)
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
+# doctest
r''' Test the .npy file format.
Set up:
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
'''
-
import sys
import os
import shutil
import tempfile
import warnings
+import pytest
from io import BytesIO
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_array_equal, assert_raises, raises,
- dec, SkipTest
+ assert_, assert_array_equal, assert_raises, raises, SkipTest
)
from numpy.lib import format
assert_array_equal(long_str_arr, long_str_arr2)
-@dec.slow
+@pytest.mark.slow
def test_memmap_roundtrip():
- # Fixme: test crashes nose on windows.
+ # FIXME: used to crash on Windows
if not (sys.platform == 'win32' or sys.platform == 'cygwin'):
for arr in basic_arrays + record_arrays:
if arr.dtype.hasobject:
assert_raises(ValueError, format.write_array, f, d, (1, 0))
-@dec.slow
+@pytest.mark.slow
def test_version_2_0_memmap():
# requires more than 2 byte for header
dt = [(("%d" % i) * 100, float) for i in range(500)]
assert_array_equal(r, d)
-@dec.slow
-@dec.skipif(np.dtype(np.intp).itemsize < 8, "test requires 64-bit system")
+@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
+ reason="test requires 64-bit system")
+@pytest.mark.slow
def test_large_archive():
# Regression test for product of saving arrays with dimensions of array
# having a product that doesn't fit in int32. See gh-7598 for details.
new_a = np.load(f)["arr"]
assert_(a.shape == new_a.shape)
-
-
-if __name__ == "__main__":
- run_module_suite()
import warnings
import sys
import decimal
+import pytest
import numpy as np
from numpy import ma
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises,
- assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex,
- dec, suppress_warnings,
-)
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_allclose,
+ assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
+ HAS_REFCOUNT,
+ )
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
histogram, histogramdd, i0, insert, interp, kaiser, meshgrid, msort,
piecewise, place, rot90, select, setxor1d, sinc, split, trapz, trim_zeros,
unwrap, unique, vectorize
-)
+ )
from numpy.compat import long
class TestFlip(object):
def test_axes(self):
- assert_raises(ValueError, np.flip, np.ones(4), axis=1)
- assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=2)
- assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3))
def test_basic_lr(self):
a = get_mat(4)
assert_equal(np.flip(a, i),
np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
+ def test_default_axis(self):
+ a = np.array([[1, 2, 3],
+ [4, 5, 6]])
+ b = np.array([[6, 5, 4],
+ [3, 2, 1]])
+ assert_equal(np.flip(a), b)
+
+ def test_multiple_axes(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ assert_equal(np.flip(a, axis=()), a)
+
+ b = np.array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
+
+ assert_equal(np.flip(a, axis=(0, 2)), b)
+
+ c = np.array([[[3, 2],
+ [1, 0]],
+ [[7, 6],
+ [5, 4]]])
+
+ assert_equal(np.flip(a, axis=(1, 2)), c)
+
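# NOTE (editor, illustration only, NumPy >= 1.15): with the new default
# axis=None exercised above, np.flip reverses every axis, i.e. it is
# equivalent to slicing each axis with ::-1; a tuple flips only the listed axes:
import numpy as np
a = np.arange(8).reshape(2, 2, 2)
assert np.array_equal(np.flip(a), a[::-1, ::-1, ::-1])
assert np.array_equal(np.flip(a, axis=(0, 2)), a[::-1, :, ::-1])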
class TestAny(object):
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
- y6 = np.matrix(rand(5, 5))
- assert_array_equal(y6.mean(0), average(y6, 0))
-
def test_weights(self):
y = np.arange(10)
w = np.arange(10)
assert_equal(type(np.average(a)), subclass)
assert_equal(type(np.average(a, weights=w)), subclass)
- # also test matrices
- a = np.matrix([[1,2],[3,4]])
- w = np.matrix([[1,2],[3,4]])
-
- r = np.average(a, axis=0, weights=w)
- assert_equal(type(r), np.matrix)
- assert_equal(r, [[2.5, 10.0/3]])
-
def test_upcasting(self):
types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
class TestUnwrap(object):
def test_simple(self):
- # check that unwrap removes jumps greather that 2*pi
+ # check that unwrap removes jumps greater than 2*pi
assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
- # check that unwrap maintans continuity
+ # check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapz(y, xm), r)
- def test_matrix(self):
- # Test to make sure matrices give the same answer as ndarrays
- x = np.linspace(0, 5)
- y = x * x
- r = trapz(y, x)
- mx = np.matrix(x)
- my = np.matrix(y)
- mr = trapz(my, mx)
- assert_almost_equal(mr, r)
-
class TestSinc(object):
"must not be negative",
lambda: np.bincount(x, minlength=-1))
- @dec._needs_refcount
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_dtype_reference_leaks(self):
# gh-6805
intp_refcount = sys.getrefcount(np.dtype(np.intp))
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
+class TestQuantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.quantile(x, 0), 0.)
+ assert_equal(np.quantile(x, 1), 3.5)
+ assert_equal(np.quantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.quantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+
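# NOTE (editor, illustration only, NumPy >= 1.15): np.quantile is the
# [0, 1]-scaled counterpart of np.percentile, so q and 100*q agree exactly:
import numpy as np
x = np.arange(8) * 0.5
assert np.quantile(x, 0.5) == np.percentile(x, 50) == 1.75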
class TestMedian(object):
def test_basic(self):
class TestAdd_newdoc(object):
- @dec.skipif(sys.flags.optimize == 2)
+ @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
def test_add_doc(self):
# test np.add_newdoc
tgt = "Current flat index into the array."
assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
assert_(len(np.core.ufunc.identity.__doc__) > 300)
assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
-
-
-if __name__ == "__main__":
- run_module_suite()
from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises,
- assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex,
- dec, suppress_warnings, HAS_REFCOUNT,
-)
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_allclose,
+ assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
+ )
class TestHistogram(object):
assert_allclose(e, np.array([1., 2.]))
def test_normed(self):
- # Check that the integral of the density equals 1.
- n = 100
- v = np.random.rand(n)
- a, b = histogram(v, normed=True)
- area = np.sum(a * np.diff(b))
- assert_almost_equal(area, 1)
+ sup = suppress_warnings()
+ with sup:
+ rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
+ # Check that the integral of the density equals 1.
+ n = 100
+ v = np.random.rand(n)
+ a, b = histogram(v, normed=True)
+ area = np.sum(a * np.diff(b))
+ assert_almost_equal(area, 1)
+ assert_equal(len(rec), 1)
- # Check with non-constant bin widths (buggy but backwards
- # compatible)
- v = np.arange(10)
- bins = [0, 1, 5, 9, 10]
- a, b = histogram(v, bins, normed=True)
- area = np.sum(a * np.diff(b))
- assert_almost_equal(area, 1)
+ sup = suppress_warnings()
+ with sup:
+ rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
+ # Check with non-constant bin widths (buggy but backwards
+ # compatible)
+ v = np.arange(10)
+ bins = [0, 1, 5, 9, 10]
+ a, b = histogram(v, bins, normed=True)
+ area = np.sum(a * np.diff(b))
+ assert_almost_equal(area, 1)
+ assert_equal(len(rec), 1)
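# NOTE (editor, illustration only): for callers, the migration exercised
# above is mechanical -- `density=True` computes the same probability
# density that `normed=True` did for uniform bins, without the
# VisibleDeprecationWarning:
import numpy as np
v = np.random.rand(100)
hist, edges = np.histogram(v, density=True)
assert abs(np.sum(hist * np.diff(edges)) - 1) < 1e-12  # integrates to 1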
def test_density(self):
# Check that the integral of the density equals 1.
assert_array_equal(a, .1)
assert_equal(np.sum(a * np.diff(b)), 1)
+ # Test that passing False works too
+ a, b = histogram(v, bins, density=False)
+ assert_array_equal(a, [1, 2, 3, 4])
+
+ # Variable bin widths are especially useful to deal with
# infinities.
v = np.arange(10)
assert_equal(h.sum(), 9)
# Normalization
- h, b = histogram(a, range=[1, 9], normed=True)
+ h, b = histogram(a, range=[1, 9], density=True)
assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
# Weights
w = np.arange(10) + .5
- h, b = histogram(a, range=[1, 9], weights=w, normed=True)
+ h, b = histogram(a, range=[1, 9], weights=w, density=True)
assert_equal((h * np.diff(b)).sum(), 1)
h, b = histogram(a, bins=8, range=[1, 9], weights=w)
h, b = histogram(a)
assert_(np.issubdtype(h.dtype, np.integer))
- h, b = histogram(a, normed=True)
+ h, b = histogram(a, density=True)
assert_(np.issubdtype(h.dtype, np.floating))
h, b = histogram(a, weights=np.ones(10, int))
v = np.random.rand(100)
w = np.ones(100) * 5
a, b = histogram(v)
- na, nb = histogram(v, normed=True)
+ na, nb = histogram(v, density=True)
wa, wb = histogram(v, weights=w)
- nwa, nwb = histogram(v, weights=w, normed=True)
+ nwa, nwb = histogram(v, weights=w, density=True)
assert_array_almost_equal(a * 5, wa)
assert_array_almost_equal(na, nwa)
wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
assert_array_equal(wa, [4, 5, 0, 1])
wa, wb = histogram(
- [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True)
+ [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
# Check weights with non-uniform bin widths
one_nan = np.array([0, 1, np.nan])
all_nan = np.array([np.nan, np.nan])
- # the internal commparisons with NaN give warnings
+ # the internal comparisons with NaN give warnings
sup = suppress_warnings()
sup.filter(RuntimeWarning)
with sup:
assert_equal(d_edge.dtype, dates.dtype)
assert_equal(t_edge.dtype, td)
+ def do_signed_overflow_bounds(self, dtype):
+ exponent = 8 * np.dtype(dtype).itemsize - 1
+ arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
+ hist, e = histogram(arr, bins=2)
+ assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
+ assert_equal(hist, [1, 1])
+
+ def test_signed_overflow_bounds(self):
+ self.do_signed_overflow_bounds(np.byte)
+ self.do_signed_overflow_bounds(np.short)
+ self.do_signed_overflow_bounds(np.intc)
+ self.do_signed_overflow_bounds(np.int_)
+ self.do_signed_overflow_bounds(np.longlong)
+
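# NOTE (editor, illustration only): the guarded failure mode is the range
# computation `last_edge - first_edge`, which overflows the signed type when
# the data spans nearly the full representable range; widening to Python
# ints shows the true span for the int8 case of the test above:
exponent = 7  # int8
lo, hi = -2**exponent + 4, 2**exponent - 4   # -124, 124
assert hi - lo == 248  # exceeds the int8 maximum of 127, so int8 math would wrap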
def do_precision_lower_bound(self, float_small, float_large):
eps = np.finfo(float_large).eps
assert_equal(len(a), numbins, err_msg="{0} estimator, "
"No Variance test".format(estimator))
+ def test_limited_variance(self):
+ """
+ Check that when the IQR is 0 but the variance is nonzero, we return the
+ sturges value and not the fd value.
+ """
+ lim_var_data = np.ones(1000)
+ lim_var_data[:3] = 0
+ lim_var_data[-4:] = 100
+
+ edges_auto = histogram_bin_edges(lim_var_data, 'auto')
+ assert_equal(edges_auto, np.linspace(0, 100, 12))
+
+ edges_fd = histogram_bin_edges(lim_var_data, 'fd')
+ assert_equal(edges_fd, np.array([0, 100]))
+
+ edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
+ assert_equal(edges_sturges, np.linspace(0, 100, 12))
+
def test_outlier(self):
"""
Check the FD, Scott and Doane with outliers.
# Check normalization
ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
- H, edges = histogramdd(x, bins=ed, normed=True)
+ H, edges = histogramdd(x, bins=ed, density=True)
assert_(np.all(H == answer / 12.))
# Check that H has the correct shape.
H, edges = histogramdd(x, (2, 3, 4),
range=[[-1, 1], [0, 3], [0, 4]],
- normed=True)
+ density=True)
answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
assert_array_almost_equal(H, answer / 6., 4)
def test_weights(self):
v = np.random.rand(100, 2)
hist, edges = histogramdd(v)
- n_hist, edges = histogramdd(v, normed=True)
+ n_hist, edges = histogramdd(v, density=True)
w_hist, edges = histogramdd(v, weights=np.ones(100))
assert_array_equal(w_hist, hist)
- w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, normed=True)
+ w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
assert_array_equal(w_hist, n_hist)
w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
assert_array_equal(w_hist, 2 * hist)
x = np.arange(8).reshape(2, 4)
assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
- assert_raises(
- ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]])
assert_raises(
ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
- assert_(hist[1] == 1.)
+ assert_(hist[1] == 0.0)
x = [1.0001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
-
-if __name__ == "__main__":
- run_module_suite()
+ def test_equal_edges(self):
+ """ Test that adjacent entries in an edge array can be equal """
+ x = np.array([0, 1, 2])
+ y = np.array([0, 1, 2])
+ x_edges = np.array([0, 2, 2])
+ y_edges = 1
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ hist_expected = np.array([
+ [2.],
+ [1.], # x == 2 falls in the final bin
+ ])
+ assert_equal(hist, hist_expected)
+
+ def test_edge_dtype(self):
+ """ Test that if an edge array is input, its type is preserved """
+ x = np.array([0, 10, 20])
+ y = x / 10
+ x_edges = np.array([0, 5, 15, 20])
+ y_edges = x_edges / 10
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(edges[0].dtype, x_edges.dtype)
+ assert_equal(edges[1].dtype, y_edges.dtype)
+
+ def test_large_integers(self):
+ big = 2**60 # Too large to represent with a full precision float
+
+ x = np.array([0], np.int64)
+ x_edges = np.array([-1, +1], np.int64)
+ y = big + x
+ y_edges = big + x_edges
+
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(hist[0, 0], 1)
+
+ def test_density_non_uniform_2d(self):
+ # Defines the following grid:
+ #
+ # 0 2 8
+ # 0+-+-----+
+ # + | +
+ # + | +
+ # 6+-+-----+
+ # 8+-+-----+
+ x_edges = np.array([0, 2, 8])
+ y_edges = np.array([0, 6, 8])
+ relative_areas = np.array([
+ [3, 9],
+ [1, 3]])
+
+ # ensure the number of points in each region is proportional to its area
+ x = np.array([1] + [1]*3 + [7]*3 + [7]*9)
+ y = np.array([7] + [1]*3 + [7]*3 + [1]*9)
+
+ # sanity check that the above worked as intended
+ hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
+ assert_equal(hist, relative_areas)
+
+ # resulting histogram should be uniform, since counts and areas are proportional
+ hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
+ assert_equal(hist, 1 / (8*8))
+
+ def test_density_non_uniform_1d(self):
+ # compare to histogram to show the results are the same
+ v = np.arange(10)
+ bins = np.array([0, 1, 3, 6, 10])
+ hist, edges = histogram(v, bins, density=True)
+ hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
+ assert_equal(hist, hist_dd)
+ assert_equal(edges, edges_dd[0])
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises,
- assert_raises_regex
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_raises_regex
)
from numpy.lib.index_tricks import (
- mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
+ mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
index_exp, ndindex, r_, s_, ix_
)
assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
0.2*np.ones(20, 'd'), 11)
+ def test_sparse(self):
+ grid_full = mgrid[-1:1:10j, -2:2:10j]
+ grid_sparse = ogrid[-1:1:10j, -2:2:10j]
+
+ # sparse grids can be made dense by broadcasting
+ grid_broadcast = np.broadcast_arrays(*grid_sparse)
+ for f, b in zip(grid_full, grid_broadcast):
+ assert_equal(f, b)
+
class TestConcatenator(object):
def test_1d(self):
assert_array_equal(d[:5, :], b)
assert_array_equal(d[5:, :], c)
- def test_matrix(self):
- a = [1, 2]
- b = [3, 4]
-
- ab_r = np.r_['r', a, b]
- ab_c = np.r_['c', a, b]
-
- assert_equal(type(ab_r), np.matrix)
- assert_equal(type(ab_c), np.matrix)
-
- assert_equal(np.array(ab_r), [[1,2,3,4]])
- assert_equal(np.array(ab_c), [[1],[2],[3],[4]])
-
- assert_raises(ValueError, lambda: np.r_['rc', a, b])
-
- def test_matrix_scalar(self):
- r = np.r_['r', [1, 2], 3]
- assert_equal(type(r), np.matrix)
- assert_equal(np.array(r), [[1,2,3]])
-
- def test_matrix_builder(self):
- a = np.array([1])
- b = np.array([2])
- c = np.array([3])
- d = np.array([4])
- actual = np.r_['a, b; c, d']
- expected = np.bmat([[a, b], [c, d]])
-
- assert_equal(actual, expected)
- assert_equal(type(actual), type(expected))
-
def test_0d(self):
assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
-def test_fill_diagonal():
- a = np.zeros((3, 3), int)
- fill_diagonal(a, 5)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0],
- [0, 5, 0],
- [0, 0, 5]]))
-
- #Test tall matrix
- a = np.zeros((10, 3), int)
- fill_diagonal(a, 5)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0],
- [0, 5, 0],
- [0, 0, 5],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0],
- [0, 0, 0]]))
-
- #Test tall matrix wrap
- a = np.zeros((10, 3), int)
- fill_diagonal(a, 5, True)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0],
- [0, 5, 0],
- [0, 0, 5],
- [0, 0, 0],
- [5, 0, 0],
- [0, 5, 0],
- [0, 0, 5],
- [0, 0, 0],
- [5, 0, 0],
- [0, 5, 0]]))
-
- #Test wide matrix
- a = np.zeros((3, 10), int)
- fill_diagonal(a, 5)
- yield (assert_array_equal, a,
- np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]))
-
- # The same function can operate on a 4-d array:
- a = np.zeros((3, 3, 3, 3), int)
- fill_diagonal(a, 4)
- i = np.array([0, 1, 2])
- yield (assert_equal, np.where(a != 0), (i, i, i, i))
+class TestFillDiagonal(object):
+ def test_basic(self):
+ a = np.zeros((3, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5]])
+ )
+
+ def test_tall_matrix(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0]])
+ )
+
+ def test_tall_matrix_wrap(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5, True)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0]])
+ )
+
+ def test_wide_matrix(self):
+ a = np.zeros((3, 10), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])
+ )
+
+ def test_operate_4d_array(self):
+ a = np.zeros((3, 3, 3, 3), int)
+ fill_diagonal(a, 4)
+ i = np.array([0, 1, 2])
+ assert_equal(np.where(a != 0), (i, i, i, i))
def test_diag_indices():
di = diag_indices(4)
a = np.array([[1, 2, 3, 4],
- [5, 6, 7, 8],
- [9, 10, 11, 12],
- [13, 14, 15, 16]])
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
a[di] = 100
- yield (assert_array_equal, a,
- np.array([[100, 2, 3, 4],
- [5, 100, 7, 8],
- [9, 10, 100, 12],
- [13, 14, 15, 100]]))
+ assert_array_equal(
+ a, np.array([[100, 2, 3, 4],
+ [5, 100, 7, 8],
+ [9, 10, 100, 12],
+ [13, 14, 15, 100]])
+ )
# Now, we create indices to manipulate a 3-d array:
d3 = diag_indices(2, 3)
# And use it to set the diagonal of a zeros array to 1:
a = np.zeros((2, 2, 2), int)
a[d3] = 1
- yield (assert_array_equal, a,
- np.array([[[1, 0],
- [0, 0]],
-
- [[0, 0],
- [0, 1]]]))
+ assert_array_equal(
+ a, np.array([[[1, 0],
+ [0, 0]],
+ [[0, 0],
+ [0, 1]]])
+ )
def test_diag_indices_from():
# Make sure 0-sized ndindex works correctly
x = list(ndindex(*[0]))
assert_equal(x, [])
-
-
-if __name__ == "__main__":
- run_module_suite()
import gzip
import os
import threading
-from tempfile import NamedTemporaryFile
import time
import warnings
import gc
import io
+import re
+import pytest
+from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
-import re
import numpy as np
import numpy.ma as ma
from numpy.compat import asbytes, bytes, unicode, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
- run_module_suite, assert_warns, assert_, SkipTest,
- assert_raises_regex, assert_raises, assert_allclose,
- assert_array_equal, temppath, tempdir, dec, IS_PYPY, suppress_warnings,
-)
+ assert_warns, assert_, SkipTest, assert_raises_regex, assert_raises,
+ assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
+ HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles,
+ )
class TextIO(BytesIO):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
- @dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
+ @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
- @dec.slow
+ @pytest.mark.slow
def test_format_2_0(self):
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
- @dec.skipif(not IS_64BIT, "Works only with 64bit systems")
- @dec.slow
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ @pytest.mark.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
fp.seek(0)
assert_(not fp.closed)
- @dec.skipif(IS_PYPY, "context manager required on PyPy")
+ # FIXME: Is this still true?
+ @pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy")
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
+ def test_complex_negative_exponent(self):
+ # Prior to 1.15, some formats generated x+-yj, gh 7895
+ ncols = 2
+ nrows = 2
+ a = np.zeros((ncols, nrows), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.3e')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
+ b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
+
def test_custom_writer(self):
class CustomWriter(list):
assert_array_equal(res, wanted)
# Python2 .open does not support encoding
- @dec.skipif(MAJVER == 2)
+ @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_gzip(self):
self.check_compressed(gzip.open, ('.gz',))
- @dec.skipif(MAJVER == 2 or not HAS_BZ2)
+ @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
+ @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_bz2(self):
self.check_compressed(bz2.open, ('.bz2',))
- @dec.skipif(MAJVER == 2 or not HAS_LZMA)
+ @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
+ @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_lzma(self):
self.check_compressed(lzma.open, ('.xz', '.lzma'))
res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
+ def test_complex_misformatted(self):
+ # test for backward compatibility
+ # some complex formats used to generate x+-yj
+ a = np.zeros((2, 2), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.16e')
+ c.seek(0)
+ txt = c.read()
+ c.seek(0)
+ # misformat the sign on the imaginary part, gh 7895
+ txt_bad = txt.replace(b'e+00-', b'e00+-')
+ assert_(txt_bad != txt)
+ c.write(txt_bad)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=complex)
+ assert_equal(res, a)
+
def test_universal_newline(self):
with temppath() as name:
with open(name, 'w') as f:
dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
- @dec.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968')
+ @pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
+ reason="Wrong preferred encoding")
def test_binary_load(self):
butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
b"20,2,3,\xc3\x95scar\n\r"
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test, ctrl)
+ def test_names_and_comments_none(self):
+ # Tests case when names is true but comments is None (gh-10780)
+ data = TextIO('col1 col2\n 1 2\n 3 4')
+ test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
+ control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
+ assert_equal(test, control)
+
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
# encoding of io.open. Will need to change this for PyTest, maybe
# using pytest.mark.xfail(raises=***).
try:
- import locale
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
assert_equal(test['f2'], 1024)
+@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
class TestPathUsage(object):
# Test that pathlib.Path can be used
- @dec.skipif(Path is None, "No pathlib.Path")
def test_loadtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
x = np.loadtxt(path)
assert_array_equal(x, a)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_save_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npy') as path:
data = np.load(path)
assert_array_equal(data, a)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_savez_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
np.savez(path, lab='place holder')
with np.load(path) as data:
assert_array_equal(data['lab'], 'place holder')
-
- @dec.skipif(Path is None, "No pathlib.Path")
+
def test_savez_compressed_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
assert_array_equal(data['lab'], 'place holder')
data.close()
- @dec.skipif(Path is None, "No pathlib.Path")
def test_genfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
data = np.genfromtxt(path)
assert_array_equal(a, data)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_ndfromtxt(self):
# Test outputting a standard ndarray
with temppath(suffix='.txt') as path:
test = np.ndfromtxt(path, dtype=int)
assert_array_equal(test, control)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_mafromtxt(self):
# From `test_fancy_dtype_alt` above
with temppath(suffix='.txt') as path:
control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
assert_equal(test, control)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_recfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
- @dec.skipif(Path is None, "No pathlib.Path")
def test_recfromcsv(self):
with temppath(suffix='.txt') as path:
path = Path(path)
assert_('x' in z.keys())
-@dec._needs_refcount
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
np.savez(f, [1, 2, 3])
f.seek(0)
- assert_(gc.isenabled())
- gc.disable()
- try:
- gc.collect()
+ with assert_no_gc_cycles():
np.load(f)
- # gc.collect returns the number of unreachable objects in cycles that
- # were found -- we are checking that no cycles were created by np.load
- n_objects_in_cycles = gc.collect()
- finally:
- gc.enable()
- assert_equal(n_objects_in_cycles, 0)
-
-if __name__ == "__main__":
- run_module_suite()
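# NOTE (editor, illustration only): np.testing.assert_no_gc_cycles (new in
# 1.15) replaces the manual gc.disable()/gc.collect() dance removed above.
# Besides the context-manager form used in the new code, it can also wrap a
# call directly -- hedged here as my reading of the 1.15 API:
import numpy as np
np.testing.assert_no_gc_cycles(np.ones, 10)  # np.ones creates no gc cycles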
import sys
import numpy as np
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises
- )
+from numpy.testing import assert_, assert_equal, assert_raises
PY2 = sys.version_info.major < 3
np.frexp(ArrayLike(2 ** -3)), expected)
_assert_equal_type_and_value(
np.frexp(ArrayLike(np.array(2 ** -3))), expected)
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_almost_equal,
- assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
+ assert_, assert_equal, assert_almost_equal, assert_no_warnings,
+ assert_raises, assert_array_equal, suppress_warnings
)
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
+
# check that rows of nan are dealt with for subclasses (#4628)
- mat[1] = np.nan
+ mine[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
- and not np.isnan(res[2, 0]))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(np.isnan(res[1]) and not np.isnan(res[0])
+ and not np.isnan(res[2]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine)
+ assert_(res.shape == ())
assert_(not np.isnan(res))
assert_(len(w) == 0)
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
class TestNanFunctions_IntTypes(object):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
- def test_matrices(self):
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
# Check that it works and that type and
# shape are preserved
- mat = np.matrix(np.eye(3))
+ array = np.eye(3)
+ mine = array.view(MyNDArray)
for f in self.nanfuncs:
- res = f(mat, axis=0)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (1, 3))
- res = f(mat, axis=1)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 1))
- res = f(mat)
- assert_(np.isscalar(res))
+ expected_shape = f(array, axis=0).shape
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array, axis=1).shape
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array).shape
+ res = f(mine)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
- def test_matrices(self):
- # Check that it works and that type and
- # shape are preserved
- mat = np.matrix(np.eye(3))
- for f in self.nanfuncs:
- for axis in np.arange(2):
- res = f(mat, axis=axis)
- assert_(isinstance(res, np.matrix))
- assert_(res.shape == (3, 3))
- res = f(mat)
- assert_(res.shape == (1, 3*3))
-
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
-if __name__ == "__main__":
- run_module_suite()
+class TestNanFunctions_Quantile(object):
+ # most of this is already tested by TestPercentile
+
+ def test_regression(self):
+ ar = np.arange(24).reshape(2, 3, 4).astype(float)
+ ar[0][1] = np.nan
+
+ assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=0),
+ np.nanpercentile(ar, q=50, axis=0))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=1),
+ np.nanpercentile(ar, q=50, axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.5], axis=1),
+ np.nanpercentile(ar, q=[50], axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1),
+ np.nanpercentile(ar, q=[25, 50, 75], axis=1))
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.nanquantile(x, 0), 0.)
+ assert_equal(np.nanquantile(x, 1), 3.5)
+ assert_equal(np.nanquantile(x, 0.5), 1.75)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.nanquantile(np.arange(100.), p, interpolation="midpoint")
+ assert_array_equal(p, p0)
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import (
- assert_array_equal, assert_equal, assert_raises, run_module_suite
-)
+from numpy.testing import assert_array_equal, assert_equal, assert_raises
def test_packbits():
assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
d = d.T.copy()
assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
-
-
-if __name__ == "__main__":
- run_module_suite()
-from __future__ import division, absolute_import, print_function
-
'''
>>> p = np.poly1d([1.,2,3])
>>> p
-poly1d([ 1., 2., 3.])
+poly1d([1., 2., 3.])
>>> print(p)
2
1 x + 2 x + 3
>>> q = np.poly1d([3.,2,1])
>>> q
-poly1d([ 3., 2., 1.])
+poly1d([3., 2., 1.])
>>> print(q)
2
3 x + 2 x + 1
86.0
>>> p * q
-poly1d([ 3., 8., 14., 8., 3.])
+poly1d([ 3., 8., 14., 8., 3.])
>>> p / q
-(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667]))
+(poly1d([0.33333333]), poly1d([1.33333333, 2.66666667]))
>>> p + q
-poly1d([ 4., 4., 4.])
+poly1d([4., 4., 4.])
>>> p - q
poly1d([-2., 0., 2.])
>>> p ** 4
-poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
+poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
>>> p(q)
-poly1d([ 9., 12., 16., 8., 6.])
+poly1d([ 9., 12., 16., 8., 6.])
>>> q(p)
-poly1d([ 3., 12., 32., 40., 34.])
+poly1d([ 3., 12., 32., 40., 34.])
>>> np.asarray(p)
-array([ 1., 2., 3.])
+array([1., 2., 3.])
>>> len(p)
2
(3.0, 2.0, 1.0, 0)
>>> p.integ()
-poly1d([ 0.33333333, 1. , 3. , 0. ])
+poly1d([0.33333333, 1. , 3. , 0. ])
>>> p.integ(1)
-poly1d([ 0.33333333, 1. , 3. , 0. ])
+poly1d([0.33333333, 1. , 3. , 0. ])
>>> p.integ(5)
-poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. ,
- 0. , 0. , 0. ])
+poly1d([0.00039683, 0.00277778, 0.025 , 0. , 0. ,
+ 0. , 0. , 0. ])
>>> p.deriv()
-poly1d([ 2., 2.])
+poly1d([2., 2.])
>>> p.deriv(2)
-poly1d([ 2.])
+poly1d([2.])
>>> q = np.poly1d([1.,2,3], variable='lambda')
>>> print(q)
        2
1 lambda + 2 lambda + 3
>>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
-(poly1d([ 1., -1.]), poly1d([ 0.]))
+(poly1d([ 1., -1.]), poly1d([0.]))
'''
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_almost_equal, assert_array_almost_equal, assert_raises, rundocs
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, rundocs
)
p.coeffs[2] += 10
assert_equal(p.coeffs, [1, 2, 3])
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
+import pytest
+
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
-from numpy.testing import (
- run_module_suite, assert_, assert_raises, dec
- )
+from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
- find_duplicates, merge_arrays, append_fields, stack_arrays, join_by
- )
+ find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
+ repack_fields)
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
+ def test_repack_fields(self):
+ dt = np.dtype('u1,f4,i8', align=True)
+ a = np.zeros(2, dtype=dt)
+
+ assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
+ assert_equal(repack_fields(a).itemsize, 13)
+ assert_equal(repack_fields(repack_fields(dt), align=True), dt)
+
+ # make sure type is preserved
+ dt = np.dtype((np.record, dt))
+ assert_(repack_fields(dt).type is np.record)
+
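# NOTE (editor, illustration only, NumPy >= 1.15): repack_fields strips the
# padding that align=True inserts. For 'u1,f4,i8' the aligned layout places
# the fields at offsets 0, 4, 8 (16 bytes total, itemsize rounded to the
# 8-byte struct alignment); packed, they occupy 1 + 4 + 8 = 13 bytes,
# matching the assertion in the test above:
import numpy as np
from numpy.lib.recfunctions import repack_fields
dt = np.dtype('u1,f4,i8', align=True)
assert dt.itemsize == 16
assert repack_fields(dt).itemsize == 13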
class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
- @dec.knownfailureif(True)
+ @pytest.mark.xfail(reason="See comment at gh-9343")
def test_same_name_different_dtypes_key(self):
a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
-
-if __name__ == '__main__':
- run_module_suite()
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal,
- assert_array_almost_equal, assert_raises, _assert_valid_refcount,
+ assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+ assert_raises, _assert_valid_refcount,
)
from numpy.compat import unicode
raise AssertionError()
finally:
out.close()
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
import warnings
+import functools
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
- vsplit, dstack, column_stack, kron, tile, expand_dims,
+ vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
+ put_along_axis
)
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises,
- assert_warns
+ assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
)
+def _add_keepdims(func):
+ """ hack in keepdims behavior into a function taking an axis """
+ @functools.wraps(func)
+ def wrapped(a, axis, **kwargs):
+ res = func(a, axis=axis, **kwargs)
+ if axis is None:
+ axis = 0 # res is now a scalar, so we can insert this anywhere
+ return np.expand_dims(res, axis=axis)
+ return wrapped
+
+
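# NOTE (editor, illustration only): the wrapper above restores the axis that
# the arg-functions drop, so their output lines up with what take_along_axis
# expects -- e.g. argmax over axis 1 of a (2, 3) array yields shape (2,),
# while the keepdims form yields (2, 1):
import numpy as np
a = np.array([[10, 30, 20], [60, 40, 50]])
assert np.argmax(a, axis=1).shape == (2,)
assert np.expand_dims(np.argmax(a, axis=1), axis=1).shape == (2, 1)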
+class TestTakeAlongAxis(object):
+ def test_argequivalent(self):
+ """ Test it translates from arg<func> to <func> """
+ from numpy.random import rand
+ a = rand(3, 4, 5)
+
+ funcs = [
+ (np.sort, np.argsort, dict()),
+ (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
+ (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
+ (np.partition, np.argpartition, dict(kth=2)),
+ ]
+
+ for func, argfunc, kwargs in funcs:
+ for axis in list(range(a.ndim)) + [None]:
+ a_func = func(a, axis=axis, **kwargs)
+ ai_func = argfunc(a, axis=axis, **kwargs)
+ assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+ def test_invalid(self):
+ """ Test it errors when indices has too few dimensions """
+ a = np.ones((10, 10))
+ ai = np.ones((10, 2), dtype=np.intp)
+
+ # sanity check
+ take_along_axis(a, ai, axis=1)
+
+ # not enough indices
+ assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+ # bool arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+ # float arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+ # invalid axis
+ assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
+
+ def test_empty(self):
+ """ Test everything is ok with empty results, even with inserted dims """
+ a = np.ones((3, 4, 5))
+ ai = np.ones((3, 0, 5), dtype=np.intp)
+
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, ai.shape)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.ones((1, 2, 5), dtype=np.intp)
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis(object):
+ def test_replace_max(self):
+ a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+ for axis in list(range(a_base.ndim)) + [None]:
+ # we mutate this in the loop
+ a = a_base.copy()
+
+ # replace the max with a small value
+ i_max = _add_keepdims(np.argmax)(a, axis=axis)
+ put_along_axis(a, i_max, -99, axis=axis)
+
+ # find the new minimum, which should be where the max was
+ i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+ assert_equal(i_min, i_max)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
+ put_along_axis(a, ai, 20, axis=1)
+ assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+
class TestApplyAlongAxis(object):
def test_simple(self):
a = np.ones((20, 10), 'd')
[[27, 30, 33], [36, 39, 42], [45, 48, 51]])
def test_preserve_subclass(self):
- # this test is particularly malicious because matrix
- # refuses to become 1d
def double(row):
return row * 2
- m = np.matrix([[0, 1], [2, 3]])
- expected = np.matrix([[0, 2], [4, 6]])
+
+ class MyNDArray(np.ndarray):
+ pass
+
+ m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
+ expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
result = apply_along_axis(double, 0, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
result = apply_along_axis(double, 1, m)
- assert_(isinstance(result, np.matrix))
+ assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
def test_subclass(self):
def test_axis_insertion(self, cls=np.ndarray):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
return (x[::-1] * x[1:,None]).view(cls)
def test_axis_insertion_ma(self):
def f1to2(x):
- """produces an assymmetric non-square matrix from x"""
+ """produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
res = x[::-1] * x[1:,None]
return np.ma.masked_where(res%5==0, res)
class TestKron(object):
def test_return_type(self):
- a = np.ones([2, 2])
- m = np.asmatrix(a)
- assert_equal(type(kron(a, a)), np.ndarray)
- assert_equal(type(kron(m, m)), np.matrix)
- assert_equal(type(kron(a, m)), np.matrix)
- assert_equal(type(kron(m, a)), np.matrix)
-
class myarray(np.ndarray):
__array_priority__ = 0.0
+ a = np.ones([2, 2])
ma = myarray(a.shape, a.dtype, a.data)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(ma, ma)), myarray)
def compare_results(res, desired):
for i in range(len(desired)):
assert_array_equal(res[i], desired[i])
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import (
- run_module_suite, assert_equal, assert_array_equal,
- assert_raises, assert_
+ assert_equal, assert_array_equal, assert_raises, assert_
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
-)
+ )
def assert_shapes_correct(input_shapes, expected_shape):
# Broadcast a list of arrays with the given input shapes and check the
actual, _ = broadcast_arrays(input_array, np.ones(3))
assert_array_equal(expected, actual)
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
from numpy.testing import (
- run_module_suite, assert_equal, assert_array_equal, assert_array_max_ulp,
+ assert_equal, assert_array_equal, assert_array_max_ulp,
assert_array_almost_equal, assert_raises,
)
from numpy import (
- arange, add, fliplr, flipud, zeros, ones, eye, array, diag,
- histogram2d, tri, mask_indices, triu_indices, triu_indices_from,
- tril_indices, tril_indices_from, vander,
+ arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
+ tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
+ tril_indices_from, vander,
)
import numpy as np
x = array([1, 1, 2, 3, 4, 4, 4, 5])
y = array([1, 3, 2, 0, 1, 2, 3, 4])
H, xed, yed = histogram2d(
- x, y, (6, 5), range=[[0, 6], [0, 5]], normed=True)
+ x, y, (6, 5), range=[[0, 6], [0, 5]], density=True)
answer = array(
[[0., 0, 0, 0, 0],
[0, 1, 0, 1, 0],
assert_array_equal(xed, np.linspace(0, 6, 7))
assert_array_equal(yed, np.linspace(0, 5, 6))
- def test_norm(self):
+ def test_density(self):
x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
H, xed, yed = histogram2d(
- x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], normed=True)
+ x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True)
answer = array([[1, 1, .5],
[1, 1, .5],
[.5, .5, .25]])/9.
def test_binparameter_combination(self):
x = array(
- [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
+ [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
0.59944483, 1])
y = array(
- [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
+ [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
0.15886423, 1])
edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
H, xe, ye = histogram2d(x, y, (edges, 4))
answer = array(
- [[ 2., 0., 0., 0.],
- [ 0., 1., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 1., 0., 0.],
- [ 1., 0., 0., 0.],
- [ 0., 1., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 1.]])
+ [[2., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [1., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
H, xe, ye = histogram2d(x, y, (4, edges))
answer = array(
- [[ 1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
- [ 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
- [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
+ [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
a = np.ones((2, 2), dtype=dtype)
b = np.tril(a)
c = np.triu(a)
- yield assert_array_equal, b, [[1, 0], [1, 1]]
- yield assert_array_equal, c, b.T
+ assert_array_equal(b, [[1, 0], [1, 1]])
+ assert_array_equal(c, b.T)
# should return the same dtype as the original array
- yield assert_equal, b.dtype, a.dtype
- yield assert_equal, c.dtype, a.dtype
+ assert_equal(b.dtype, a.dtype)
+ assert_equal(c.dtype, a.dtype)
def test_tril_triu_ndim3():
], dtype=dtype)
a_triu_observed = np.triu(a)
a_tril_observed = np.tril(a)
- yield assert_array_equal, a_triu_observed, a_triu_desired
- yield assert_array_equal, a_tril_observed, a_tril_desired
- yield assert_equal, a_triu_observed.dtype, a.dtype
- yield assert_equal, a_tril_observed.dtype, a.dtype
+ assert_array_equal(a_triu_observed, a_triu_desired)
+ assert_array_equal(a_tril_observed, a_tril_desired)
+ assert_equal(a_triu_observed.dtype, a.dtype)
+ assert_equal(a_tril_observed.dtype, a.dtype)
+
def test_tril_triu_with_inf():
# Issue 4859
[16, -8, 4, -2, 1],
[81, 27, 9, 3, 1]])
# Check default value of N:
- yield (assert_array_equal, v, powers[:, 1:])
+ assert_array_equal(v, powers[:, 1:])
# Check a range of N values, including 0 and 5 (greater than default)
m = powers.shape[1]
for n in range(6):
v = vander(c, N=n)
- yield (assert_array_equal, v, powers[:, m-n:m])
+ assert_array_equal(v, powers[:, m-n:m])
def test_dtypes(self):
c = array([11, -12, 13], dtype=np.int8)
expected = np.array([[121, 11, 1],
[144, -12, 1],
[169, 13, 1]])
- yield (assert_array_equal, v, expected)
+ assert_array_equal(v, expected)
c = array([1.0+1j, 1.0-1j])
v = vander(c, N=3)
# The data is floating point, but the values are small integers,
# so assert_array_equal *should* be safe here (rather than, say,
# assert_array_almost_equal).
- yield (assert_array_equal, v, expected)
-
-
-if __name__ == "__main__":
- run_module_suite()
+ assert_array_equal(v, expected)
import numpy as np
from numpy.compat import long
from numpy.testing import (
- assert_, assert_equal, assert_array_equal, run_module_suite, assert_raises
+ assert_, assert_equal, assert_array_equal, assert_raises
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
# other numpy function
assert_raises(TypeError,
asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal, assert_warns
+ assert_, assert_equal, assert_array_equal, assert_warns
)
obj.metadata = self.metadata
return obj
+ def __array_finalize__(self, obj):
+ self.metadata = getattr(obj, 'metadata', None)
+ return self
+
a = nx.array([1.1, -1.1])
m = MyArray(a, metadata='foo')
f = ufl.fix(m)
out = np.array(0.0)
actual = np.fix(x, out=out)
assert_(actual is out)
-
-if __name__ == "__main__":
- run_module_suite()
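The added `__array_finalize__` complements `__array_wrap__`: it runs for views and new-from-template arrays, so `metadata` survives more than just ufunc calls. A minimal standalone sketch of the pattern (names mirror the test above; the asserts are illustrative):

import numpy as np

class MyArray(np.ndarray):
    def __new__(cls, data, metadata=None):
        obj = np.asarray(data).view(cls)
        obj.metadata = metadata
        return obj

    def __array_finalize__(self, obj):
        # Runs for explicit construction, views, and new-from-template,
        # so metadata propagates through slicing and ufunc outputs.
        self.metadata = getattr(obj, 'metadata', None)

m = MyArray([1.1, -1.1], metadata='foo')
assert m[:1].metadata == 'foo'       # view: metadata carried over
assert np.fix(m).metadata == 'foo'   # new-from-template: finalized result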
from __future__ import division, absolute_import, print_function
import sys
+import pytest
+
from numpy.core import arange
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises_regex, dec
- )
+from numpy.testing import assert_, assert_equal, assert_raises_regex
from numpy.lib import deprecate
import numpy.lib.utils as utils
from StringIO import StringIO
-@dec.skipif(sys.flags.optimize == 2)
+@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
def test_lookfor():
out = StringIO()
utils.lookfor('eigenvalue', module='numpy', output=out,
def test_assert_raises_regex_context_manager():
with assert_raises_regex(ValueError, 'no deprecation warning'):
raise ValueError('no deprecation warning')
-
-
-if __name__ == "__main__":
- run_module_suite()
return v
-def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
+def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
+ density=None):
"""
Compute the bi-dimensional histogram of two data samples.
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
+ density : bool, optional
+ If False, the default, returns the number of samples in each bin.
+ If True, returns the probability *density* function at the bin,
+ ``bin_count / sample_count / bin_area``.
normed : bool, optional
- If False, returns the number of samples in each bin. If True,
- returns the bin density ``bin_count / sample_count / bin_area``.
+        An alias for the `density` argument that behaves identically. To avoid
+        confusion with the broken `normed` argument to `histogram`, `density`
+        should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
N = 1
if N != 1 and N != 2:
- xedges = yedges = asarray(bins, float)
+ xedges = yedges = asarray(bins)
bins = [xedges, yedges]
- hist, edges = histogramdd([x, y], bins, range, normed, weights)
+ hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
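Both keyword spellings are simply forwarded to `histogramdd`, which resolves the alias. A quick sketch of the renamed argument (sample data reused from the tests above):

import numpy as np

x = np.array([1, 1, 2, 3, 4, 4, 4, 5])
y = np.array([1, 3, 2, 0, 1, 2, 3, 4])

# Preferred spelling after this change; `normed=True` still works as an alias.
H, xedges, yedges = np.histogram2d(x, y, bins=(6, 5),
                                   range=[[0, 6], [0, 5]], density=True)

# Each cell holds bin_count / sample_count / bin_area, so the histogram
# integrates to one over the sampled region.
bin_area = np.outer(np.diff(xedges), np.diff(yedges))
assert np.isclose((H * bin_area).sum(), 1.0)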
from .linalg import *
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
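With this change, `numpy.linalg.test` becomes a `PytestTester` bound to the subpackage name, so the familiar entry point keeps working but runs under pytest. A hedged usage sketch (the `label`/`verbose` arguments mirror the old nose-based interface):

import numpy as np

# Run the numpy.linalg test suite under pytest; returns True on success.
ok = np.linalg.test(label='fast', verbose=1)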
static integer c__2 = 2;
static integer c__0 = 0;
static integer c__65 = 65;
-static real c_b894 = 1.f;
+static integer c__9 = 9;
+static integer c__6 = 6;
+static real c_b328 = 0.f;
+static real c_b1034 = 1.f;
static integer c__12 = 12;
static integer c__49 = 49;
-static real c_b1087 = 0.f;
-static integer c__9 = 9;
-static real c_b1136 = -1.f;
+static real c_b1276 = -1.f;
static integer c__13 = 13;
static integer c__15 = 15;
static integer c__14 = 14;
static integer c__16 = 16;
static logical c_false = FALSE_;
static logical c_true = TRUE_;
-static real c_b2023 = .5f;
+static real c_b2435 = .5f;
/* Subroutine */ int cgebak_(char *job, char *side, integer *n, integer *ilo,
integer *ihi, real *scale, integer *m, complex *v, integer *ldv,
} /* cgelqf_ */
-/* Subroutine */ int cgeqr2_(integer *m, integer *n, complex *a, integer *lda,
- complex *tau, complex *work, integer *info)
+/* Subroutine */ int cgelsd_(integer *m, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *b, integer *ldb, real *s, real *rcond,
+ integer *rank, complex *work, integer *lwork, real *rwork, integer *
+ iwork, integer *info)
{
/* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
- complex q__1;
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
/* Local variables */
- static integer i__, k;
- static complex alpha;
- extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
- , integer *, complex *, complex *, integer *, complex *),
- clarfg_(integer *, complex *, complex *, integer *, complex *),
- xerbla_(char *, integer *);
+ static integer ie, il, mm;
+ static real eps, anrm, bnrm;
+ static integer itau, nlvl, iascl, ibscl;
+ static real sfmin;
+ static integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+ extern /* Subroutine */ int cgebrd_(integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *, integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), clalsd_(
+ char *, integer *, integer *, integer *, real *, real *, complex *
+ , integer *, real *, integer *, complex *, real *, integer *,
+ integer *), clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), cgeqrf_(integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), cunmbr_(char *, char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), slaset_(
+ char *, integer *, integer *, real *, real *, real *, integer *), cunmlq_(char *, char *, integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, complex *,
+ integer *, integer *);
+ static integer ldwork;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ static integer liwork, minwrk, maxwrk;
+ static real smlnum;
+ static integer lrwork;
+ static logical lquery;
+ static integer nrwork, smlsiz;
/*
- -- LAPACK routine (version 3.2.2) --
+ -- LAPACK driver routine (version 3.2) --
-- LAPACK is a software package provided by Univ. of Tennessee, --
-- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
- June 2010
+ November 2006
Purpose
=======
- CGEQR2 computes a QR factorization of a complex m by n matrix A:
- A = Q * R.
+ CGELSD computes the minimum-norm solution to a real linear least
+ squares problem:
+ minimize 2-norm(| b - A*x |)
+ using the singular value decomposition (SVD) of A. A is an M-by-N
+ matrix which may be rank-deficient.
+
+ Several right hand side vectors b and solution vectors x can be
+ handled in a single call; they are stored as the columns of the
+ M-by-NRHS right hand side matrix B and the N-by-NRHS solution
+ matrix X.
+
+ The problem is solved in three steps:
+ (1) Reduce the coefficient matrix A to bidiagonal form with
+ Householder transformations, reducing the original problem
+ into a "bidiagonal least squares problem" (BLS)
+ (2) Solve the BLS using a divide and conquer approach.
+ (3) Apply back all the Householder transformations to solve
+ the original least squares problem.
+
+ The effective rank of A is determined by treating as zero those
+ singular values which are less than RCOND times the largest singular
+ value.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
Arguments
=========
M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
+ The number of rows of the matrix A. M >= 0.
N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
+ The number of columns of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrices B and X. NRHS >= 0.
A (input/output) COMPLEX array, dimension (LDA,N)
- On entry, the m by n matrix A.
- On exit, the elements on and above the diagonal of the array
- contain the min(m,n) by n upper trapezoidal matrix R (R is
- upper triangular if m >= n); the elements below the diagonal,
- with the array TAU, represent the unitary matrix Q as a
- product of elementary reflectors (see Further Details).
+ On entry, the M-by-N matrix A.
+ On exit, A has been destroyed.
LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
+ The leading dimension of the array A. LDA >= max(1,M).
- TAU (output) COMPLEX array, dimension (min(M,N))
- The scalar factors of the elementary reflectors (see Further
- Details).
+ B (input/output) COMPLEX array, dimension (LDB,NRHS)
+ On entry, the M-by-NRHS right hand side matrix B.
+ On exit, B is overwritten by the N-by-NRHS solution matrix X.
+ If m >= n and RANK = n, the residual sum-of-squares for
+ the solution in the i-th column is given by the sum of
+ squares of the modulus of elements n+1:m in that column.
- WORK (workspace) COMPLEX array, dimension (N)
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M,N).
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
+ S (output) REAL array, dimension (min(M,N))
+ The singular values of A in decreasing order.
+ The condition number of A in the 2-norm = S(1)/S(min(m,n)).
- Further Details
- ===============
+ RCOND (input) REAL
+ RCOND is used to determine the effective rank of A.
+ Singular values S(i) <= RCOND*S(1) are treated as zero.
+ If RCOND < 0, machine precision is used instead.
- The matrix Q is represented as a product of elementary reflectors
+ RANK (output) INTEGER
+ The effective rank of A, i.e., the number of singular values
+ which are greater than RCOND*S(1).
- Q = H(1) H(2) . . . H(k), where k = min(m,n).
+ WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK))
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
- Each H(i) has the form
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK must be at least 1.
+ The exact minimum amount of workspace needed depends on M,
+ N and NRHS. As long as LWORK is at least
+ 2 * N + N * NRHS
+ if M is greater than or equal to N or
+ 2 * M + M * NRHS
+ if M is less than N, the code will execute correctly.
+ For good performance, LWORK should generally be larger.
- H(i) = I - tau * v * v'
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the array WORK and the
+ minimum sizes of the arrays RWORK and IWORK, and returns
+ these values as the first entries of the WORK, RWORK and
+ IWORK arrays, and no error message related to LWORK is issued
+ by XERBLA.
- where tau is a complex scalar, and v is a complex vector with
- v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
- and tau in TAU(i).
+ RWORK (workspace) REAL array, dimension (MAX(1,LRWORK))
+ LRWORK >=
+ 10*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS +
+ MAX( (SMLSIZ+1)**2, N*(1+NRHS) + 2*NRHS )
+ if M is greater than or equal to N or
+ 10*M + 2*M*SMLSIZ + 8*M*NLVL + 3*SMLSIZ*NRHS +
+ MAX( (SMLSIZ+1)**2, N*(1+NRHS) + 2*NRHS )
+ if M is less than N, the code will execute correctly.
+ SMLSIZ is returned by ILAENV and is equal to the maximum
+ size of the subproblems at the bottom of the computation
+ tree (usually about 25), and
+ NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+ On exit, if INFO = 0, RWORK(1) returns the minimum LRWORK.
+
+ IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK))
+ LIWORK >= max(1, 3*MINMN*NLVL + 11*MINMN),
+ where MINMN = MIN( M,N ).
+ On exit, if INFO = 0, IWORK(1) returns the minimum LIWORK.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: the algorithm for computing the SVD failed to converge;
+ if INFO = i, i off-diagonal elements of an intermediate
+ bidiagonal form did not converge to zero.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
=====================================================================
- Test the input arguments
+ Test the input arguments.
*/
/* Parameter adjustments */
a_dim1 = *lda;
a_offset = 1 + a_dim1;
a -= a_offset;
- --tau;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
--work;
+ --rwork;
+ --iwork;
/* Function Body */
*info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ lquery = *lwork == -1;
if (*m < 0) {
*info = -1;
} else if (*n < 0) {
*info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
} else if (*lda < max(1,*m)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("CGEQR2", &i__1);
- return 0;
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
}
- k = min(*m,*n);
+/*
+ Compute workspace.
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.)
+*/
- i__1 = k;
- for (i__ = 1; i__ <= i__1; ++i__) {
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ liwork = 1;
+ lrwork = 1;
+ if (minmn > 0) {
+ smlsiz = ilaenv_(&c__9, "CGELSD", " ", &c__0, &c__0, &c__0, &c__0,
+ (ftnlen)6, (ftnlen)1);
+ mnthr = ilaenv_(&c__6, "CGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = (integer) (log((real) minmn / (real) (smlsiz + 1)) / log(
+ 2.f)) + 1;
+ nlvl = max(i__1,0);
+ liwork = minmn * 3 * nlvl + minmn * 11;
+ mm = *m;
+ if (*m >= *n && *m >= mnthr) {
-/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+/*
+ Path 1a - overdetermined, with many more rows than
+ columns.
+*/
- i__2 = *m - i__ + 1;
-/* Computing MIN */
- i__3 = i__ + 1;
- clarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
- , &c__1, &tau[i__]);
- if (i__ < *n) {
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * ilaenv_(&c__1, "CGEQRF", " ", m, n,
+ &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *nrhs * ilaenv_(&c__1, "CUNMQR", "LC",
+ m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
-/* Apply H(i)' to A(i:m,i+1:n) from the left */
+/*
+ Path 1 - overdetermined or exactly determined.
- i__2 = i__ + i__ * a_dim1;
- alpha.r = a[i__2].r, alpha.i = a[i__2].i;
- i__2 = i__ + i__ * a_dim1;
- a[i__2].r = 1.f, a[i__2].i = 0.f;
- i__2 = *m - i__ + 1;
- i__3 = *n - i__;
- r_cnjg(&q__1, &tau[i__]);
- clarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &q__1,
- &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
- i__2 = i__ + i__ * a_dim1;
- a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+ Computing MAX
+ Computing 2nd power
+*/
+ i__3 = smlsiz + 1;
+ i__1 = i__3 * i__3, i__2 = *n * (*nrhs + 1) + (*nrhs << 1);
+ lrwork = *n * 10 + (*n << 1) * smlsiz + (*n << 3) * nlvl +
+ smlsiz * 3 * *nrhs + max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (mm + *n) * ilaenv_(&c__1,
+ "CGEBRD", " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *nrhs * ilaenv_(&c__1,
+ "CUNMBR", "QLC", &mm, nrhs, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1,
+ "CUNMBR", "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n << 1) + *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = (*n << 1) + mm, i__2 = (*n << 1) + *n * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ if (*n > *m) {
+/*
+ Computing MAX
+ Computing 2nd power
+*/
+ i__3 = smlsiz + 1;
+ i__1 = i__3 * i__3, i__2 = *n * (*nrhs + 1) + (*nrhs << 1);
+ lrwork = *m * 10 + (*m << 1) * smlsiz + (*m << 3) * nlvl +
+ smlsiz * 3 * *nrhs + max(i__1,i__2);
+ if (*n >= mnthr) {
+
+/*
+ Path 2a - underdetermined, with many more columns
+ than rows.
+*/
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) *
+ ilaenv_(&c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs *
+ ilaenv_(&c__1, "CUNMBR", "QLC", m, nrhs, m, &c_n1,
+ (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) *
+ ilaenv_(&c__1, "CUNMLQ", "LC", n, nrhs, m, &c_n1,
+ (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+/*
+ XXX: Ensure the Path 2a case below is triggered. The workspace
+ calculation should use queries for all routines eventually.
+ Computing MAX
+ Computing MAX
+*/
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4),
+ i__3 = max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = maxwrk, i__2 = (*m << 2) + *m * *m + max(i__3,i__4)
+ ;
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - underdetermined. */
+
+ maxwrk = (*m << 1) + (*n + *m) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *nrhs * ilaenv_(&c__1,
+ "CUNMBR", "QLC", m, nrhs, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*m << 1) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = (*m << 1) + *n, i__2 = (*m << 1) + *m * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ }
+ minwrk = min(minwrk,maxwrk);
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ iwork[1] = liwork;
+ rwork[1] = (real) lrwork;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
}
-/* L10: */
}
- return 0;
-/* End of CGEQR2 */
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGELSD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
-} /* cgeqr2_ */
+/* Quick return if possible. */
-/* Subroutine */ int cgeqrf_(integer *m, integer *n, complex *a, integer *lda,
- complex *tau, complex *work, integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ if (*m == 0 || *n == 0) {
+ *rank = 0;
+ return 0;
+ }
- /* Local variables */
- static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
- extern /* Subroutine */ int cgeqr2_(integer *, integer *, complex *,
- integer *, complex *, complex *, integer *), clarfb_(char *, char
- *, char *, char *, integer *, integer *, integer *, complex *,
- integer *, complex *, integer *, complex *, integer *, complex *,
- integer *), clarft_(char *, char *
- , integer *, integer *, complex *, integer *, complex *, complex *
- , integer *), xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static integer ldwork, lwkopt;
- static logical lquery;
+/* Get machine parameters. */
+ eps = slamch_("P");
+ sfmin = slamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
-/*
- -- LAPACK routine (version 3.2) --
- -- LAPACK is a software package provided by Univ. of Tennessee, --
- -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
- November 2006
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+ anrm = clange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
- Purpose
- =======
+/* Scale matrix norm up to SMLNUM */
- CGEQRF computes a QR factorization of a complex M-by-N matrix A:
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ claset_("F", &i__1, nrhs, &c_b56, &c_b56, &b[b_offset], ldb);
+ slaset_("F", &minmn, &c__1, &c_b328, &c_b328, &s[1], &c__1)
+ ;
+ *rank = 0;
+ goto L10;
+ }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+ bnrm = clange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* If M < N make sure B(M+1:N,:) = 0 */
+
+ if (*m < *n) {
+ i__1 = *n - *m;
+ claset_("F", &i__1, nrhs, &c_b56, &c_b56, &b[*m + 1 + b_dim1], ldb);
+ }
+
+/* Overdetermined case. */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns */
+
+ mm = *n;
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R.
+ (RWorkspace: need N)
+ (CWorkspace: need N, prefer N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+
+/*
+ Multiply B by transpose(Q).
+ (RWorkspace: need N)
+ (CWorkspace: need NRHS, prefer NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmqr_("L", "C", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ claset_("L", &i__1, &i__2, &c_b56, &c_b56, &a[a_dim1 + 2],
+ lda);
+ }
+ }
+
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+ ie = 1;
+ nrwork = ie + *n;
+
+/*
+ Bidiagonalize R in A.
+ (RWorkspace: need N)
+ (CWorkspace: need 2*N+MM, prefer 2*N+(MM+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(&mm, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of R.
+ (CWorkspace: need 2*N+NRHS, prefer 2*N+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "C", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ clalsd_("U", &smlsiz, n, nrhs, &s[1], &rwork[ie], &b[b_offset], ldb,
+ rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+ b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m, i__2 = (*m << 1) - 4, i__1 = max(i__1,i__2), i__1 = max(
+ i__1,*nrhs), i__2 = *n - *m * 3;
+ if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__1,i__2)) {
+
+/*
+ Path 2a - underdetermined, with many more columns than rows
+ and sufficient workspace for an efficient algorithm.
+*/
+
+ ldwork = *m;
+/*
+ Computing MAX
+ Computing MAX
+*/
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = (*m << 2) + *m * *lda + max(i__3,i__4), i__2 = *m * *lda +
+ *m + *m * *nrhs;
+ if (*lwork >= max(i__1,i__2)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ nwork = *m + 1;
+
+/*
+ Compute A=L*Q.
+ (CWorkspace: need 2*M, prefer M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+ il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ claset_("U", &i__1, &i__2, &c_b56, &c_b56, &work[il + ldwork], &
+ ldwork);
+ itauq = il + ldwork * *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/*
+ Bidiagonalize L in WORK(IL).
+ (RWorkspace: need M)
+ (CWorkspace: need M*M+4*M, prefer M*M+4*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, m, &work[il], &ldwork, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of L.
+ (CWorkspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "C", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ clalsd_("U", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+ i__1 = *n - *m;
+ claset_("F", &i__1, nrhs, &c_b56, &c_b56, &b[*m + 1 + b_dim1],
+ ldb);
+ nwork = itau + *m;
+
+/*
+ Multiply transpose(Q) by B.
+ (CWorkspace: need NRHS, prefer NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmlq_("L", "C", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/*
+ Bidiagonalize A.
+ (RWorkspace: need M)
+ (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors.
+ (CWorkspace: need 2*M+NRHS, prefer 2*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "C", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ clalsd_("L", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ }
+ }
+
+/* Undo scaling. */
+
+ if (iascl == 1) {
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ clascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ clascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L10:
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ iwork[1] = liwork;
+ rwork[1] = (real) lrwork;
+ return 0;
+
+/* End of CGELSD */
+
+} /* cgelsd_ */
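CGELSD belongs to the xGELSD family of divide-and-conquer least-squares drivers that back `numpy.linalg.lstsq` through lapack_lite. A sketch of the Python-level behaviour the Purpose section above documents (the data are illustrative):

import numpy as np

# Minimum-norm solution of a rank-deficient least squares problem,
# the case the driver's Purpose section describes.
A = np.array([[1.0, 1.0],
              [2.0, 2.0],
              [3.0, 3.0]])     # rank 1
b = np.array([2.0, 4.0, 6.0])
x, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None)
assert rank == 1                    # singular values below rcond*s[0] count as zero
assert np.allclose(x, [1.0, 1.0])   # the minimum-norm solution among all solutions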
+
+/* Subroutine */ int cgeqr2_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, k;
+ static complex alpha;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+ , integer *, complex *, complex *, integer *, complex *),
+ clarfg_(integer *, complex *, complex *, integer *, complex *),
+ xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.2.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ June 2010
+
+
+ Purpose
+ =======
+
+ CGEQR2 computes a QR factorization of a complex m by n matrix A:
A = Q * R.
Arguments
The number of columns of the matrix A. N >= 0.
A (input/output) COMPLEX array, dimension (LDA,N)
- On entry, the M-by-N matrix A.
+ On entry, the m by n matrix A.
On exit, the elements on and above the diagonal of the array
- contain the min(M,N)-by-N upper trapezoidal matrix R (R is
+ contain the min(m,n) by n upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the unitary matrix Q as a
- product of min(m,n) elementary reflectors (see Further
- Details).
+ product of elementary reflectors (see Further Details).
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(1,M).
The scalar factors of the elementary reflectors (see Further
Details).
- WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK))
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= max(1,N).
- For optimum performance LWORK >= N*NB, where NB is
- the optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
+ WORK (workspace) COMPLEX array, dimension (N)
INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
Further Details
===============
/* Function Body */
*info = 0;
- nb = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
- 1);
- lwkopt = *n * nb;
- work[1].r = (real) lwkopt, work[1].i = 0.f;
- lquery = *lwork == -1;
if (*m < 0) {
*info = -1;
} else if (*n < 0) {
*info = -2;
} else if (*lda < max(1,*m)) {
*info = -4;
- } else if (*lwork < max(1,*n) && ! lquery) {
- *info = -7;
}
if (*info != 0) {
i__1 = -(*info);
- xerbla_("CGEQRF", &i__1);
- return 0;
- } else if (lquery) {
+ xerbla_("CGEQR2", &i__1);
return 0;
}
-/* Quick return if possible */
-
k = min(*m,*n);
- if (k == 0) {
- work[1].r = 1.f, work[1].i = 0.f;
- return 0;
- }
-
- nbmin = 2;
- nx = 0;
- iws = *n;
- if (nb > 1 && nb < k) {
-/*
- Determine when to cross over from blocked to unblocked code.
-
- Computing MAX
-*/
- i__1 = 0, i__2 = ilaenv_(&c__3, "CGEQRF", " ", m, n, &c_n1, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nx = max(i__1,i__2);
- if (nx < k) {
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
-/* Determine if workspace is large enough for blocked code. */
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
- ldwork = *n;
- iws = ldwork * nb;
- if (*lwork < iws) {
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
+ , &c__1, &tau[i__]);
+ if (i__ < *n) {
-/*
- Not enough workspace to use optimal NB: reduce NB and
- determine the minimum value of NB.
-*/
+/* Apply H(i)' to A(i:m,i+1:n) from the left */
- nb = *lwork / ldwork;
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tau[i__]);
+ clarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &q__1,
+ &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of CGEQR2 */
+
+} /* cgeqr2_ */
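CGEQR2 is the unblocked Householder QR kernel that CGEQRF (below) applies panel by panel; at the Python level the same factorization surfaces as `numpy.linalg.qr`. A small sketch (matrix values are illustrative):

import numpy as np

# The Householder QR that cgeqr2_/cgeqrf_ implement, seen from Python.
A = np.array([[1 + 1j, 2], [3, 4 - 2j], [5, 6]], dtype=np.complex64)
Q, R = np.linalg.qr(A, mode='reduced')
assert np.allclose(Q @ R, A, atol=1e-5)                    # A = Q * R
assert np.allclose(Q.conj().T @ Q, np.eye(2), atol=1e-5)   # orthonormal columns
assert np.allclose(R, np.triu(R))                          # R upper triangular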
+
+/* Subroutine */ int cgeqrf_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int cgeqr2_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *), clarfb_(char *, char
+ *, char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+ , integer *, integer *, complex *, integer *, complex *, complex *
+ , integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ November 2006
+
+
+ Purpose
+ =======
+
+ CGEQRF computes a QR factorization of a complex M-by-N matrix A:
+ A = Q * R.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, the elements on and above the diagonal of the array
+ contain the min(M,N)-by-N upper trapezoidal matrix R (R is
+ upper triangular if m >= n); the elements below the diagonal,
+ with the array TAU, represent the unitary matrix Q as a
+ product of min(m,n) elementary reflectors (see Further
+ Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) COMPLEX array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) COMPLEX array, dimension (MAX(1,LWORK))
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ lwkopt = *n * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "CGEQRF", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
/* Computing MAX */
i__1 = 2, i__2 = ilaenv_(&c__2, "CGEQRF", " ", m, n, &c_n1, &
c_n1, (ftnlen)6, (ftnlen)1);
sigma = rmax / anrm;
}
if (iscale == 1) {
- clascl_(uplo, &c__0, &c__0, &c_b894, &sigma, n, n, &a[a_offset], lda,
- info);
+ clascl_(uplo, &c__0, &c__0, &c_b1034, &sigma, n, n, &a[a_offset], lda,
+ info);
}
/* Call CHETRD to reduce Hermitian matrix to tridiagonal form. */
i__3 = i__ - 1;
q__1.r = -1.f, q__1.i = -0.f;
cher2k_(uplo, "No transpose", &i__3, &nb, &q__1, &a[i__ * a_dim1
- + 1], lda, &work[1], &ldwork, &c_b894, &a[a_offset], lda);
+ + 1], lda, &work[1], &ldwork, &c_b1034, &a[a_offset], lda);
/*
Copy superdiagonal elements back into A, and diagonal
i__3 = *n - i__ - nb + 1;
q__1.r = -1.f, q__1.i = -0.f;
cher2k_(uplo, "No transpose", &i__3, &nb, &q__1, &a[i__ + nb +
- i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b894, &a[
+ i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b1034, &a[
i__ + nb + (i__ + nb) * a_dim1], lda);
/*
}
l = *m * *n + 1;
- sgemm_("N", "N", m, n, n, &c_b894, &rwork[1], m, &b[b_offset], ldb, &
- c_b1087, &rwork[l], m);
+ sgemm_("N", "N", m, n, n, &c_b1034, &rwork[1], m, &b[b_offset], ldb, &
+ c_b328, &rwork[l], m);
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
i__2 = *m;
}
/* L60: */
}
- sgemm_("N", "N", m, n, n, &c_b894, &rwork[1], m, &b[b_offset], ldb, &
- c_b1087, &rwork[l], m);
+ sgemm_("N", "N", m, n, n, &c_b1034, &rwork[1], m, &b[b_offset], ldb, &
+ c_b328, &rwork[l], m);
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
i__2 = *m;
n1p1 = n1 + 1;
if (*rho < 0.f) {
- sscal_(&n2, &c_b1136, &z__[n1p1], &c__1);
+ sscal_(&n2, &c_b1276, &z__[n1p1], &c__1);
}
/* Normalize z so that norm(z) = 1 */
} /* clahr2_ */
+/* Subroutine */ int clals0_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *nrhs, complex *b, integer *ldb, complex *bx,
+ integer *ldbx, integer *perm, integer *givptr, integer *givcol,
+ integer *ldgcol, real *givnum, integer *ldgnum, real *poles, real *
+ difl, real *difr, real *z__, integer *k, real *c__, real *s, real *
+ rwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, difr_dim1, difr_offset, givnum_dim1,
+ givnum_offset, poles_dim1, poles_offset, b_dim1, b_offset,
+ bx_dim1, bx_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, j, m, n;
+ static real dj;
+ static integer nlp1, jcol;
+ static real temp;
+ static integer jrow;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ static real diflj, difrj, dsigj;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), sgemv_(char *, integer *, integer *, real *
+ , real *, integer *, real *, integer *, real *, real *, integer *), csrot_(integer *, complex *, integer *, complex *,
+ integer *, real *, real *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), csscal_(integer *, real *, complex *, integer *),
+ clacpy_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *), xerbla_(char *, integer *);
+ static real dsigjp;
+
+
+/*
+ -- LAPACK routine (version 3.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ November 2006
+
+
+ Purpose
+ =======
+
+ CLALS0 applies back the multiplying factors of either the left or the
+ right singular vector matrix of a diagonal matrix appended by a row
+ to the right hand side matrix B in solving the least squares problem
+ using the divide-and-conquer SVD approach.
+
+ For the left singular vector matrix, three types of orthogonal
+ matrices are involved:
+
+ (1L) Givens rotations: the number of such rotations is GIVPTR; the
+ pairs of columns/rows they were applied to are stored in GIVCOL;
+ and the C- and S-values of these rotations are stored in GIVNUM.
+
+ (2L) Permutation. The (NL+1)-st row of B is to be moved to the first
+ row, and for J=2:N, PERM(J)-th row of B is to be moved to the
+ J-th row.
+
+ (3L) The left singular vector matrix of the remaining matrix.
+
+ For the right singular vector matrix, four types of orthogonal
+ matrices are involved:
+
+ (1R) The right singular vector matrix of the remaining matrix.
+
+ (2R) If SQRE = 1, one extra Givens rotation to generate the right
+ null space.
+
+ (3R) The inverse transformation of (2L).
+
+ (4R) The inverse transformation of (1L).
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form:
+ = 0: Left singular vector matrix.
+ = 1: Right singular vector matrix.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+ B (input/output) COMPLEX array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M. On output, B contains
+ the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B. LDB must be at least
+ max(1,MAX( M, N ) ).
+
+ BX (workspace) COMPLEX array, dimension ( LDBX, NRHS )
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ PERM (input) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) applied
+ to the two blocks.
+
+ GIVPTR (input) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of rows/columns
+ involved in a Givens rotation.
+
+ LDGCOL (input) INTEGER
+ The leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (input) REAL array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value used in the
+ corresponding Givens rotation.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of arrays DIFR, POLES and
+ GIVNUM, must be at least K.
+
+ POLES (input) REAL array, dimension ( LDGNUM, 2 )
+ On entry, POLES(1:K, 1) contains the new singular
+ values obtained from solving the secular equation, and
+ POLES(1:K, 2) is an array containing the poles in the secular
+ equation.
+
+ DIFL (input) REAL array, dimension ( K ).
+ On entry, DIFL(I) is the distance between I-th updated
+ (undeflated) singular value and the I-th (undeflated) old
+ singular value.
+
+ DIFR (input) REAL array, dimension ( LDGNUM, 2 ).
+ On entry, DIFR(I, 1) contains the distances between I-th
+ updated (undeflated) singular value and the I+1-th
+ (undeflated) old singular value. And DIFR(I, 2) is the
+ normalizing factor for the I-th right singular vector.
+
+ Z (input) REAL array, dimension ( K )
+ Contain the components of the deflation-adjusted updating row
+ vector.
+
+ K (input) INTEGER
+ Contains the dimension of the non-deflated matrix,
+ This is the order of the related secular equation. 1 <= K <= N.
+
+ C (input) REAL
+ C contains garbage if SQRE = 0 and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (input) REAL
+ S contains garbage if SQRE = 0 and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ RWORK (workspace) REAL array, dimension
+ ( K*(1+NRHS) + 2*NRHS )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ difr_dim1 = *ldgnum;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --z__;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ }
+
+ n = *nl + *nr + 1;
+
+ if (*nrhs < 1) {
+ *info = -5;
+ } else if (*ldb < n) {
+ *info = -7;
+ } else if (*ldbx < n) {
+ *info = -9;
+ } else if (*givptr < 0) {
+ *info = -11;
+ } else if (*ldgcol < n) {
+ *info = -13;
+ } else if (*ldgnum < n) {
+ *info = -15;
+ } else if (*k < 1) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLALS0", &i__1);
+ return 0;
+ }
+
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+
+ if (*icompq == 0) {
+
+/*
+ Apply back orthogonal transformations from the left.
+
+ Step (1L): apply back the Givens rotations performed.
+*/
+
+ i__1 = *givptr;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ csrot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &givnum[i__ + givnum_dim1]);
+/* L10: */
+ }
+
+/* Step (2L): permute rows of B. */
+
+ ccopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ ccopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+ ldbx);
+/* L20: */
+ }
+
+/*
+ Step (3L): apply the inverse of the left singular vector
+ matrix to BX.
+*/
+
+ if (*k == 1) {
+ ccopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+ if (z__[1] < 0.f) {
+ csscal_(nrhs, &c_b1276, &b[b_offset], ldb);
+ }
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = poles[j + poles_dim1];
+ dsigj = -poles[j + (poles_dim1 << 1)];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -poles[j + 1 + (poles_dim1 << 1)];
+ }
+ if (z__[j] == 0.f || poles[j + (poles_dim1 << 1)] == 0.f) {
+ rwork[j] = 0.f;
+ } else {
+ rwork[j] = -poles[j + (poles_dim1 << 1)] * z__[j] / diflj
+ / (poles[j + (poles_dim1 << 1)] + dj);
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] ==
+ 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ rwork[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (slamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigj) - diflj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L30: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] ==
+ 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ rwork[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (slamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigjp) + difrj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L40: */
+ }
+ rwork[1] = -1.f;
+ temp = snrm2_(k, &rwork[1], &c__1);
+
+/*
+ Since B and BX are complex, the following call to SGEMV
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMV( 'T', K, NRHS, ONE, BX, LDBX, WORK, 1, ZERO,
+ $ B( J, 1 ), LDB )
+*/
+
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ i__4 = jrow + jcol * bx_dim1;
+ rwork[i__] = bx[i__4].r;
+/* L50: */
+ }
+/* L60: */
+ }
+ sgemv_("T", k, nrhs, &c_b1034, &rwork[*k + 1 + (*nrhs << 1)],
+ k, &rwork[1], &c__1, &c_b328, &rwork[*k + 1], &c__1);
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ rwork[i__] = r_imag(&bx[jrow + jcol * bx_dim1]);
+/* L70: */
+ }
+/* L80: */
+ }
+ sgemv_("T", k, nrhs, &c_b1034, &rwork[*k + 1 + (*nrhs << 1)],
+ k, &rwork[1], &c__1, &c_b328, &rwork[*k + 1 + *nrhs],
+ &c__1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = j + jcol * b_dim1;
+ i__4 = jcol + *k;
+ i__5 = jcol + *k + *nrhs;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L90: */
+ }
+ clascl_("G", &c__0, &c__0, &temp, &c_b1034, &c__1, nrhs, &b[j
+ + b_dim1], ldb, info);
+/* L100: */
+ }
+ }
+
+/* Move the deflated rows of BX to B also. */
+
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ clacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+ + b_dim1], ldb);
+ }
+ } else {
+
+/*
+ Apply back the right orthogonal transformations.
+
+ Step (1R): apply back the new right singular vector matrix
+ to B.
+*/
+
+ if (*k == 1) {
+ ccopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dsigj = poles[j + (poles_dim1 << 1)];
+ if (z__[j] == 0.f) {
+ rwork[j] = 0.f;
+ } else {
+ rwork[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+ poles_dim1]) / difr[j + (difr_dim1 << 1)];
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + 1 + (poles_dim1 << 1)];
+ rwork[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difr[
+ i__ + difr_dim1]) / (dsigj + poles[i__ +
+ poles_dim1]) / difr[i__ + (difr_dim1 << 1)];
+ }
+/* L110: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + (poles_dim1 << 1)];
+ rwork[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difl[
+ i__]) / (dsigj + poles[i__ + poles_dim1]) /
+ difr[i__ + (difr_dim1 << 1)];
+ }
+/* L120: */
+ }
+
+/*
+ Since B and BX are complex, the following call to SGEMV
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMV( 'T', K, NRHS, ONE, B, LDB, WORK, 1, ZERO,
+ $ BX( J, 1 ), LDBX )
+*/
+
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[i__] = b[i__4].r;
+/* L130: */
+ }
+/* L140: */
+ }
+ sgemv_("T", k, nrhs, &c_b1034, &rwork[*k + 1 + (*nrhs << 1)],
+ k, &rwork[1], &c__1, &c_b328, &rwork[*k + 1], &c__1);
+ i__ = *k + (*nrhs << 1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ rwork[i__] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L150: */
+ }
+/* L160: */
+ }
+ sgemv_("T", k, nrhs, &c_b1034, &rwork[*k + 1 + (*nrhs << 1)],
+ k, &rwork[1], &c__1, &c_b328, &rwork[*k + 1 + *nrhs],
+ &c__1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = j + jcol * bx_dim1;
+ i__4 = jcol + *k;
+ i__5 = jcol + *k + *nrhs;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ bx[i__3].r = q__1.r, bx[i__3].i = q__1.i;
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+
+/*
+ Step (2R): if SQRE = 1, apply back the rotation that is
+ related to the right null space of the subproblem.
+*/
+
+ if (*sqre == 1) {
+ ccopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+ csrot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+ s);
+ }
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ clacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+ bx_dim1], ldbx);
+ }
+
+/* Step (3R): permute rows of B. */
+
+ ccopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+ if (*sqre == 1) {
+ ccopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+ }
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ ccopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+ ldb);
+/* L190: */
+ }
+
+/* Step (4R): apply back the Givens rotations performed. */
+
+ for (i__ = *givptr; i__ >= 1; --i__) {
+ r__1 = -givnum[i__ + givnum_dim1];
+ csrot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &r__1);
+/* L200: */
+ }
+ }
+
+ return 0;
+
+/* End of CLALS0 */
+
+} /* clals0_ */
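Steps (1L) and (4R) above apply stored Givens rotations with CSROT; the effect on a pair of rows is just a 2-by-2 rotation with real coefficients. A minimal NumPy sketch (the angle is illustrative):

import numpy as np

# csrot_ rotates two complex rows by a real Givens pair (c, s).
c, s = np.cos(0.3), np.sin(0.3)
G = np.array([[c, s], [-s, c]])
B = np.array([[1.0 + 0j, 2.0], [3.0, 4.0 - 1j]])
B_rot = G @ B            # apply the rotation, as in step (1L)
B_back = G.T @ B_rot     # inverse rotation, as in step (4R)
assert np.allclose(B_back, B)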
+
+/* Subroutine */ int clalsa_(integer *icompq, integer *smlsiz, integer *n,
+ integer *nrhs, complex *b, integer *ldb, complex *bx, integer *ldbx,
+ real *u, integer *ldu, real *vt, integer *k, real *difl, real *difr,
+ real *z__, real *poles, integer *givptr, integer *givcol, integer *
+ ldgcol, integer *perm, real *givnum, real *c__, real *s, real *rwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+ difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+ z_dim1, z_offset, b_dim1, b_offset, bx_dim1, bx_offset, i__1,
+ i__2, i__3, i__4, i__5, i__6;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl,
+ ndb1, nlp1, lvl2, nrp1, jcol, nlvl, sqre, jrow, jimag, jreal,
+ inode, ndiml;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer ndimr;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), clals0_(integer *, integer *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ integer *, integer *, integer *, integer *, real *, integer *,
+ real *, real *, real *, real *, integer *, real *, real *, real *,
+ integer *), xerbla_(char *, integer *), slasdt_(integer *
+ , integer *, integer *, integer *, integer *, integer *, integer *
+ );
+
+
+/*
+ -- LAPACK routine (version 3.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ November 2006
+
+
+ Purpose
+ =======
+
+ CLALSA is an intermediate step in solving the least squares problem
+ by computing the SVD of the coefficient matrix in compact form (the
+ singular vectors are computed as products of simple orthogonal
+ matrices).
+
+ If ICOMPQ = 0, CLALSA applies the inverse of the left singular vector
+ matrix of an upper bidiagonal matrix to the right hand side; and if
+ ICOMPQ = 1, CLALSA applies the right singular vector matrix to the
+ right hand side. The singular vector matrices were generated in
+ compact form by CLALSA.
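+
+    (Added note: in the translated code below, ICOMPQ = 0 is handled by
+    the first half of the routine and ICOMPQ = 1 by the branch taken at
+    label L170.)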
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether the left or the right singular vector
+ matrix is involved.
+ = 0: Left singular vector matrix
+ = 1: Right singular vector matrix
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The row and column dimensions of the upper bidiagonal matrix.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+ B (input/output) COMPLEX array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M.
+ On output, B contains the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,MAX( M, N ) ).
+
+ BX (output) COMPLEX array, dimension ( LDBX, NRHS )
+ On exit, the result of applying the left or right singular
+ vector matrix to B.
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ U (input) REAL array, dimension ( LDU, SMLSIZ ).
+ On entry, U contains the left singular vector matrices of all
+ subproblems at the bottom level.
+
+    LDU    (input) INTEGER, LDU >= N.
+ The leading dimension of arrays U, VT, DIFL, DIFR,
+ POLES, GIVNUM, and Z.
+
+ VT (input) REAL array, dimension ( LDU, SMLSIZ+1 ).
+ On entry, VT' contains the right singular vector matrices of
+ all subproblems at the bottom level.
+
+ K (input) INTEGER array, dimension ( N ).
+
+ DIFL (input) REAL array, dimension ( LDU, NLVL ).
+ where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1.
+
+ DIFR (input) REAL array, dimension ( LDU, 2 * NLVL ).
+ On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record
+ distances between singular values on the I-th level and
+ singular values on the (I -1)-th level, and DIFR(*, 2 * I)
+           records the normalizing factors of the right singular
+           vector matrices of subproblems on the I-th level.
+
+ Z (input) REAL array, dimension ( LDU, NLVL ).
+ On entry, Z(1, I) contains the components of the deflation-
+ adjusted updating row vector for subproblems on the I-th
+ level.
+
+ POLES (input) REAL array, dimension ( LDU, 2 * NLVL ).
+ On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old
+ singular values involved in the secular equations on the I-th
+ level.
+
+ GIVPTR (input) INTEGER array, dimension ( N ).
+ On entry, GIVPTR( I ) records the number of Givens
+ rotations performed on the I-th problem on the computation
+ tree.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ).
+ On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the
+ locations of Givens rotations performed on the I-th level on
+ the computation tree.
+
+    LDGCOL (input) INTEGER, LDGCOL >= N.
+ The leading dimension of arrays GIVCOL and PERM.
+
+ PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ).
+ On entry, PERM(*, I) records permutations done on the I-th
+ level of the computation tree.
+
+ GIVNUM (input) REAL array, dimension ( LDU, 2 * NLVL ).
+ On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S-
+ values of Givens rotations performed on the I-th level on the
+ computation tree.
+
+ C (input) REAL array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ C( I ) contains the C-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ S (input) REAL array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ S( I ) contains the S-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ RWORK (workspace) REAL array, dimension at least
+           MAX( (SMLSIZ+1)*NRHS*3, N*(1+NRHS) + 2*NRHS ).
+
+ IWORK (workspace) INTEGER array.
+           The dimension must be at least 3 * N.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < *smlsiz) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < *n) {
+ *info = -6;
+ } else if (*ldbx < *n) {
+ *info = -8;
+ } else if (*ldu < *n) {
+ *info = -10;
+ } else if (*ldgcol < *n) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLALSA", &i__1);
+ return 0;
+ }
+
+/* Book-keeping and setting up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ The following code applies back the left singular vector factors.
+ For applying back the right singular vector factors, go to 170.
+*/
+
+ if (*icompq == 1) {
+ goto L170;
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by SLASDQ. The corresponding left and right singular vector
+ matrices are in explicit form. First apply back the left
+ singular vector matrices.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+
+/*
+ Since B and BX are complex, the following call to SGEMM
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NL, NRHS, NL, ONE, U( NLF, 1 ), LDU,
+ $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX )
+*/
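+
+/*
+          (Added note, a sketch of the identity used throughout this
+          file: U is real and B is complex, so writing B = Br + i*Bi
+          gives
+
+             U' * B = (U' * Br) + i * (U' * Bi),
+
+          which is why the real and imaginary parts are packed into
+          RWORK and combined with two real SGEMM calls below.)
+*/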
+
+ j = nl * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L10: */
+ }
+/* L20: */
+ }
+ sgemm_("T", "N", &nl, nrhs, &nl, &c_b1034, &u[nlf + u_dim1], ldu, &
+ rwork[(nl * *nrhs << 1) + 1], &nl, &c_b328, &rwork[1], &nl);
+ j = nl * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L30: */
+ }
+/* L40: */
+ }
+ sgemm_("T", "N", &nl, nrhs, &nl, &c_b1034, &u[nlf + u_dim1], ldu, &
+ rwork[(nl * *nrhs << 1) + 1], &nl, &c_b328, &rwork[nl * *nrhs
+ + 1], &nl);
+ jreal = 0;
+ jimag = nl * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+
+/*
+ Since B and BX are complex, the following call to SGEMM
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NR, NRHS, NR, ONE, U( NRF, 1 ), LDU,
+ $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX )
+*/
+
+ j = nr * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L70: */
+ }
+/* L80: */
+ }
+ sgemm_("T", "N", &nr, nrhs, &nr, &c_b1034, &u[nrf + u_dim1], ldu, &
+ rwork[(nr * *nrhs << 1) + 1], &nr, &c_b328, &rwork[1], &nr);
+ j = nr * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L90: */
+ }
+/* L100: */
+ }
+ sgemm_("T", "N", &nr, nrhs, &nr, &c_b1034, &u[nrf + u_dim1], ldu, &
+ rwork[(nr * *nrhs << 1) + 1], &nr, &c_b328, &rwork[nr * *nrhs
+ + 1], &nr);
+ jreal = 0;
+ jimag = nr * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* L130: */
+ }
+
+/*
+ Next copy the rows of B that correspond to unchanged rows
+ in the bidiagonal matrix to BX.
+*/
+
+ i__1 = nd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ic = iwork[inode + i__ - 1];
+ ccopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L140: */
+ }
+
+/*
+ Finally go through the left singular vector matrices of all
+ the other subproblems bottom-up on the tree.
+*/
+
+ j = pow_ii(&c__2, &nlvl);
+ sqre = 0;
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/*
+ find the first node LF and last node LL on
+ the current level LVL
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = (lf << 1) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ --j;
+ clals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+ b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &rwork[1], info);
+/* L150: */
+ }
+/* L160: */
+ }
+ goto L330;
+
+/* ICOMPQ = 1: applying back the right singular vector factors. */
+
+L170:
+
+/*
+ First now go through the right singular vector matrices of all
+ the tree nodes top-down.
+*/
+
+ j = 0;
+ i__1 = nlvl;
+ for (lvl = 1; lvl <= i__1; ++lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/*
+ Find the first node LF and last node LL on
+ the current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__2 = lvl - 1;
+ lf = pow_ii(&c__2, &i__2);
+ ll = (lf << 1) - 1;
+ }
+ i__2 = lf;
+ for (i__ = ll; i__ >= i__2; --i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqre = 0;
+ } else {
+ sqre = 1;
+ }
+ ++j;
+ clals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &rwork[1], info);
+/* L180: */
+ }
+/* L190: */
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by SLASDQ. The corresponding right singular vector
+ matrices are in explicit form. Apply them back.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlp1 = nl + 1;
+ if (i__ == nd) {
+ nrp1 = nr;
+ } else {
+ nrp1 = nr + 1;
+ }
+ nlf = ic - nl;
+ nrf = ic + 1;
+
+/*
+ Since B and BX are complex, the following call to SGEMM is
+ performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NLP1, NRHS, NLP1, ONE, VT( NLF, 1 ), LDU,
+ $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX )
+*/
+
+ j = nlp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L200: */
+ }
+/* L210: */
+ }
+ sgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b1034, &vt[nlf + vt_dim1],
+ ldu, &rwork[(nlp1 * *nrhs << 1) + 1], &nlp1, &c_b328, &rwork[
+ 1], &nlp1);
+ j = nlp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L220: */
+ }
+/* L230: */
+ }
+ sgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b1034, &vt[nlf + vt_dim1],
+ ldu, &rwork[(nlp1 * *nrhs << 1) + 1], &nlp1, &c_b328, &rwork[
+ nlp1 * *nrhs + 1], &nlp1);
+ jreal = 0;
+ jimag = nlp1 * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L240: */
+ }
+/* L250: */
+ }
+
+/*
+ Since B and BX are complex, the following call to SGEMM is
+ performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NRP1, NRHS, NRP1, ONE, VT( NRF, 1 ), LDU,
+ $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX )
+*/
+
+ j = nrp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L260: */
+ }
+/* L270: */
+ }
+ sgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b1034, &vt[nrf + vt_dim1],
+ ldu, &rwork[(nrp1 * *nrhs << 1) + 1], &nrp1, &c_b328, &rwork[
+ 1], &nrp1);
+ j = nrp1 * *nrhs << 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L280: */
+ }
+/* L290: */
+ }
+ sgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b1034, &vt[nrf + vt_dim1],
+ ldu, &rwork[(nrp1 * *nrhs << 1) + 1], &nrp1, &c_b328, &rwork[
+ nrp1 * *nrhs + 1], &nrp1);
+ jreal = 0;
+ jimag = nrp1 * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L300: */
+ }
+/* L310: */
+ }
+
+/* L320: */
+ }
+
+L330:
+
+ return 0;
+
+/* End of CLALSA */
+
+} /* clalsa_ */
+
+/* Subroutine */ int clalsd_(char *uplo, integer *smlsiz, integer *n, integer
+ *nrhs, real *d__, real *e, complex *b, integer *ldb, real *rcond,
+ integer *rank, complex *work, real *rwork, integer *iwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ static integer c__, i__, j, k;
+ static real r__;
+ static integer s, u, z__;
+ static real cs;
+ static integer bx;
+ static real sn;
+ static integer st, vt, nm1, st1;
+ static real eps;
+ static integer iwk;
+ static real tol;
+ static integer difl, difr;
+ static real rcnd;
+ static integer jcol, irwb, perm, nsub, nlvl, sqre, bxst, jrow, irwu,
+ jimag, jreal;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer irwib;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ static integer poles, sizei, irwrb, nsize;
+ extern /* Subroutine */ int csrot_(integer *, complex *, integer *,
+ complex *, integer *, real *, real *);
+ static integer irwvt, icmpq1, icmpq2;
+ extern /* Subroutine */ int clalsa_(integer *, integer *, integer *,
+ integer *, complex *, integer *, complex *, integer *, real *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ integer *, integer *, integer *, integer *, real *, real *, real *
+ , real *, integer *, integer *), clascl_(char *, integer *,
+ integer *, real *, real *, integer *, integer *, complex *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int slasda_(integer *, integer *, integer *,
+ integer *, real *, real *, real *, integer *, real *, integer *,
+ real *, real *, real *, real *, integer *, integer *, integer *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ clacpy_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *), claset_(char *, integer *, integer
+ *, complex *, complex *, complex *, integer *), xerbla_(
+ char *, integer *), slascl_(char *, integer *, integer *,
+ real *, real *, integer *, integer *, real *, integer *, integer *
+ );
+ extern integer isamax_(integer *, real *, integer *);
+ static integer givcol;
+ extern /* Subroutine */ int slasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, real *, real *, real *, integer *, real *
+ , integer *, real *, integer *, real *, integer *),
+ slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *), slartg_(real *, real *, real *, real *, real *
+ );
+ static real orgnrm;
+ static integer givnum;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+ static integer givptr, nrwork, irwwrk, smlszp;
+
+
+/*
+ -- LAPACK routine (version 3.2.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ June 2010
+
+
+ Purpose
+ =======
+
+ CLALSD uses the singular value decomposition of A to solve the least
+ squares problem of finding X to minimize the Euclidean norm of each
+ column of A*X-B, where A is N-by-N upper bidiagonal, and X and B
+ are N-by-NRHS. The solution X overwrites B.
+
+ The singular values of A smaller than RCOND times the largest
+ singular value are treated as zero in solving the least squares
+ problem; in this case a minimum norm solution is returned.
+ The actual singular values are returned in D in ascending order.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+    which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
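+
+    (Added note: concretely, the code below solves diagonal blocks of
+    size at most SMLSIZ directly with SLASDQ and larger blocks by
+    divide and conquer through SLASDA and CLALSA.)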
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': D and E define an upper bidiagonal matrix.
+ = 'L': D and E define a lower bidiagonal matrix.
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The dimension of the bidiagonal matrix. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of columns of B. NRHS must be at least 1.
+
+ D (input/output) REAL array, dimension (N)
+ On entry D contains the main diagonal of the bidiagonal
+ matrix. On exit, if INFO = 0, D contains its singular values.
+
+ E (input/output) REAL array, dimension (N-1)
+ Contains the super-diagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ B (input/output) COMPLEX array, dimension (LDB,NRHS)
+ On input, B contains the right hand sides of the least
+ squares problem. On output, B contains the solution X.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,N).
+
+ RCOND (input) REAL
+ The singular values of A less than or equal to RCOND times
+ the largest singular value are treated as zero in solving
+ the least squares problem. If RCOND is negative,
+ machine precision is used instead.
+ For example, if diag(S)*X=B were the least squares problem,
+ where diag(S) is a diagonal matrix of singular values, the
+ solution would be X(i) = B(i) / S(i) if S(i) is greater than
+ RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to
+ RCOND*max(S).
+
+ RANK (output) INTEGER
+ The number of singular values of A greater than RCOND times
+ the largest singular value.
+
+ WORK (workspace) COMPLEX array, dimension (N * NRHS).
+
+ RWORK (workspace) REAL array, dimension at least
+ (9*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS +
+           MAX( (SMLSIZ+1)**2, N*(1+NRHS) + 2*NRHS )),
+ where
+ NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+
+ IWORK (workspace) INTEGER array, dimension (3*N*NLVL + 11*N).
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute a singular value while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through MOD(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < 1 || *ldb < *n) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLALSD", &i__1);
+ return 0;
+ }
+
+ eps = slamch_("Epsilon");
+
+/* Set up the tolerance. */
+
+ if (*rcond <= 0.f || *rcond >= 1.f) {
+ rcnd = eps;
+ } else {
+ rcnd = *rcond;
+ }
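+
+/*
+   (Added note: as documented for RCOND above, any value outside the
+   open interval (0,1) falls back to machine epsilon here.)
+*/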
+
+ *rank = 0;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ if (d__[1] == 0.f) {
+ claset_("A", &c__1, nrhs, &c_b56, &c_b56, &b[b_offset], ldb);
+ } else {
+ *rank = 1;
+ clascl_("G", &c__0, &c__0, &d__[1], &c_b1034, &c__1, nrhs, &b[
+ b_offset], ldb, info);
+ d__[1] = dabs(d__[1]);
+ }
+ return 0;
+ }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+ if (*(unsigned char *)uplo == 'L') {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (*nrhs == 1) {
+ csrot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+ c__1, &cs, &sn);
+ } else {
+ rwork[(i__ << 1) - 1] = cs;
+ rwork[i__ * 2] = sn;
+ }
+/* L10: */
+ }
+ if (*nrhs > 1) {
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - 1;
+ for (j = 1; j <= i__2; ++j) {
+ cs = rwork[(j << 1) - 1];
+ sn = rwork[j * 2];
+ csrot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__
+ * b_dim1], &c__1, &cs, &sn);
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Scale. */
+
+ nm1 = *n - 1;
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ claset_("A", n, nrhs, &c_b56, &c_b56, &b[b_offset], ldb);
+ return 0;
+ }
+
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1034, n, &c__1, &d__[1], n, info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1034, &nm1, &c__1, &e[1], &nm1,
+ info);
+
+/*
+ If N is smaller than the minimum divide size SMLSIZ, then solve
+ the problem with another solver.
+*/
+
+ if (*n <= *smlsiz) {
+ irwu = 1;
+ irwvt = irwu + *n * *n;
+ irwwrk = irwvt + *n * *n;
+ irwrb = irwwrk;
+ irwib = irwrb + *n * *nrhs;
+ irwb = irwib + *n * *nrhs;
+ slaset_("A", n, n, &c_b328, &c_b1034, &rwork[irwu], n);
+ slaset_("A", n, n, &c_b328, &c_b1034, &rwork[irwvt], n);
+ slasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &rwork[irwvt], n,
+ &rwork[irwu], n, &rwork[irwwrk], &c__1, &rwork[irwwrk], info);
+ if (*info != 0) {
+ return 0;
+ }
+
+/*
+ In the real version, B is passed to SLASDQ and multiplied
+ internally by Q'. Here B is complex and that product is
+ computed below in two steps (real and imaginary parts).
+*/
+
+ j = irwb - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++j;
+ i__3 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__3].r;
+/* L40: */
+ }
+/* L50: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b1034, &rwork[irwu], n, &rwork[irwb],
+ n, &c_b328, &rwork[irwrb], n);
+ j = irwb - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L60: */
+ }
+/* L70: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b1034, &rwork[irwu], n, &rwork[irwb],
+ n, &c_b328, &rwork[irwib], n);
+ jreal = irwrb - 1;
+ jimag = irwib - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__3 = jrow + jcol * b_dim1;
+ i__4 = jreal;
+ i__5 = jimag;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= tol) {
+ claset_("A", &c__1, nrhs, &c_b56, &c_b56, &b[i__ + b_dim1],
+ ldb);
+ } else {
+ clascl_("G", &c__0, &c__0, &d__[i__], &c_b1034, &c__1, nrhs, &
+ b[i__ + b_dim1], ldb, info);
+ ++(*rank);
+ }
+/* L100: */
+ }
+
+/*
+ Since B is complex, the following call to SGEMM is performed
+    in two steps (real and imaginary parts). That is, for V * B
+    (in the real version of the code V' is stored in WORK).
+
+ CALL SGEMM( 'T', 'N', N, NRHS, N, ONE, WORK, N, B, LDB, ZERO,
+ $ WORK( NWORK ), N )
+*/
+
+ j = irwb - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++j;
+ i__3 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__3].r;
+/* L110: */
+ }
+/* L120: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b1034, &rwork[irwvt], n, &rwork[irwb],
+ n, &c_b328, &rwork[irwrb], n);
+ j = irwb - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L130: */
+ }
+/* L140: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b1034, &rwork[irwvt], n, &rwork[irwb],
+ n, &c_b328, &rwork[irwib], n);
+ jreal = irwrb - 1;
+ jimag = irwib - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__3 = jrow + jcol * b_dim1;
+ i__4 = jreal;
+ i__5 = jimag;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+
+/* Unscale. */
+
+ slascl_("G", &c__0, &c__0, &c_b1034, &orgnrm, n, &c__1, &d__[1], n,
+ info);
+ slasrt_("D", n, &d__[1], info);
+ clascl_("G", &c__0, &c__0, &orgnrm, &c_b1034, n, nrhs, &b[b_offset],
+ ldb, info);
+
+ return 0;
+ }
+
+/* Book-keeping and setting up some constants. */
+
+ nlvl = (integer) (log((real) (*n) / (real) (*smlsiz + 1)) / log(2.f)) + 1;
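+
+/*
+   (Added example: with N = 1000 and a typical SMLSIZ of 25, this gives
+   NLVL = INT( log_2(1000/26) ) + 1 = 5 + 1 = 6 levels in the tree;
+   SMLSIZ = 25 is only an assumed typical value.)
+*/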
+
+ smlszp = *smlsiz + 1;
+
+ u = 1;
+ vt = *smlsiz * *n + 1;
+ difl = vt + smlszp * *n;
+ difr = difl + nlvl * *n;
+ z__ = difr + (nlvl * *n << 1);
+ c__ = z__ + nlvl * *n;
+ s = c__ + *n;
+ poles = s + *n;
+ givnum = poles + (nlvl << 1) * *n;
+ nrwork = givnum + (nlvl << 1) * *n;
+ bx = 1;
+
+ irwrb = nrwork;
+ irwib = irwrb + *smlsiz * *nrhs;
+ irwb = irwib + *smlsiz * *nrhs;
+
+ sizei = *n + 1;
+ k = sizei + *n;
+ givptr = k + *n;
+ perm = givptr + *n;
+ givcol = perm + nlvl * *n;
+ iwk = givcol + (nlvl * *n << 1);
+
+ st = 1;
+ sqre = 0;
+ icmpq1 = 1;
+ icmpq2 = 0;
+ nsub = 0;
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) < eps) {
+ d__[i__] = r_sign(&eps, &d__[i__]);
+ }
+/* L170: */
+ }
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = e[i__], dabs(r__1)) < eps || i__ == nm1) {
+ ++nsub;
+ iwork[nsub] = st;
+
+/*
+ Subproblem found. First determine its size and then
+ apply divide and conquer on it.
+*/
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else if ((r__1 = e[i__], dabs(r__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else {
+
+/*
+                   A subproblem with E(NM1) small. This implies a
+ 1-by-1 subproblem at D(N), which is not solved
+ explicitly.
+*/
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ ++nsub;
+ iwork[nsub] = *n;
+ iwork[sizei + nsub - 1] = 1;
+ ccopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+ }
+ st1 = st - 1;
+ if (nsize == 1) {
+
+/*
+ This is a 1-by-1 subproblem and is not solved
+ explicitly.
+*/
+
+ ccopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+ } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by SLASDQ. */
+
+ slaset_("A", &nsize, &nsize, &c_b328, &c_b1034, &rwork[vt +
+ st1], n);
+ slaset_("A", &nsize, &nsize, &c_b328, &c_b1034, &rwork[u +
+ st1], n);
+ slasdq_("U", &c__0, &nsize, &nsize, &nsize, &c__0, &d__[st], &
+ e[st], &rwork[vt + st1], n, &rwork[u + st1], n, &
+                    rwork[nrwork], &c__1, &rwork[nrwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+
+/*
+ In the real version, B is passed to SLASDQ and multiplied
+ internally by Q'. Here B is complex and that product is
+ computed below in two steps (real and imaginary parts).
+*/
+
+ j = irwb - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = st + nsize - 1;
+ for (jrow = st; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L180: */
+ }
+/* L190: */
+ }
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1034, &rwork[u +
+ st1], n, &rwork[irwb], &nsize, &c_b328, &rwork[irwrb],
+ &nsize);
+ j = irwb - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = st + nsize - 1;
+ for (jrow = st; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L200: */
+ }
+/* L210: */
+ }
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1034, &rwork[u +
+ st1], n, &rwork[irwb], &nsize, &c_b328, &rwork[irwib],
+ &nsize);
+ jreal = irwrb - 1;
+ jimag = irwib - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = st + nsize - 1;
+ for (jrow = st; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * b_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L220: */
+ }
+/* L230: */
+ }
+
+ clacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+ st1], n);
+ } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+ slasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+ rwork[u + st1], n, &rwork[vt + st1], &iwork[k + st1],
+ &rwork[difl + st1], &rwork[difr + st1], &rwork[z__ +
+ st1], &rwork[poles + st1], &iwork[givptr + st1], &
+ iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+ givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &
+ rwork[nrwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ bxst = bx + st1;
+ clalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+ work[bxst], n, &rwork[u + st1], n, &rwork[vt + st1], &
+ iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1]
+ , &rwork[z__ + st1], &rwork[poles + st1], &iwork[
+ givptr + st1], &iwork[givcol + st1], n, &iwork[perm +
+ st1], &rwork[givnum + st1], &rwork[c__ + st1], &rwork[
+ s + st1], &rwork[nrwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ st = i__ + 1;
+ }
+/* L240: */
+ }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+ tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
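+
+/*
+   (Added note: ISAMAX returns the index of the entry of largest
+   magnitude, so TOL = RCND * max|D(I)|, i.e. the RCOND * S(1)
+   threshold described in the documentation above.)
+*/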
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+ Some of the elements in D can be negative because 1-by-1
+ subproblems were not solved explicitly.
+*/
+
+ if ((r__1 = d__[i__], dabs(r__1)) <= tol) {
+ claset_("A", &c__1, nrhs, &c_b56, &c_b56, &work[bx + i__ - 1], n);
+ } else {
+ ++(*rank);
+ clascl_("G", &c__0, &c__0, &d__[i__], &c_b1034, &c__1, nrhs, &
+ work[bx + i__ - 1], n, info);
+ }
+ d__[i__] = (r__1 = d__[i__], dabs(r__1));
+/* L250: */
+ }
+
+/* Now apply back the right singular vectors. */
+
+ icmpq2 = 1;
+ i__1 = nsub;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ st = iwork[i__];
+ st1 = st - 1;
+ nsize = iwork[sizei + i__ - 1];
+ bxst = bx + st1;
+ if (nsize == 1) {
+ ccopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+ } else if (nsize <= *smlsiz) {
+
+/*
+ Since B and BX are complex, the following call to SGEMM
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NSIZE, NRHS, NSIZE, ONE,
+ $ RWORK( VT+ST1 ), N, RWORK( BXST ), N, ZERO,
+ $ B( ST, 1 ), LDB )
+*/
+
+ j = bxst - *n - 1;
+ jreal = irwb - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ j += *n;
+ i__3 = nsize;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++jreal;
+ i__4 = j + jrow;
+ rwork[jreal] = work[i__4].r;
+/* L260: */
+ }
+/* L270: */
+ }
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1034, &rwork[vt + st1],
+ n, &rwork[irwb], &nsize, &c_b328, &rwork[irwrb], &nsize);
+ j = bxst - *n - 1;
+ jimag = irwb - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ j += *n;
+ i__3 = nsize;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++jimag;
+ rwork[jimag] = r_imag(&work[j + jrow]);
+/* L280: */
+ }
+/* L290: */
+ }
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1034, &rwork[vt + st1],
+ n, &rwork[irwb], &nsize, &c_b328, &rwork[irwib], &nsize);
+ jreal = irwrb - 1;
+ jimag = irwib - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = st + nsize - 1;
+ for (jrow = st; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * b_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L300: */
+ }
+/* L310: */
+ }
+ } else {
+ clalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+ b_dim1], ldb, &rwork[u + st1], n, &rwork[vt + st1], &
+ iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1], &
+ rwork[z__ + st1], &rwork[poles + st1], &iwork[givptr +
+ st1], &iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+ givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &rwork[
+ nrwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+/* L320: */
+ }
+
+/* Unscale and sort the singular values. */
+
+ slascl_("G", &c__0, &c__0, &c_b1034, &orgnrm, n, &c__1, &d__[1], n, info);
+ slasrt_("D", n, &d__[1], info);
+ clascl_("G", &c__0, &c__0, &orgnrm, &c_b1034, n, nrhs, &b[b_offset], ldb,
+ info);
+
+ return 0;
+
+/* End of CLALSD */
+
+} /* clalsd_ */
+
doublereal clange_(char *norm, integer *m, integer *n, complex *a, integer *
lda, real *work)
{
}
l = *m * *n + 1;
- sgemm_("N", "N", m, n, m, &c_b894, &a[a_offset], lda, &rwork[1], m, &
- c_b1087, &rwork[l], m);
+ sgemm_("N", "N", m, n, m, &c_b1034, &a[a_offset], lda, &rwork[1], m, &
+ c_b328, &rwork[l], m);
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
i__2 = *m;
}
/* L60: */
}
- sgemm_("N", "N", m, n, m, &c_b894, &a[a_offset], lda, &rwork[1], m, &
- c_b1087, &rwork[l], m);
+ sgemm_("N", "N", m, n, m, &c_b1034, &a[a_offset], lda, &rwork[1], m, &
+ c_b328, &rwork[l], m);
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
i__2 = *m;
/* Scale x by 1/2. */
- csscal_(n, &c_b2023, &x[1], &c__1);
+ csscal_(n, &c_b2435, &x[1], &c__1);
*scale *= .5f;
}
a[i__ + (i__ + ib) * a_dim1], lda, &c_b57, &a[i__
* a_dim1 + 1], lda);
i__3 = *n - i__ - ib + 1;
- cherk_("Upper", "No transpose", &ib, &i__3, &c_b894, &a[
- i__ + (i__ + ib) * a_dim1], lda, &c_b894, &a[i__
+ cherk_("Upper", "No transpose", &ib, &i__3, &c_b1034, &a[
+ i__ + (i__ + ib) * a_dim1], lda, &c_b1034, &a[i__
+ i__ * a_dim1], lda);
}
/* L10: */
a_dim1], lda);
i__3 = *n - i__ - ib + 1;
cherk_("Lower", "Conjugate transpose", &ib, &i__3, &
- c_b894, &a[i__ + ib + i__ * a_dim1], lda, &c_b894,
- &a[i__ + i__ * a_dim1], lda);
+ c_b1034, &a[i__ + ib + i__ * a_dim1], lda, &
+ c_b1034, &a[i__ + i__ * a_dim1], lda);
}
/* L20: */
}
i__3 = nb, i__4 = *n - j + 1;
jb = min(i__3,i__4);
i__3 = j - 1;
- cherk_("Upper", "Conjugate transpose", &jb, &i__3, &c_b1136, &
- a[j * a_dim1 + 1], lda, &c_b894, &a[j + j * a_dim1],
+ cherk_("Upper", "Conjugate transpose", &jb, &i__3, &c_b1276, &
+ a[j * a_dim1 + 1], lda, &c_b1034, &a[j + j * a_dim1],
lda);
cpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
if (*info != 0) {
i__3 = nb, i__4 = *n - j + 1;
jb = min(i__3,i__4);
i__3 = j - 1;
- cherk_("Lower", "No transpose", &jb, &i__3, &c_b1136, &a[j +
- a_dim1], lda, &c_b894, &a[j + j * a_dim1], lda);
+ cherk_("Lower", "No transpose", &jb, &i__3, &c_b1276, &a[j +
+ a_dim1], lda, &c_b1034, &a[j + j * a_dim1], lda);
cpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
if (*info != 0) {
goto L30;
/* If COMPZ = 'I', we simply call SSTEDC instead. */
if (icompz == 2) {
- slaset_("Full", n, n, &c_b1087, &c_b894, &rwork[1], n);
+ slaset_("Full", n, n, &c_b328, &c_b1034, &rwork[1], n);
ll = *n * *n + 1;
i__1 = *lrwork - ll + 1;
sstedc_("I", n, &d__[1], &e[1], &rwork[1], n, &rwork[ll], &i__1, &
/* Scale. */
orgnrm = slanst_("M", &m, &d__[start], &e[start]);
- slascl_("G", &c__0, &c__0, &orgnrm, &c_b894, &m, &c__1, &d__[
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1034, &m, &c__1, &d__[
start], &m, info);
i__1 = m - 1;
i__2 = m - 1;
- slascl_("G", &c__0, &c__0, &orgnrm, &c_b894, &i__1, &c__1, &e[
- start], &i__2, info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1034, &i__1, &c__1, &
+ e[start], &i__2, info);
claed0_(n, &m, &d__[start], &e[start], &z__[start * z_dim1 +
1], ldz, &work[1], n, &rwork[1], &iwork[1], info);
/* Scale back. */
- slascl_("G", &c__0, &c__0, &c_b894, &orgnrm, &m, &c__1, &d__[
+ slascl_("G", &c__0, &c__0, &c_b1034, &orgnrm, &m, &c__1, &d__[
start], &m, info);
} else {
/* Form shift. */
g = (d__[l + 1] - p) / (e[l] * 2.f);
- r__ = slapy2_(&g, &c_b894);
+ r__ = slapy2_(&g, &c_b1034);
g = d__[m] - p + e[l] / (g + r_sign(&r__, &g));
s = 1.f;
/* Form shift. */
g = (d__[l - 1] - p) / (e[l - 1] * 2.f);
- r__ = slapy2_(&g, &c_b894);
+ r__ = slapy2_(&g, &c_b1034);
g = d__[m] - p + e[l - 1] / (g + r_sign(&r__, &g));
s = 1.f;
static integer c__3 = 3;
static integer c__2 = 2;
static integer c__65 = 65;
+static integer c__6 = 6;
static integer c__12 = 12;
static integer c__49 = 49;
static integer c__4 = 4;
static integer c__14 = 14;
static integer c__16 = 16;
static logical c_true = TRUE_;
-static real c_b2863 = 2.f;
+static real c_b3178 = 2.f;
/* Subroutine */ int sbdsdc_(char *uplo, char *compq, integer *n, real *d__,
real *e, real *u, integer *ldu, real *vt, integer *ldvt, real *q,
} /* sgelqf_ */
-/* Subroutine */ int sgeqr2_(integer *m, integer *n, real *a, integer *lda,
- real *tau, real *work, integer *info)
+/* Subroutine */ int sgelsd_(integer *m, integer *n, integer *nrhs, real *a,
+ integer *lda, real *b, integer *ldb, real *s, real *rcond, integer *
+ rank, real *work, integer *lwork, integer *iwork, integer *info)
{
/* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
/* Local variables */
- static integer i__, k;
- static real aii;
- extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
- integer *, real *, real *, integer *, real *), xerbla_(
- char *, integer *), slarfg_(integer *, real *, real *,
- integer *, real *);
+ static integer ie, il, mm;
+ static real eps, anrm, bnrm;
+ static integer itau, nlvl, iascl, ibscl;
+ static real sfmin;
+ static integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+ extern /* Subroutine */ int slabad_(real *, real *), sgebrd_(integer *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ real *, integer *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static real bignum;
+ extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slalsd_(char *, integer
+ *, integer *, integer *, real *, real *, real *, integer *, real *
+ , integer *, real *, integer *, integer *), slascl_(char *
+ , integer *, integer *, real *, real *, integer *, integer *,
+ real *, integer *, integer *);
+ static integer wlalsd;
+ extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slacpy_(char *, integer
+ *, integer *, real *, integer *, real *, integer *),
+ slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ static integer ldwork;
+ extern /* Subroutine */ int sormbr_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+ , real *, integer *, integer *);
+ static integer liwork, minwrk, maxwrk;
+ static real smlnum;
+ extern /* Subroutine */ int sormlq_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ static logical lquery;
+ static integer smlsiz;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
/*
- -- LAPACK routine (version 3.2.2) --
+ -- LAPACK driver routine (version 3.2) --
-- LAPACK is a software package provided by Univ. of Tennessee, --
-- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
- June 2010
+ November 2006
Purpose
=======
- SGEQR2 computes a QR factorization of a real m by n matrix A:
- A = Q * R.
+ SGELSD computes the minimum-norm solution to a real linear least
+ squares problem:
+ minimize 2-norm(| b - A*x |)
+ using the singular value decomposition (SVD) of A. A is an M-by-N
+ matrix which may be rank-deficient.
+
+ Several right hand side vectors b and solution vectors x can be
+ handled in a single call; they are stored as the columns of the
+ M-by-NRHS right hand side matrix B and the N-by-NRHS solution
+ matrix X.
+
+ The problem is solved in three steps:
+ (1) Reduce the coefficient matrix A to bidiagonal form with
+ Householder transformations, reducing the original problem
+ into a "bidiagonal least squares problem" (BLS)
+ (2) Solve the BLS using a divide and conquer approach.
+      (3) Apply back all the Householder transformations to solve
+ the original least squares problem.
+
+ The effective rank of A is determined by treating as zero those
+ singular values which are less than RCOND times the largest singular
+ value.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
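+
+    (Added note: in the code below, step (1) corresponds to SGEBRD,
+    preceded by SGEQRF or SGELQF in the strongly over- or
+    underdetermined paths, step (2) to SLALSD, and step (3) to SORMBR
+    together with SORMQR or SORMLQ.)
+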
Arguments
=========
M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
+ The number of rows of A. M >= 0.
N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
+ The number of columns of A. N >= 0.
- A (input/output) REAL array, dimension (LDA,N)
- On entry, the m by n matrix A.
- On exit, the elements on and above the diagonal of the array
- contain the min(m,n) by n upper trapezoidal matrix R (R is
- upper triangular if m >= n); the elements below the diagonal,
- with the array TAU, represent the orthogonal matrix Q as a
- product of elementary reflectors (see Further Details).
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrices B and X. NRHS >= 0.
+
+ A (input) REAL array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, A has been destroyed.
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(1,M).
- TAU (output) REAL array, dimension (min(M,N))
- The scalar factors of the elementary reflectors (see Further
- Details).
+ B (input/output) REAL array, dimension (LDB,NRHS)
+ On entry, the M-by-NRHS right hand side matrix B.
+ On exit, B is overwritten by the N-by-NRHS solution
+ matrix X. If m >= n and RANK = n, the residual
+ sum-of-squares for the solution in the i-th column is given
+ by the sum of squares of elements n+1:m in that column.
- WORK (workspace) REAL array, dimension (N)
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,max(M,N)).
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
+ S (output) REAL array, dimension (min(M,N))
+ The singular values of A in decreasing order.
+ The condition number of A in the 2-norm = S(1)/S(min(m,n)).
- Further Details
- ===============
+ RCOND (input) REAL
+ RCOND is used to determine the effective rank of A.
+ Singular values S(i) <= RCOND*S(1) are treated as zero.
+ If RCOND < 0, machine precision is used instead.
- The matrix Q is represented as a product of elementary reflectors
+ RANK (output) INTEGER
+ The effective rank of A, i.e., the number of singular values
+ which are greater than RCOND*S(1).
- Q = H(1) H(2) . . . H(k), where k = min(m,n).
+ WORK (workspace/output) REAL array, dimension (MAX(1,LWORK))
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
- Each H(i) has the form
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK must be at least 1.
+ The exact minimum amount of workspace needed depends on M,
+ N and NRHS. As long as LWORK is at least
+ 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2,
+ if M is greater than or equal to N or
+ 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2,
+ if M is less than N, the code will execute correctly.
+ SMLSIZ is returned by ILAENV and is equal to the maximum
+ size of the subproblems at the bottom of the computation
+ tree (usually about 25), and
+ NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+ For good performance, LWORK should generally be larger.
- H(i) = I - tau * v * v'
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the array WORK and the
+ minimum size of the array IWORK, and returns these values as
+ the first entries of the WORK and IWORK arrays, and no error
+ message related to LWORK is issued by XERBLA.
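+
+    (Added usage sketch of this query convention; the variable names
+    are illustrative only, not part of the routine:
+
+        lwork = -1;
+        sgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
+                &wkopt, &lwork, iwork, &info);
+        lwork = (integer) wkopt;
+        work = malloc(lwork * sizeof(real));
+        sgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
+                work, &lwork, iwork, &info);
+
+    where the first call only fills WORK(1) and IWORK(1) with the
+    optimal LWORK and minimum LIWORK.)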
- where tau is a real scalar, and v is a real vector with
- v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
- and tau in TAU(i).
+ IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK))
+ LIWORK >= max(1, 3*MINMN*NLVL + 11*MINMN),
+ where MINMN = MIN( M,N ).
+ On exit, if INFO = 0, IWORK(1) returns the minimum LIWORK.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: the algorithm for computing the SVD failed to converge;
+ if INFO = i, i off-diagonal elements of an intermediate
+ bidiagonal form did not converge to zero.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
=====================================================================
- Test the input arguments
+ Test the input arguments.
*/
/* Parameter adjustments */
a_dim1 = *lda;
a_offset = 1 + a_dim1;
a -= a_offset;
- --tau;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
--work;
+ --iwork;
/* Function Body */
*info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ lquery = *lwork == -1;
if (*m < 0) {
*info = -1;
} else if (*n < 0) {
*info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
} else if (*lda < max(1,*m)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("SGEQR2", &i__1);
- return 0;
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
}
- k = min(*m,*n);
+/*
+ Compute workspace.
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.)
+*/
- i__1 = k;
- for (i__ = 1; i__ <= i__1; ++i__) {
+ if (*info == 0) {
+ minwrk = 1;
+ maxwrk = 1;
+ liwork = 1;
+ if (minmn > 0) {
+ smlsiz = ilaenv_(&c__9, "SGELSD", " ", &c__0, &c__0, &c__0, &c__0,
+ (ftnlen)6, (ftnlen)1);
+ mnthr = ilaenv_(&c__6, "SGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = (integer) (log((real) minmn / (real) (smlsiz + 1)) / log(
+ 2.f)) + 1;
+ nlvl = max(i__1,0);
+ liwork = minmn * 3 * nlvl + minmn * 11;
+ mm = *m;
+ if (*m >= *n && *m >= mnthr) {
-/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+/*
+ Path 1a - overdetermined, with many more rows than
+ columns.
+*/
- i__2 = *m - i__ + 1;
-/* Computing MIN */
- i__3 = i__ + 1;
- slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
- , &c__1, &tau[i__]);
- if (i__ < *n) {
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "SGEQRF",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "SORMQR",
+ "LT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
-/* Apply H(i) to A(i:m,i+1:n) from the left */
+/*
+ Path 1 - overdetermined or exactly determined.
- aii = a[i__ + i__ * a_dim1];
- a[i__ + i__ * a_dim1] = 1.f;
- i__2 = *m - i__ + 1;
- i__3 = *n - i__;
- slarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[
- i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
- a[i__ + i__ * a_dim1] = aii;
+ Computing MAX
+*/
+ i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1,
+ "SGEBRD", " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "SORMBR"
+ , "QLT", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1,
+ "SORMBR", "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ wlalsd = *n * 9 + (*n << 1) * smlsiz + (*n << 3) * nlvl + *n *
+ *nrhs + i__1 * i__1;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + wlalsd;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,
+ i__2), i__2 = *n * 3 + wlalsd;
+ minwrk = max(i__1,i__2);
+ }
+ if (*n > *m) {
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ wlalsd = *m * 9 + (*m << 1) * smlsiz + (*m << 3) * nlvl + *m *
+ *nrhs + i__1 * i__1;
+ if (*n >= mnthr) {
+
+/*
+ Path 2a - underdetermined, with many more columns
+ than rows.
+*/
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) *
+ ilaenv_(&c__1, "SGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs *
+ ilaenv_(&c__1, "SORMBR", "QLT", m, nrhs, m, &c_n1,
+ (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) *
+ ilaenv_(&c__1, "SORMBR", "PLN", m, nrhs, m, &c_n1,
+ (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "SORMLQ"
+ , "LT", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + wlalsd;
+ maxwrk = max(i__1,i__2);
+/*
+ XXX: Ensure the Path 2a case below is triggered. The workspace
+ calculation should use queries for all routines eventually.
+ Computing MAX
+ Computing MAX
+*/
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4),
+ i__3 = max(i__3,*nrhs), i__4 = *n - *m * 3;
+                    i__1 = maxwrk, i__2 = (*m << 2) + *m * *m + max(i__3,i__4);
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "SGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1,
+ "SORMBR", "QLT", m, nrhs, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORM"
+ "BR", "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)
+ 3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + wlalsd;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,
+ i__2), i__2 = *m * 3 + wlalsd;
+ minwrk = max(i__1,i__2);
+ }
+ }
+ minwrk = min(minwrk,maxwrk);
+ work[1] = (real) maxwrk;
+ iwork[1] = liwork;
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
}
-/* L10: */
}
- return 0;
-/* End of SGEQR2 */
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGELSD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
-} /* sgeqr2_ */
+/* Quick return if possible. */
-/* Subroutine */ int sgeqrf_(integer *m, integer *n, real *a, integer *lda,
- real *tau, real *work, integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ if (*m == 0 || *n == 0) {
+ *rank = 0;
+ return 0;
+ }
- /* Local variables */
- static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
- extern /* Subroutine */ int sgeqr2_(integer *, integer *, real *, integer
- *, real *, real *, integer *), slarfb_(char *, char *, char *,
- char *, integer *, integer *, integer *, real *, integer *, real *
- , integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
- real *, integer *, real *, real *, integer *);
- static integer ldwork, lwkopt;
- static logical lquery;
+/* Get machine parameters. */
+ eps = slamch_("P");
+ sfmin = slamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
-/*
- -- LAPACK routine (version 3.2) --
- -- LAPACK is a software package provided by Univ. of Tennessee, --
- -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
- November 2006
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+ anrm = slange_("M", m, n, &a[a_offset], lda, &work[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
- Purpose
- =======
+/* Scale matrix norm up to SMLNUM. */
- SGEQRF computes a QR factorization of a real M-by-N matrix A:
- A = Q * R.
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
- Arguments
- =========
+/* Scale matrix norm down to BIGNUM. */
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ slaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb);
+ slaset_("F", &minmn, &c__1, &c_b29, &c_b29, &s[1], &c__1);
+ *rank = 0;
+ goto L10;
+ }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+ bnrm = slange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* If M < N make sure certain entries of B are zero. */
+
+ if (*m < *n) {
+ i__1 = *n - *m;
+ slaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1], ldb);
+ }
+
+/* Overdetermined case. */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+ mm = *n;
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R.
+ (Workspace: need 2*N, prefer N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+
+/*
+ Multiply B by transpose(Q).
+ (Workspace: need N+NRHS, prefer N+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2],
+ lda);
+ }
+ }
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A.
+ (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of R.
+ (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ slalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb,
+ rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+ b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m, i__2 = (*m << 1) - 4, i__1 = max(i__1,i__2), i__1 = max(
+ i__1,*nrhs), i__2 = *n - *m * 3, i__1 = max(i__1,i__2);
+ if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__1,wlalsd)) {
+
+/*
+ Path 2a - underdetermined, with many more columns than rows
+ and sufficient workspace for an efficient algorithm.
+*/
+
+ ldwork = *m;
+/*
+ Computing MAX
+ Computing MAX
+*/
+ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = (*m << 2) + *m * *lda + max(i__3,i__4), i__2 = *m * *lda +
+ *m + *m * *nrhs, i__1 = max(i__1,i__2), i__2 = (*m << 2)
+ + *m * *lda + wlalsd;
+ if (*lwork >= max(i__1,i__2)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ nwork = *m + 1;
+
+/*
+ Compute A=L*Q.
+ (Workspace: need 2*M, prefer M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+ il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ slaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwork], &
+ ldwork);
+ ie = il + ldwork * *m;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL).
+ (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of L.
+ (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ slalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+ i__1 = *n - *m;
+ slaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1],
+ ldb);
+ nwork = itau + *m;
+
+/*
+ Multiply transpose(Q) by B.
+ (Workspace: need M+NRHS, prefer M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A.
+ (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors.
+ (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ slalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ }
+ }
+
+/* Undo scaling. */
+
+ if (iascl == 1) {
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ slascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ slascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L10:
+ work[1] = (real) maxwrk;
+ iwork[1] = liwork;
+ return 0;
+
+/* End of SGELSD */
+
+} /* sgelsd_ */
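
SGELSD above is the divide-and-conquer least-squares driver this change wires in; the routine lists further down add `sgelsd`/`cgelsd`, so `numpy.linalg.lstsq` can dispatch to it. A minimal sketch of the user-visible behaviour, assuming the post-change `lstsq` signature:

    import numpy as np

    # Rank-deficient overdetermined system: column 2 is twice column 1.
    A = np.array([[1., 2.], [2., 4.], [3., 6.]])
    b = np.array([1., 2., 3.])
    x, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None)
    # xGELSD treats singular values at or below rcond * max(s) as zero,
    # so the reported rank is 1 and x is the minimum-norm solution
    # (approximately [0.2, 0.4]).
    print(rank, x)
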
+
+/* Subroutine */ int sgeqr2_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, k;
+ static real aii;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *), slarfg_(integer *, real *, real *,
+ integer *, real *);
+
+
+/*
+ -- LAPACK routine (version 3.2.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ June 2010
+
+
+ Purpose
+ =======
+
+ SGEQR2 computes a QR factorization of a real m by n matrix A:
+ A = Q * R.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
N (input) INTEGER
The number of columns of the matrix A. N >= 0.
A (input/output) REAL array, dimension (LDA,N)
- On entry, the M-by-N matrix A.
+ On entry, the m by n matrix A.
On exit, the elements on and above the diagonal of the array
- contain the min(M,N)-by-N upper trapezoidal matrix R (R is
+ contain the min(m,n) by n upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
- product of min(m,n) elementary reflectors (see Further
- Details).
+ product of elementary reflectors (see Further Details).
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(1,M).
The scalar factors of the elementary reflectors (see Further
Details).
- WORK (workspace/output) REAL array, dimension (MAX(1,LWORK))
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= max(1,N).
- For optimum performance LWORK >= N*NB, where NB is
- the optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
+ WORK (workspace) REAL array, dimension (N)
INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
Further Details
===============
/* Function Body */
*info = 0;
- nb = ilaenv_(&c__1, "SGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
- 1);
- lwkopt = *n * nb;
- work[1] = (real) lwkopt;
- lquery = *lwork == -1;
if (*m < 0) {
*info = -1;
} else if (*n < 0) {
*info = -2;
} else if (*lda < max(1,*m)) {
*info = -4;
- } else if (*lwork < max(1,*n) && ! lquery) {
- *info = -7;
}
if (*info != 0) {
i__1 = -(*info);
- xerbla_("SGEQRF", &i__1);
- return 0;
- } else if (lquery) {
+ xerbla_("SGEQR2", &i__1);
return 0;
}
-/* Quick return if possible */
-
k = min(*m,*n);
- if (k == 0) {
- work[1] = 1.f;
- return 0;
- }
-
- nbmin = 2;
- nx = 0;
- iws = *n;
- if (nb > 1 && nb < k) {
-
-/*
- Determine when to cross over from blocked to unblocked code.
- Computing MAX
-*/
- i__1 = 0, i__2 = ilaenv_(&c__3, "SGEQRF", " ", m, n, &c_n1, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nx = max(i__1,i__2);
- if (nx < k) {
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
-/* Determine if workspace is large enough for blocked code. */
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
- ldwork = *n;
- iws = ldwork * nb;
- if (*lwork < iws) {
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
+ , &c__1, &tau[i__]);
+ if (i__ < *n) {
-/*
- Not enough workspace to use optimal NB: reduce NB and
- determine the minimum value of NB.
-*/
+/* Apply H(i) to A(i:m,i+1:n) from the left */
- nb = *lwork / ldwork;
-/* Computing MAX */
- i__1 = 2, i__2 = ilaenv_(&c__2, "SGEQRF", " ", m, n, &c_n1, &
- c_n1, (ftnlen)6, (ftnlen)1);
- nbmin = max(i__1,i__2);
- }
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ slarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of SGEQR2 */
+
+} /* sgeqr2_ */
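
SGEQR2 is the unblocked Householder kernel: for each column it builds H(i) = I - tau*v*v' with slarfg_ and applies it with slarf_. A NumPy transcription of that loop, as an illustrative sketch (variable names are ours; the copysign convention mirrors LAPACK's choice of beta):

    import numpy as np

    def geqr2(A):
        """Unblocked Householder QR in SGEQR2's storage scheme: R on and
        above the diagonal, reflector vectors v below it, tau separate."""
        A = np.array(A, dtype=float)
        m, n = A.shape
        k = min(m, n)
        tau = np.zeros(k)
        for i in range(k):
            x = A[i:, i].copy()
            normx = np.linalg.norm(x)
            if normx == 0.0:
                continue                      # H(i) = I, tau(i) = 0
            beta = -np.copysign(normx, x[0])  # sign choice avoids cancellation
            tau[i] = (beta - x[0]) / beta
            v = x / (x[0] - beta)
            v[0] = 1.0
            # Apply H(i) to A(i:m, i:n) from the left (cf. slarf_)
            A[i:, i:] -= tau[i] * np.outer(v, v @ A[i:, i:])
            A[i, i] = beta
            A[i + 1:, i] = v[1:]              # store v below the diagonal
        return A, tau
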
+
+/* Subroutine */ int sgeqrf_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sgeqr2_(integer *, integer *, real *, integer
+ *, real *, real *, integer *), slarfb_(char *, char *, char *,
+ char *, integer *, integer *, integer *, real *, integer *, real *
+ , integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ November 2006
+
+
+ Purpose
+ =======
+
+ SGEQRF computes a QR factorization of a real M-by-N matrix A:
+ A = Q * R.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, the elements on and above the diagonal of the array
+ contain the min(M,N)-by-N upper trapezoidal matrix R (R is
+ upper triangular if m >= n); the elements below the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of min(m,n) elementary reflectors (see Further
+ Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) REAL array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) REAL array, dimension (MAX(1,LWORK))
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "SGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ lwkopt = *n * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SGEQRF", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SGEQRF", " ", m, n, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
}
}
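
The bookkeeping just above decides between blocked and unblocked code: NX is the crossover below which blocking stops paying off, and if LWORK cannot hold an N-by-NB work panel, NB is shrunk (down to NBMIN at worst). Restated in Python as a sketch, with illustrative constants standing in for the ilaenv_ tuning queries:

    def choose_blocking(m, n, lwork, nb=32, nx=128, nbmin=2):
        """Mirror of SGEQRF's NB/NX selection; nb, nx, nbmin are
        stand-ins for the ilaenv_ results, not real tuning values."""
        k = min(m, n)
        if nb > 1 and nb < k and nx < k:
            ldwork = n
            iws = ldwork * nb
            if lwork < iws:
                # Not enough workspace for the optimal NB: shrink it.
                nb = max(lwork // ldwork, 0)
                if nb < nbmin:
                    nb = 1    # caller falls through to unblocked sgeqr2_
        return nb, nx
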
#undef ci
+/* Subroutine */ int slals0_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *nrhs, real *b, integer *ldb, real *bx,
+ integer *ldbx, integer *perm, integer *givptr, integer *givcol,
+ integer *ldgcol, real *givnum, integer *ldgnum, real *poles, real *
+ difl, real *difr, real *z__, integer *k, real *c__, real *s, real *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset,
+ difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1,
+ poles_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ static integer i__, j, m, n;
+ static real dj;
+ static integer nlp1;
+ static real temp;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ extern doublereal snrm2_(integer *, real *, integer *);
+ static real diflj, difrj, dsigj;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *), scopy_(
+ integer *, real *, integer *, real *, integer *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real dsigjp;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *,
+ real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ November 2006
+
+
+ Purpose
+ =======
+
+ SLALS0 applies back the multiplying factors of either the left or the
+ right singular vector matrix of a diagonal matrix appended by a row
+ to the right hand side matrix B in solving the least squares problem
+ using the divide-and-conquer SVD approach.
+
+ For the left singular vector matrix, three types of orthogonal
+ matrices are involved:
+
+ (1L) Givens rotations: the number of such rotations is GIVPTR; the
+ pairs of columns/rows they were applied to are stored in GIVCOL;
+ and the C- and S-values of these rotations are stored in GIVNUM.
+
+ (2L) Permutation. The (NL+1)-st row of B is to be moved to the first
+ row, and for J=2:N, PERM(J)-th row of B is to be moved to the
+ J-th row.
+
+ (3L) The left singular vector matrix of the remaining matrix.
+
+ For the right singular vector matrix, four types of orthogonal
+ matrices are involved:
+
+ (1R) The right singular vector matrix of the remaining matrix.
+
+ (2R) If SQRE = 1, one extra Givens rotation to generate the right
+ null space.
+
+ (3R) The inverse transformation of (2L).
+
+ (4R) The inverse transformation of (1L).
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form:
+ = 0: Left singular vector matrix.
+ = 1: Right singular vector matrix.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+ B (input/output) REAL array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M. On output, B contains
+ the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B. LDB must be at least
+           max(1,max(M,N)).
+
+ BX (workspace) REAL array, dimension ( LDBX, NRHS )
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ PERM (input) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) applied
+ to the two blocks.
+
+ GIVPTR (input) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of rows/columns
+ involved in a Givens rotation.
+
+ LDGCOL (input) INTEGER
+ The leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (input) REAL array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value used in the
+ corresponding Givens rotation.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of arrays DIFR, POLES and
+ GIVNUM, must be at least K.
+
+ POLES (input) REAL array, dimension ( LDGNUM, 2 )
+ On entry, POLES(1:K, 1) contains the new singular
+ values obtained from solving the secular equation, and
+ POLES(1:K, 2) is an array containing the poles in the secular
+ equation.
+
+    DIFL   (input) REAL array, dimension ( K ).
+           On entry, DIFL(I) is the distance between the I-th updated
+           (undeflated) singular value and the I-th (undeflated) old
+           singular value.
+
+    DIFR   (input) REAL array, dimension ( LDGNUM, 2 ).
+           On entry, DIFR(I, 1) contains the distances between the I-th
+           updated (undeflated) singular value and the (I+1)-th
+           (undeflated) old singular value, and DIFR(I, 2) is the
+           normalizing factor for the I-th right singular vector.
+
+    Z      (input) REAL array, dimension ( K )
+           Contains the components of the deflation-adjusted updating row
+           vector.
+
+    K      (input) INTEGER
+           Contains the dimension of the non-deflated matrix. This is
+           the order of the related secular equation. 1 <= K <= N.
+
+    C      (input) REAL
+           C contains garbage if SQRE = 0 and the C-value of a Givens
+           rotation related to the right null space if SQRE = 1.
+
+    S      (input) REAL
+           S contains garbage if SQRE = 0 and the S-value of a Givens
+           rotation related to the right null space if SQRE = 1.
+
+ WORK (workspace) REAL array, dimension ( K )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ difr_dim1 = *ldgnum;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --z__;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if (*sqre < 0 || *sqre > 1) {
+ *info = -4;
+ }
+
+ n = *nl + *nr + 1;
+
+ if (*nrhs < 1) {
+ *info = -5;
+ } else if (*ldb < n) {
+ *info = -7;
+ } else if (*ldbx < n) {
+ *info = -9;
+ } else if (*givptr < 0) {
+ *info = -11;
+ } else if (*ldgcol < n) {
+ *info = -13;
+ } else if (*ldgnum < n) {
+ *info = -15;
+ } else if (*k < 1) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLALS0", &i__1);
+ return 0;
+ }
+
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+
+ if (*icompq == 0) {
+
+/*
+ Apply back orthogonal transformations from the left.
+
+ Step (1L): apply back the Givens rotations performed.
+*/
+
+ i__1 = *givptr;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ srot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &givnum[i__ + givnum_dim1]);
+/* L10: */
+ }
+
+/* Step (2L): permute rows of B. */
+
+ scopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ scopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+ ldbx);
+/* L20: */
+ }
+
+/*
+ Step (3L): apply the inverse of the left singular vector
+ matrix to BX.
+*/
+
+ if (*k == 1) {
+ scopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+ if (z__[1] < 0.f) {
+ sscal_(nrhs, &c_b151, &b[b_offset], ldb);
+ }
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = poles[j + poles_dim1];
+ dsigj = -poles[j + (poles_dim1 << 1)];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -poles[j + 1 + (poles_dim1 << 1)];
+ }
+ if (z__[j] == 0.f || poles[j + (poles_dim1 << 1)] == 0.f) {
+ work[j] = 0.f;
+ } else {
+ work[j] = -poles[j + (poles_dim1 << 1)] * z__[j] / diflj /
+ (poles[j + (poles_dim1 << 1)] + dj);
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] ==
+ 0.f) {
+ work[i__] = 0.f;
+ } else {
+ work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (slamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigj) - diflj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L30: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] ==
+ 0.f) {
+ work[i__] = 0.f;
+ } else {
+ work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__]
+ / (slamc3_(&poles[i__ + (poles_dim1 << 1)], &
+ dsigjp) + difrj) / (poles[i__ + (poles_dim1 <<
+ 1)] + dj);
+ }
+/* L40: */
+ }
+ work[1] = -1.f;
+ temp = snrm2_(k, &work[1], &c__1);
+ sgemv_("T", k, nrhs, &c_b15, &bx[bx_offset], ldbx, &work[1], &
+ c__1, &c_b29, &b[j + b_dim1], ldb);
+ slascl_("G", &c__0, &c__0, &temp, &c_b15, &c__1, nrhs, &b[j +
+ b_dim1], ldb, info);
+/* L50: */
+ }
+ }
+
+/* Move the deflated rows of BX to B also. */
+
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ slacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+ + b_dim1], ldb);
+ }
+ } else {
+
+/*
+ Apply back the right orthogonal transformations.
+
+ Step (1R): apply back the new right singular vector matrix
+ to B.
+*/
+
+ if (*k == 1) {
+ scopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dsigj = poles[j + (poles_dim1 << 1)];
+ if (z__[j] == 0.f) {
+ work[j] = 0.f;
+ } else {
+ work[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+ poles_dim1]) / difr[j + (difr_dim1 << 1)];
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ work[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + 1 + (poles_dim1 << 1)];
+ work[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difr[
+ i__ + difr_dim1]) / (dsigj + poles[i__ +
+ poles_dim1]) / difr[i__ + (difr_dim1 << 1)];
+ }
+/* L60: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ work[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + (poles_dim1 << 1)];
+ work[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difl[
+ i__]) / (dsigj + poles[i__ + poles_dim1]) /
+ difr[i__ + (difr_dim1 << 1)];
+ }
+/* L70: */
+ }
+ sgemv_("T", k, nrhs, &c_b15, &b[b_offset], ldb, &work[1], &
+ c__1, &c_b29, &bx[j + bx_dim1], ldbx);
+/* L80: */
+ }
+ }
+
+/*
+ Step (2R): if SQRE = 1, apply back the rotation that is
+ related to the right null space of the subproblem.
+*/
+
+ if (*sqre == 1) {
+ scopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+ srot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+ s);
+ }
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ slacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+ bx_dim1], ldbx);
+ }
+
+/* Step (3R): permute rows of B. */
+
+ scopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+ if (*sqre == 1) {
+ scopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+ }
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ scopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+ ldb);
+/* L90: */
+ }
+
+/* Step (4R): apply back the Givens rotations performed. */
+
+ for (i__ = *givptr; i__ >= 1; --i__) {
+ r__1 = -givnum[i__ + givnum_dim1];
+ srot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, &
+ b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ +
+ (givnum_dim1 << 1)], &r__1);
+/* L100: */
+ }
+ }
+
+ return 0;
+
+/* End of SLALS0 */
+
+} /* slals0_ */
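
Steps (1L) and (4R) above replay stored Givens rotations with srot_. The same row rotation in NumPy terms, as an illustrative stand-in (the givcol/givnum layout here is our simplification of SLALS0's packed arrays, not its actual workspace format):

    import numpy as np

    def apply_givens_rows(B, i, j, c, s):
        # In-place srot_: row_i <- c*row_i + s*row_j, row_j <- c*row_j - s*row_i
        ri, rj = B[i].copy(), B[j].copy()
        B[i] = c * ri + s * rj
        B[j] = c * rj - s * ri

    def replay_rotations(B, givcol, givnum):
        # Step (1L): apply the stored rotations in order.
        for (i, j), (c, s) in zip(givcol, givnum):
            apply_givens_rows(B, i, j, c, s)
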
+
+/* Subroutine */ int slalsa_(integer *icompq, integer *smlsiz, integer *n,
+ integer *nrhs, real *b, integer *ldb, real *bx, integer *ldbx, real *
+ u, integer *ldu, real *vt, integer *k, real *difl, real *difr, real *
+ z__, real *poles, integer *givptr, integer *givcol, integer *ldgcol,
+ integer *perm, real *givnum, real *c__, real *s, real *work, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1,
+ b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1,
+ difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset,
+ u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1,
+ i__2;
+
+ /* Local variables */
+ static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl,
+ ndb1, nlp1, lvl2, nrp1, nlvl, sqre, inode, ndiml;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer ndimr;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slals0_(integer *, integer *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *,
+ integer *, integer *, integer *, real *, integer *, real *, real *
+ , real *, real *, integer *, real *, real *, real *, integer *),
+ xerbla_(char *, integer *), slasdt_(integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ November 2006
+
+
+ Purpose
+ =======
+
+    SLALSA is an intermediate step in solving the least squares problem
+    by computing the SVD of the coefficient matrix in compact form (the
+    singular vectors are computed as products of simple orthogonal
+    matrices).
+
+ If ICOMPQ = 0, SLALSA applies the inverse of the left singular vector
+ matrix of an upper bidiagonal matrix to the right hand side; and if
+ ICOMPQ = 1, SLALSA applies the right singular vector matrix to the
+    right hand side. The singular vector matrices were generated in
+    compact form by SLASDA.
+
+ Arguments
+ =========
+
+
+ ICOMPQ (input) INTEGER
+ Specifies whether the left or the right singular vector
+ matrix is involved.
+ = 0: Left singular vector matrix
+ = 1: Right singular vector matrix
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The row and column dimensions of the upper bidiagonal matrix.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+ B (input/output) REAL array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M.
+ On output, B contains the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+           LDB must be at least max(1,max(M,N)).
+
+ BX (output) REAL array, dimension ( LDBX, NRHS )
+ On exit, the result of applying the left or right singular
+ vector matrix to B.
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ U (input) REAL array, dimension ( LDU, SMLSIZ ).
+ On entry, U contains the left singular vector matrices of all
+ subproblems at the bottom level.
+
+    LDU    (input) INTEGER, LDU >= N.
+ The leading dimension of arrays U, VT, DIFL, DIFR,
+ POLES, GIVNUM, and Z.
+
+ VT (input) REAL array, dimension ( LDU, SMLSIZ+1 ).
+ On entry, VT' contains the right singular vector matrices of
+ all subproblems at the bottom level.
+
+ K (input) INTEGER array, dimension ( N ).
+
+    DIFL   (input) REAL array, dimension ( LDU, NLVL ),
+           where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1.
+
+    DIFR   (input) REAL array, dimension ( LDU, 2 * NLVL ).
+           On entry, DIFL(*, I) and DIFR(*, 2 * I - 1) record
+           distances between singular values on the I-th level and
+           singular values on the (I-1)-th level, and DIFR(*, 2 * I)
+           records the normalizing factors of the right singular vector
+           matrices of subproblems on the I-th level.
+
+ Z (input) REAL array, dimension ( LDU, NLVL ).
+ On entry, Z(1, I) contains the components of the deflation-
+ adjusted updating row vector for subproblems on the I-th
+ level.
+
+ POLES (input) REAL array, dimension ( LDU, 2 * NLVL ).
+ On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old
+ singular values involved in the secular equations on the I-th
+ level.
+
+ GIVPTR (input) INTEGER array, dimension ( N ).
+ On entry, GIVPTR( I ) records the number of Givens
+ rotations performed on the I-th problem on the computation
+ tree.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ).
+ On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the
+ locations of Givens rotations performed on the I-th level on
+ the computation tree.
+
+    LDGCOL (input) INTEGER, LDGCOL >= N.
+ The leading dimension of arrays GIVCOL and PERM.
+
+ PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ).
+ On entry, PERM(*, I) records permutations done on the I-th
+ level of the computation tree.
+
+ GIVNUM (input) REAL array, dimension ( LDU, 2 * NLVL ).
+ On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S-
+ values of Givens rotations performed on the I-th level on the
+ computation tree.
+
+ C (input) REAL array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ C( I ) contains the C-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ S (input) REAL array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ S( I ) contains the S-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ WORK (workspace) REAL array.
+ The dimension must be at least N.
+
+ IWORK (workspace) INTEGER array.
+           The dimension must be at least 3 * N.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*icompq < 0 || *icompq > 1) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < *smlsiz) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < *n) {
+ *info = -6;
+ } else if (*ldbx < *n) {
+ *info = -8;
+ } else if (*ldu < *n) {
+ *info = -10;
+ } else if (*ldgcol < *n) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLALSA", &i__1);
+ return 0;
+ }
+
+/* Book-keeping and setting up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ The following code applies back the left singular vector factors.
+ For applying back the right singular vector factors, go to 50.
+*/
+
+ if (*icompq == 1) {
+ goto L50;
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by SLASDQ. The corresponding left and right singular vector
+ matrices are in explicit form. First apply back the left
+ singular vector matrices.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sgemm_("T", "N", &nl, nrhs, &nl, &c_b15, &u[nlf + u_dim1], ldu, &b[
+ nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx);
+ sgemm_("T", "N", &nr, nrhs, &nr, &c_b15, &u[nrf + u_dim1], ldu, &b[
+ nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx);
+/* L10: */
+ }
+
+/*
+ Next copy the rows of B that correspond to unchanged rows
+ in the bidiagonal matrix to BX.
+*/
+
+ i__1 = nd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ic = iwork[inode + i__ - 1];
+ scopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L20: */
+ }
+
+/*
+ Finally go through the left singular vector matrices of all
+ the other subproblems bottom-up on the tree.
+*/
+
+ j = pow_ii(&c__2, &nlvl);
+ sqre = 0;
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/*
+ find the first node LF and last node LL on
+ the current level LVL
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = (lf << 1) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ --j;
+ slals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+ b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L30: */
+ }
+/* L40: */
+ }
+ goto L90;
+
+/* ICOMPQ = 1: applying back the right singular vector factors. */
+
+L50:
+
+/*
+   Now go through the right singular vector matrices of all
+   the tree nodes top-down.
+*/
+
+ j = 0;
+ i__1 = nlvl;
+ for (lvl = 1; lvl <= i__1; ++lvl) {
+ lvl2 = (lvl << 1) - 1;
+
+/*
+ Find the first node LF and last node LL on
+ the current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__2 = lvl - 1;
+ lf = pow_ii(&c__2, &i__2);
+ ll = (lf << 1) - 1;
+ }
+ i__2 = lf;
+ for (i__ = ll; i__ >= i__2; --i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqre = 0;
+ } else {
+ sqre = 1;
+ }
+ ++j;
+ slals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L60: */
+ }
+/* L70: */
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by SLASDQ. The corresponding right singular vector
+ matrices are in explicit form. Apply them back.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlp1 = nl + 1;
+ if (i__ == nd) {
+ nrp1 = nr;
+ } else {
+ nrp1 = nr + 1;
+ }
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b15, &vt[nlf + vt_dim1], ldu,
+ &b[nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx);
+ sgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b15, &vt[nrf + vt_dim1], ldu,
+ &b[nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx);
+/* L80: */
+ }
+
+L90:
+
+ return 0;
+
+/* End of SLALSA */
+
+} /* slalsa_ */
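
The LF/LL arithmetic above enumerates the nodes of the computation tree level by level: with nodes numbered 1, 2, 3, ... heap-style, level LVL spans 2**(LVL-1) through 2**LVL - 1. A quick illustration of the bottom-up pass used for the left factors (tree depth chosen arbitrarily):

    nlvl = 3                                  # tree depth (illustrative)
    for lvl in range(nlvl, 0, -1):            # bottom-up, as for ICOMPQ = 0
        lf = 1 if lvl == 1 else 2 ** (lvl - 1)
        ll = 2 * lf - 1
        print(lvl, list(range(lf, ll + 1)))   # 3: [4..7], 2: [2, 3], 1: [1]
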
+
+/* Subroutine */ int slalsd_(char *uplo, integer *smlsiz, integer *n, integer
+ *nrhs, real *d__, real *e, real *b, integer *ldb, real *rcond,
+ integer *rank, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ static integer c__, i__, j, k;
+ static real r__;
+ static integer s, u, z__;
+ static real cs;
+ static integer bx;
+ static real sn;
+ static integer st, vt, nm1, st1;
+ static real eps;
+ static integer iwk;
+ static real tol;
+ static integer difl, difr;
+ static real rcnd;
+ static integer perm, nsub, nlvl, sqre, bxst;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), sgemm_(char *, char *, integer *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+ , real *, real *, integer *);
+ static integer poles, sizei, nsize;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ static integer nwork, icmpq1, icmpq2;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int slasda_(integer *, integer *, integer *,
+ integer *, real *, real *, real *, integer *, real *, integer *,
+ real *, real *, real *, real *, integer *, integer *, integer *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ xerbla_(char *, integer *), slalsa_(integer *, integer *,
+ integer *, integer *, real *, integer *, real *, integer *, real *
+ , integer *, real *, integer *, real *, real *, real *, real *,
+ integer *, integer *, integer *, integer *, real *, real *, real *
+ , real *, integer *, integer *), slascl_(char *, integer *,
+ integer *, real *, real *, integer *, integer *, real *, integer *
+ , integer *);
+ static integer givcol;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, real *, real *, real *, integer *, real *
+ , integer *, real *, integer *, real *, integer *),
+ slacpy_(char *, integer *, integer *, real *, integer *, real *,
+ integer *), slartg_(real *, real *, real *, real *, real *
+ ), slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ static real orgnrm;
+ static integer givnum;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+ static integer givptr, smlszp;
+
+
+/*
+ -- LAPACK routine (version 3.2.2) --
+ -- LAPACK is a software package provided by Univ. of Tennessee, --
+ -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+ June 2010
+
+
+ Purpose
+ =======
+
+ SLALSD uses the singular value decomposition of A to solve the least
+ squares problem of finding X to minimize the Euclidean norm of each
+ column of A*X-B, where A is N-by-N upper bidiagonal, and X and B
+ are N-by-NRHS. The solution X overwrites B.
+
+ The singular values of A smaller than RCOND times the largest
+ singular value are treated as zero in solving the least squares
+ problem; in this case a minimum norm solution is returned.
+ The actual singular values are returned in D in ascending order.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': D and E define an upper bidiagonal matrix.
+ = 'L': D and E define a lower bidiagonal matrix.
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The dimension of the bidiagonal matrix. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of columns of B. NRHS must be at least 1.
+
+ D (input/output) REAL array, dimension (N)
+ On entry D contains the main diagonal of the bidiagonal
+ matrix. On exit, if INFO = 0, D contains its singular values.
+
+ E (input/output) REAL array, dimension (N-1)
+ Contains the super-diagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ B (input/output) REAL array, dimension (LDB,NRHS)
+ On input, B contains the right hand sides of the least
+ squares problem. On output, B contains the solution X.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,N).
+
+ RCOND (input) REAL
+ The singular values of A less than or equal to RCOND times
+ the largest singular value are treated as zero in solving
+ the least squares problem. If RCOND is negative,
+ machine precision is used instead.
+ For example, if diag(S)*X=B were the least squares problem,
+ where diag(S) is a diagonal matrix of singular values, the
+ solution would be X(i) = B(i) / S(i) if S(i) is greater than
+ RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to
+ RCOND*max(S).
+
+ RANK (output) INTEGER
+ The number of singular values of A greater than RCOND times
+ the largest singular value.
+
+ WORK (workspace) REAL array, dimension at least
+ (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2),
+ where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1).
+
+ IWORK (workspace) INTEGER array, dimension at least
+ (3*N*NLVL + 11*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute a singular value while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through MOD(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < 1 || *ldb < *n) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLALSD", &i__1);
+ return 0;
+ }
+
+ eps = slamch_("Epsilon");
+
+/* Set up the tolerance. */
+
+ if (*rcond <= 0.f || *rcond >= 1.f) {
+ rcnd = eps;
+ } else {
+ rcnd = *rcond;
+ }
+
+ *rank = 0;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ if (d__[1] == 0.f) {
+ slaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb);
+ } else {
+ *rank = 1;
+ slascl_("G", &c__0, &c__0, &d__[1], &c_b15, &c__1, nrhs, &b[
+ b_offset], ldb, info);
+ d__[1] = dabs(d__[1]);
+ }
+ return 0;
+ }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+ if (*(unsigned char *)uplo == 'L') {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (*nrhs == 1) {
+ srot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+ c__1, &cs, &sn);
+ } else {
+ work[(i__ << 1) - 1] = cs;
+ work[i__ * 2] = sn;
+ }
+/* L10: */
+ }
+ if (*nrhs > 1) {
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - 1;
+ for (j = 1; j <= i__2; ++j) {
+ cs = work[(j << 1) - 1];
+ sn = work[j * 2];
+ srot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ *
+ b_dim1], &c__1, &cs, &sn);
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Scale. */
+
+ nm1 = *n - 1;
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ slaset_("A", n, nrhs, &c_b29, &c_b29, &b[b_offset], ldb);
+ return 0;
+ }
+
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1,
+ info);
+
+/*
+ If N is smaller than the minimum divide size SMLSIZ, then solve
+ the problem with another solver.
+*/
+
+ if (*n <= *smlsiz) {
+ nwork = *n * *n + 1;
+ slaset_("A", n, n, &c_b29, &c_b15, &work[1], n);
+ slasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, &
+ work[1], n, &b[b_offset], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= tol) {
+ slaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[i__ + b_dim1],
+ ldb);
+ } else {
+ slascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &b[
+ i__ + b_dim1], ldb, info);
+ ++(*rank);
+ }
+/* L40: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b15, &work[1], n, &b[b_offset], ldb, &
+ c_b29, &work[nwork], n);
+ slacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb);
+
+/* Unscale. */
+
+ slascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n,
+ info);
+ slasrt_("D", n, &d__[1], info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset],
+ ldb, info);
+
+ return 0;
+ }
+
+/* Book-keeping and setting up some constants. */
+
+ nlvl = (integer) (log((real) (*n) / (real) (*smlsiz + 1)) / log(2.f)) + 1;
+
+ smlszp = *smlsiz + 1;
+
+ u = 1;
+ vt = *smlsiz * *n + 1;
+ difl = vt + smlszp * *n;
+ difr = difl + nlvl * *n;
+ z__ = difr + (nlvl * *n << 1);
+ c__ = z__ + nlvl * *n;
+ s = c__ + *n;
+ poles = s + *n;
+ givnum = poles + (nlvl << 1) * *n;
+ bx = givnum + (nlvl << 1) * *n;
+ nwork = bx + *n * *nrhs;
+
+ sizei = *n + 1;
+ k = sizei + *n;
+ givptr = k + *n;
+ perm = givptr + *n;
+ givcol = perm + nlvl * *n;
+ iwk = givcol + (nlvl * *n << 1);
+
+ st = 1;
+ sqre = 0;
+ icmpq1 = 1;
+ icmpq2 = 0;
+ nsub = 0;
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) < eps) {
+ d__[i__] = r_sign(&eps, &d__[i__]);
+ }
+/* L50: */
+ }
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = e[i__], dabs(r__1)) < eps || i__ == nm1) {
+ ++nsub;
+ iwork[nsub] = st;
+
+/*
+ Subproblem found. First determine its size and then
+ apply divide and conquer on it.
+*/
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else if ((r__1 = e[i__], dabs(r__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else {
+
+/*
+                A subproblem with E(NM1) small. This implies a
+                1-by-1 subproblem at D(N), which is not solved
+                explicitly.
+*/
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ ++nsub;
+ iwork[nsub] = *n;
+ iwork[sizei + nsub - 1] = 1;
+ scopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+ }
+ st1 = st - 1;
+ if (nsize == 1) {
+
+/*
+ This is a 1-by-1 subproblem and is not solved
+ explicitly.
+*/
+
+ scopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+ } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by SLASDQ. */
+
+ slaset_("A", &nsize, &nsize, &c_b29, &c_b15, &work[vt + st1],
+ n);
+ slasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[
+ st], &work[vt + st1], n, &work[nwork], n, &b[st +
+ b_dim1], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ slacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+ st1], n);
+ } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+ slasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+ work[u + st1], n, &work[vt + st1], &iwork[k + st1], &
+ work[difl + st1], &work[difr + st1], &work[z__ + st1],
+ &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum +
+ st1], &work[c__ + st1], &work[s + st1], &work[nwork],
+ &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ bxst = bx + st1;
+ slalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+ work[bxst], n, &work[u + st1], n, &work[vt + st1], &
+ iwork[k + st1], &work[difl + st1], &work[difr + st1],
+ &work[z__ + st1], &work[poles + st1], &iwork[givptr +
+ st1], &iwork[givcol + st1], n, &iwork[perm + st1], &
+ work[givnum + st1], &work[c__ + st1], &work[s + st1],
+ &work[nwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ st = i__ + 1;
+ }
+/* L60: */
+ }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+ tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+ Some of the elements in D can be negative because 1-by-1
+ subproblems were not solved explicitly.
+*/
+
+ if ((r__1 = d__[i__], dabs(r__1)) <= tol) {
+ slaset_("A", &c__1, nrhs, &c_b29, &c_b29, &work[bx + i__ - 1], n);
+ } else {
+ ++(*rank);
+ slascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &work[
+ bx + i__ - 1], n, info);
+ }
+ d__[i__] = (r__1 = d__[i__], dabs(r__1));
+/* L70: */
+ }
+
+/* Now apply back the right singular vectors. */
+
+ icmpq2 = 1;
+ i__1 = nsub;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ st = iwork[i__];
+ st1 = st - 1;
+ nsize = iwork[sizei + i__ - 1];
+ bxst = bx + st1;
+ if (nsize == 1) {
+ scopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+ } else if (nsize <= *smlsiz) {
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b15, &work[vt + st1], n,
+ &work[bxst], n, &c_b29, &b[st + b_dim1], ldb);
+ } else {
+ slalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+ b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[
+ k + st1], &work[difl + st1], &work[difr + st1], &work[z__
+ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1],
+ &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[
+ iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+/* L80: */
+ }
+
+/* Unscale and sort the singular values. */
+
+ slascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, info);
+ slasrt_("D", n, &d__[1], info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset], ldb,
+ info);
+
+ return 0;
+
+/* End of SLALSD */
+
+} /* slalsd_ */
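
For the small-N branch above, SLALSD forms the dense singular vectors with SLASDQ and thresholds them against TOL = RCND * |D(ISAMAX)|. The same computation in dense NumPy form, as a sketch (1-D right-hand side for brevity; d has length n, e has length n-1):

    import numpy as np

    def bidiag_lstsq(d, e, b, rcond):
        """Solve min ||A x - b|| for upper-bidiagonal A = diag(d) + diag(e, 1),
        zeroing singular values at or below rcond * max(s)."""
        A = np.diag(d) + np.diag(e, 1)
        u, s, vt = np.linalg.svd(A)
        tol = rcond * s.max()
        c = u.T @ b
        w = np.zeros_like(c)
        keep = s > tol                  # the rank decision in the L40 loop
        w[keep] = c[keep] / s[keep]
        return vt.T @ w, int(keep.sum())
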
+
/* Subroutine */ int slamrg_(integer *n1, integer *n2, real *a, integer *
strd1, integer *strd2, integer *index)
{
/* Note that M is very tiny */
if (l == 0.f) {
- t = r_sign(&c_b2863, &ft) * r_sign(&c_b15, >);
+ t = r_sign(&c_b3178, &ft) * r_sign(&c_b15, >);
} else {
t = gt / r_sign(&d__, &ft) + m / t;
}
ccopy
cgeev
+cgelsd
cgemm
cgesdd
cgesv
dsyevd
scopy
sgeev
+sgelsd
sgemm
sgesdd
sgesv
/* Initialization function for the module */
#if PY_MAJOR_VERSION >= 3
-#define RETVAL m
+#define RETVAL(x) x
PyMODINIT_FUNC PyInit_lapack_lite(void)
#else
-#define RETVAL
+#define RETVAL(x)
PyMODINIT_FUNC
initlapack_lite(void)
#endif
"", (PyObject*)NULL,PYTHON_API_VERSION);
#endif
if (m == NULL) {
- return RETVAL;
+ return RETVAL(NULL);
}
import_array();
d = PyModule_GetDict(m);
LapackError = PyErr_NewException("lapack_lite.LapackError", NULL, NULL);
PyDict_SetItemString(d, "LapackError", LapackError);
- return RETVAL;
+ return RETVAL(m);
}
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
+import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
- csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
- add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
- finfo, errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs,
- broadcast, atleast_2d, intp, asanyarray, object_, ones, matmul,
- swapaxes, divide, count_nonzero, ndarray, isnan
+ csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
+ add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
+ finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
+ atleast_2d, intp, asanyarray, object_, matmul,
+ swapaxes, divide, count_nonzero, isnan
)
from numpy.core.multiarray import normalize_axis_index
-from numpy.lib import triu, asfarray
+from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
-from numpy.matrixlib.defmatrix import matrix_power
# For Python2/3 compatibility
_N = b'N'
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
+def _raise_linalgerror_lstsq(err, flag):
+ raise LinAlgError("SVD did not converge in Linear Least Squares")
+
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
def _assertNdSquareness(*arrays):
for a in arrays:
- if max(a.shape[-2:]) != min(a.shape[-2:]):
+ m, n = a.shape[-2:]
+ if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
return wrap(ainv.astype(result_t, copy=False))
+def matrix_power(a, n):
+ """
+ Raise a square matrix to the (integer) power `n`.
+
+    For positive integers `n`, the power is computed by repeated matrix
+    squarings and matrix multiplications. If ``n == 0``, the identity matrix
+    of the same shape as `a` is returned. If ``n < 0``, the inverse
+    is computed and then raised to the power ``abs(n)``.
+
+ .. note:: Stacks of object matrices are not currently supported.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ Matrix to be "powered."
+ n : int
+ The exponent can be any integer or long integer, positive,
+ negative, or zero.
+
+ Returns
+ -------
+ a**n : (..., M, M) ndarray or matrix object
+        The return value is the same shape and type as `a`;
+        if the exponent is positive or zero then the type of the
+        elements is the same as those of `a`. If the exponent is
+        negative the elements are floating-point.
+
+ Raises
+ ------
+ LinAlgError
+ For matrices that are not square or that (for negative powers) cannot
+ be inverted numerically.
+
+ Examples
+ --------
+ >>> from numpy.linalg import matrix_power
+ >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
+ >>> matrix_power(i, 3) # should = -i
+ array([[ 0, -1],
+ [ 1, 0]])
+ >>> matrix_power(i, 0)
+ array([[1, 0],
+ [0, 1]])
+ >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
+ array([[ 0., 1.],
+ [-1., 0.]])
+
+ Somewhat more sophisticated example
+
+ >>> q = np.zeros((4, 4))
+ >>> q[0:2, 0:2] = -i
+ >>> q[2:4, 2:4] = i
+ >>> q # one of the three quaternion units not equal to 1
+ array([[ 0., -1., 0., 0.],
+ [ 1., 0., 0., 0.],
+ [ 0., 0., 0., 1.],
+ [ 0., 0., -1., 0.]])
+ >>> matrix_power(q, 2) # = -np.eye(4)
+ array([[-1., 0., 0., 0.],
+ [ 0., -1., 0., 0.],
+ [ 0., 0., -1., 0.],
+ [ 0., 0., 0., -1.]])
+
+ """
+ a = asanyarray(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
+
+ try:
+ n = operator.index(n)
+ except TypeError:
+ raise TypeError("exponent must be an integer")
+
+ # Fall back on dot for object arrays. Object arrays are not supported by
+ # the current implementation of matmul using einsum
+ if a.dtype != object:
+ fmatmul = matmul
+ elif a.ndim == 2:
+ fmatmul = dot
+ else:
+ raise NotImplementedError(
+ "matrix_power not supported for stacks of object arrays")
+
+ if n == 0:
+ a = empty_like(a)
+ a[...] = eye(a.shape[-2], dtype=a.dtype)
+ return a
+
+ elif n < 0:
+ a = inv(a)
+ n = abs(n)
+
+ # short-cuts.
+ if n == 1:
+ return a
+
+ elif n == 2:
+ return fmatmul(a, a)
+
+ elif n == 3:
+ return fmatmul(fmatmul(a, a), a)
+
+ # Use binary decomposition to reduce the number of matrix multiplications.
+ # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
+ # increasing powers of 2, and multiply into the result as needed.
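+    # For example, n = 13 = 0b1101 computes z = a, a**2, a**4, a**8 and
+    # multiplies a, a**4 and a**8 into `result`: 5 matrix products
+    # instead of the 12 needed by repeated multiplication.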
+ z = result = None
+ while n > 0:
+ z = a if z is None else fmatmul(z, z)
+ n, bit = divmod(n, 2)
+ if bit:
+ result = z if result is None else fmatmul(result, z)
+
+ return result
+
+
# Cholesky decomposition
def cholesky(a):
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
- m = a.shape[-2]
- n = a.shape[-1]
+ m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _isEmpty2d(a):
- res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
+ m, n = a.shape[-2:]
+ res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False)
[ 2., 1.],
[ 3., 1.]])
- >>> m, c = np.linalg.lstsq(A, y)[0]
+ >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
>>> print(m, c)
1.0 -0.95
>>> plt.show()
"""
- import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
- m = a.shape[0]
- n = a.shape[1]
- n_rhs = b.shape[1]
- ldb = max(n, m)
- if m != b.shape[0]:
+ m, n = a.shape[-2:]
+ m2, n_rhs = b.shape[-2:]
+ if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
FutureWarning, stacklevel=2)
rcond = -1
if rcond is None:
- rcond = finfo(t).eps * ldb
-
- bstar = zeros((ldb, n_rhs), t)
- bstar[:m, :n_rhs] = b
- a, bstar = _fastCopyAndTranspose(t, a, bstar)
- a, bstar = _to_native_byte_order(a, bstar)
- s = zeros((min(m, n),), real_t)
- # This line:
- # * is incorrect, according to the LAPACK documentation
- # * raises a ValueError if min(m,n) == 0
- # * should not be calculated here anyway, as LAPACK should calculate
- # `liwork` for us. But that only works if our version of lapack does
- # not have this bug:
- # http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
- # Lapack_lite does have that bug...
- nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
- iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
- if isComplexType(t):
- lapack_routine = lapack_lite.zgelsd
- lwork = 1
- rwork = zeros((lwork,), real_t)
- work = zeros((lwork,), t)
- results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
- 0, work, -1, rwork, iwork, 0)
- lrwork = int(rwork[0])
- lwork = int(work[0].real)
- work = zeros((lwork,), t)
- rwork = zeros((lrwork,), real_t)
- results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
- 0, work, lwork, rwork, iwork, 0)
- else:
- lapack_routine = lapack_lite.dgelsd
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
- 0, work, -1, iwork, 0)
- lwork = int(work[0])
- work = zeros((lwork,), t)
- results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
- 0, work, lwork, iwork, 0)
- if results['info'] > 0:
- raise LinAlgError('SVD did not converge in Linear Least Squares')
-
- # undo transpose imposed by fortran-order arrays
- b_out = bstar.T
-
- # b_out contains both the solution and the components of the residuals
- x = b_out[:n,:]
- r_parts = b_out[n:,:]
- if isComplexType(t):
- resids = sum(abs(r_parts)**2, axis=-2)
+ rcond = finfo(t).eps * max(n, m)
+
+ if m <= n:
+ gufunc = _umath_linalg.lstsq_m
else:
- resids = sum(r_parts**2, axis=-2)
+ gufunc = _umath_linalg.lstsq_n
- rank = results['rank']
+ signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
+ extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
+ x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
# remove the axis we added
if is_1d:
from subprocess import PIPE, Popen
import sys
import re
+import pytest
from numpy.linalg import lapack_lite
-from numpy.testing import run_module_suite, assert_, dec
+from numpy.testing import assert_
class FindDependenciesLdd(object):
class TestF77Mismatch(object):
- @dec.skipif(not(sys.platform[:5] == 'linux'),
- "Skipping fortran compiler mismatch on non Linux platform")
+ @pytest.mark.skipif(not(sys.platform[:5] == 'linux'),
+ reason="no fortran compiler on non-Linux platform")
def test_lapack(self):
f = FindDependenciesLdd()
deps = f.grep_dependencies(lapack_lite.__file__,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import assert_warns, run_module_suite
+from numpy.testing import assert_warns
def test_qr_mode_full_future_warning():
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f')
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic')
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')
-
-
-if __name__ == "__main__":
- run_module_suite()
import sys
import itertools
import traceback
-import warnings
+import textwrap
+import subprocess
+import pytest
import numpy as np
-from numpy import array, single, double, csingle, cdouble, dot, identity
+from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
- assert_almost_equal, assert_allclose, run_module_suite,
- dec, SkipTest, suppress_warnings
-)
+ assert_almost_equal, assert_allclose, SkipTest, suppress_warnings
+ )
-def ifthen(a, b):
- return not a or b
-
-
-def imply(a, b):
- return not a or b
+def consistent_subclass(out, in_):
+ # For ndarray subclass input, our output should have the same subclass
+ # (non-ndarray input gets converted to ndarray).
+ return type(out) is (type(in_) if isinstance(in_, np.ndarray)
+ else np.ndarray)
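+
+# For example, consistent_subclass(linalg.inv(m), m) is true both when `m`
+# is an np.matrix (the result is then also a matrix) and when `m` is a
+# plain list (the result is then a base ndarray).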
old_assert_almost_equal = assert_almost_equal
'generalized', 'size-0', 'strided' # optional additions
}
+
class LinalgCase(object):
def __init__(self, name, a, b, tags=set()):
"""
def __repr__(self):
return "<LinalgCase: %s>" % (self.name,)
+
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
- LinalgCase("0x0_matrix",
- np.empty((0, 0), dtype=double).view(np.matrix),
- np.empty((0, 1), dtype=double).view(np.matrix),
- tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
- LinalgCase("matrix_b_only",
- array([[1., 2.], [3., 4.]]),
- matrix([2., 1.]).T),
- LinalgCase("matrix_a_and_b",
- matrix([[1., 2.], [3., 4.]]),
- matrix([2., 1.]).T),
])
# non-square test-cases
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
- LinalgCase("hmatrix_a_and_b",
- matrix([[1., 2.], [2., 1.]]),
- None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
return new_cases
+
CASES += _make_generalized_cases()
+
#
# Generate stride combination variations of the above
#
-
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
+
def _make_strided_cases():
new_cases = []
for case in CASES:
new_cases.append(new_case)
return new_cases
+
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
+class LinalgTestCase(object):
+ TEST_CASES = CASES
-def _check_cases(func, require=set(), exclude=set()):
- """
- Run func on each of the cases with all of the tags in require, and none
- of the tags in exclude
- """
- for case in CASES:
- # filter by require and exclude
- if case.tags & require != require:
- continue
- if case.tags & exclude:
- continue
+ def check_cases(self, require=set(), exclude=set()):
+ """
+ Run func on each of the cases with all of the tags in require, and none
+ of the tags in exclude
+ """
+ for case in self.TEST_CASES:
+ # filter by require and exclude
+ if case.tags & require != require:
+ continue
+ if case.tags & exclude:
+ continue
- try:
- case.check(func)
- except Exception:
- msg = "In test case: %r\n\n" % case
- msg += traceback.format_exc()
- raise AssertionError(msg)
+ try:
+ case.check(self.do)
+ except Exception:
+ msg = "In test case: %r\n\n" % case
+ msg += traceback.format_exc()
+ raise AssertionError(msg)
-class LinalgSquareTestCase(object):
+class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
- _check_cases(self.do, require={'square'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'square'},
+ exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
- _check_cases(self.do, require={'square', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'square', 'size-0'},
+ exclude={'generalized'})
-class LinalgNonsquareTestCase(object):
+class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
- _check_cases(self.do, require={'nonsquare'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'nonsquare'},
+ exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
- _check_cases(self.do, require={'nonsquare', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'nonsquare', 'size-0'},
+ exclude={'generalized'})
-class HermitianTestCase(object):
+
+class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
- _check_cases(self.do, require={'hermitian'}, exclude={'generalized', 'size-0'})
+ self.check_cases(require={'hermitian'},
+ exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
- _check_cases(self.do, require={'hermitian', 'size-0'}, exclude={'generalized'})
+ self.check_cases(require={'hermitian', 'size-0'},
+ exclude={'generalized'})
-class LinalgGeneralizedSquareTestCase(object):
+class LinalgGeneralizedSquareTestCase(LinalgTestCase):
- @dec.slow
+ @pytest.mark.slow
def test_generalized_sq_cases(self):
- _check_cases(self.do, require={'generalized', 'square'}, exclude={'size-0'})
+ self.check_cases(require={'generalized', 'square'},
+ exclude={'size-0'})
- @dec.slow
+ @pytest.mark.slow
def test_generalized_empty_sq_cases(self):
- _check_cases(self.do, require={'generalized', 'square', 'size-0'})
+ self.check_cases(require={'generalized', 'square', 'size-0'})
-class LinalgGeneralizedNonsquareTestCase(object):
+class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
- @dec.slow
+ @pytest.mark.slow
def test_generalized_nonsq_cases(self):
- _check_cases(self.do, require={'generalized', 'nonsquare'}, exclude={'size-0'})
+ self.check_cases(require={'generalized', 'nonsquare'},
+ exclude={'size-0'})
- @dec.slow
+ @pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
- _check_cases(self.do, require={'generalized', 'nonsquare', 'size-0'})
+ self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
-class HermitianGeneralizedTestCase(object):
+class HermitianGeneralizedTestCase(LinalgTestCase):
- @dec.slow
+ @pytest.mark.slow
def test_generalized_herm_cases(self):
- _check_cases(self.do,
- require={'generalized', 'hermitian'},
- exclude={'size-0'})
+ self.check_cases(require={'generalized', 'hermitian'},
+ exclude={'size-0'})
- @dec.slow
+ @pytest.mark.slow
def test_generalized_empty_herm_cases(self):
- _check_cases(self.do,
- require={'generalized', 'hermitian', 'size-0'},
- exclude={'none'})
+ self.check_cases(require={'generalized', 'hermitian', 'size-0'},
+ exclude={'none'})
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
- for c in itertools.product(*map(range, a.shape[:-2])):
- r[c] = identity(a.shape[-2])
+ r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
-class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
-
+class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+    # Kept apart from TestSolve so it can be reused for testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
- assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
+ assert_(consistent_subclass(x, b))
+
+class TestSolve(SolveCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_(isinstance(result, ArraySubclass))
-class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
- assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))
+ assert_(consistent_subclass(a_inv, a))
+
+class TestInv(InvCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_(isinstance(res, ArraySubclass))
-class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
+
+class TestEigvals(EigvalsCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_(isinstance(res, np.ndarray))
-class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
- assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
+ assert_(consistent_subclass(evectors, a))
+
+class TestEig(EigCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_(isinstance(a, np.ndarray))
-class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
if 'size-0' in tags:
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
- assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
- assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
+ assert_(consistent_subclass(u, a))
+ assert_(consistent_subclass(vt, a))
+
+class TestSVD(SVDCases):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_raises(linalg.LinAlgError, linalg.svd, a)
-class TestCond(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
+
+class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
assert_(np.isfinite(c[1,0]))
-class TestPinv(LinalgSquareTestCase,
- LinalgNonsquareTestCase,
- LinalgGeneralizedSquareTestCase,
- LinalgGeneralizedNonsquareTestCase):
+class PinvCases(LinalgSquareTestCase,
+ LinalgNonsquareTestCase,
+ LinalgGeneralizedSquareTestCase,
+ LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
- assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
+ assert_(consistent_subclass(a_ginv, a))
-class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class TestPinv(PinvCases):
+ pass
+
+
+class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
+
+class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_(res[1].dtype.type is np.float64)
-class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
+class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
if 'size-0' in tags:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
- assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
- assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
+ assert_(consistent_subclass(x, b))
+ assert_(consistent_subclass(residuals, b))
+
+class TestLstsq(LstsqCases):
def test_future_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
# Warning should be raised exactly once (first command)
assert_(len(w) == 1)
+
+@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
class TestMatrixPower(object):
- R90 = array([[0, 1], [-1, 0]])
- Arb22 = array([[4, -7], [-2, 10]])
+
+ rshft_0 = np.eye(4)
+ rshft_1 = rshft_0[[3, 0, 1, 2]]
+ rshft_2 = rshft_0[[2, 3, 0, 1]]
+ rshft_3 = rshft_0[[1, 2, 3, 0]]
+ rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3]
noninv = array([[1, 0], [0, 0]])
- arbfloat = array([[0.1, 3.2], [1.2, 0.7]])
+ stacked = np.block([[[rshft_0]]]*2)
+    # FIXME: the 'e' dtype might work in the future
+ dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
- large = identity(10)
- t = large[1, :].copy()
- large[1, :] = large[0,:]
- large[0, :] = t
- def test_large_power(self):
+ def test_large_power(self, dt):
+ power = matrix_power
+ rshft = self.rshft_1.astype(dt)
assert_equal(
- matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5 + 1), self.R90)
-
- def test_large_power_trailing_zero(self):
+ matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
+ assert_equal(
+ matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)
assert_equal(
- matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5), identity(2))
+ matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)
+ assert_equal(
+ matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)
- def testip_zero(self):
+ def test_power_is_zero(self, dt):
def tz(M):
mz = matrix_power(M, 0)
- assert_equal(mz, identity(M.shape[0]))
- assert_equal(mz.dtype, M.dtype)
- for M in [self.Arb22, self.arbfloat, self.large]:
- tz(M)
-
- def testip_one(self):
- def tz(M):
- mz = matrix_power(M, 1)
- assert_equal(mz, M)
- assert_equal(mz.dtype, M.dtype)
- for M in [self.Arb22, self.arbfloat, self.large]:
- tz(M)
-
- def testip_two(self):
- def tz(M):
- mz = matrix_power(M, 2)
- assert_equal(mz, dot(M, M))
+ assert_equal(mz, identity_like_generalized(M))
assert_equal(mz.dtype, M.dtype)
- for M in [self.Arb22, self.arbfloat, self.large]:
- tz(M)
-
- def testip_invert(self):
- def tz(M):
- mz = matrix_power(M, -1)
- assert_almost_equal(identity(M.shape[0]), dot(mz, M))
- for M in [self.R90, self.Arb22, self.arbfloat, self.large]:
- tz(M)
-
- def test_invert_noninvertible(self):
- import numpy.linalg
- assert_raises(numpy.linalg.linalg.LinAlgError,
- lambda: matrix_power(self.noninv, -1))
-
-
-class TestBoolPower(object):
+
+ for mat in self.rshft_all:
+ tz(mat.astype(dt))
+ if dt != object:
+ tz(self.stacked.astype(dt))
+
+ def test_power_is_one(self, dt):
+ def tz(mat):
+ mz = matrix_power(mat, 1)
+ assert_equal(mz, mat)
+ assert_equal(mz.dtype, mat.dtype)
+
+ for mat in self.rshft_all:
+ tz(mat.astype(dt))
+ if dt != object:
+ tz(self.stacked.astype(dt))
+
+ def test_power_is_two(self, dt):
+ def tz(mat):
+ mz = matrix_power(mat, 2)
+ mmul = matmul if mat.dtype != object else dot
+ assert_equal(mz, mmul(mat, mat))
+ assert_equal(mz.dtype, mat.dtype)
+
+ for mat in self.rshft_all:
+ tz(mat.astype(dt))
+ if dt != object:
+ tz(self.stacked.astype(dt))
+
+ def test_power_is_minus_one(self, dt):
+ def tz(mat):
+ invmat = matrix_power(mat, -1)
+ mmul = matmul if mat.dtype != object else dot
+ assert_almost_equal(
+ mmul(invmat, mat), identity_like_generalized(mat))
+
+ for mat in self.rshft_all:
+ if dt not in self.dtnoinv:
+ tz(mat.astype(dt))
+
+ def test_exceptions_bad_power(self, dt):
+ mat = self.rshft_0.astype(dt)
+ assert_raises(TypeError, matrix_power, mat, 1.5)
+ assert_raises(TypeError, matrix_power, mat, [1])
+
+ def test_exceptions_non_square(self, dt):
+ assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
+ assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
+ assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)
+
+ def test_exceptions_not_invertible(self, dt):
+ if dt in self.dtnoinv:
+ return
+ mat = self.noninv.astype(dt)
+ assert_raises(LinAlgError, matrix_power, mat, -1)
- def test_square(self):
- A = array([[True, False], [True, True]])
- assert_equal(matrix_power(A, 2), A)
-class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
+class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
+
+class TestEigvalsh(object):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_(isinstance(res, np.ndarray))
-class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
+class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
np.asarray(ev2)[..., None, :] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
+
+class TestEigh(object):
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_(isinstance(a, np.ndarray))
-class _TestNorm(object):
-
+class _TestNormBase(object):
dt = None
dec = None
+
+class _TestNormGeneral(_TestNormBase):
+
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
- def test_matrix_return_type(self):
- a = np.array([[1, 0, 1], [0, 1, 1]])
-
- exact_types = np.typecodes['AllInteger']
-
- # float32, complex64, float64, complex128 types are the only types
- # allowed by `linalg`, which performs the matrix operations used
- # within `norm`.
- inexact_types = 'fdFD'
-
- all_types = exact_types + inexact_types
-
- for each_inexact_types in all_types:
- at = a.astype(each_inexact_types)
-
- an = norm(at, -np.inf)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- with suppress_warnings() as sup:
- sup.filter(RuntimeWarning, "divide by zero encountered")
- an = norm(at, -1)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 1.0)
-
- an = norm(at, 1)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 2)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 3.0**(1.0/2.0))
-
- an = norm(at, -2)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 1.0)
-
- an = norm(at, np.inf)
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 'fro')
- assert_(issubclass(an.dtype.type, np.floating))
- assert_almost_equal(an, 2.0)
-
- an = norm(at, 'nuc')
- assert_(issubclass(an.dtype.type, np.floating))
- # Lower bar needed to support low precision floats.
- # They end up being off by 1 in the 7th place.
- old_assert_almost_equal(an, 2.7320508075688772, decimal=6)
-
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
array(c, dtype=self.dt)):
_test(v)
- def test_matrix_2x2(self):
- A = matrix([[1, 3], [5, 7]], dtype=self.dt)
- assert_almost_equal(norm(A), 84 ** 0.5)
- assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
- assert_almost_equal(norm(A, 'nuc'), 10.0)
- assert_almost_equal(norm(A, inf), 12.0)
- assert_almost_equal(norm(A, -inf), 4.0)
- assert_almost_equal(norm(A, 1), 10.0)
- assert_almost_equal(norm(A, -1), 6.0)
- assert_almost_equal(norm(A, 2), 9.1231056256176615)
- assert_almost_equal(norm(A, -2), 0.87689437438234041)
-
- assert_raises(ValueError, norm, A, 'nofro')
- assert_raises(ValueError, norm, A, -3)
- assert_raises(ValueError, norm, A, 0)
-
- def test_matrix_3x3(self):
- # This test has been added because the 2x2 example
- # happened to have equal nuclear norm and induced 1-norm.
- # The 1/10 scaling factor accommodates the absolute tolerance
- # used in assert_almost_equal.
- A = (1 / 10) * \
- np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
- assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
- assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
- assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
- assert_almost_equal(norm(A, inf), 1.1)
- assert_almost_equal(norm(A, -inf), 0.6)
- assert_almost_equal(norm(A, 1), 1.0)
- assert_almost_equal(norm(A, -1), 0.4)
- assert_almost_equal(norm(A, 2), 0.88722940323461277)
- assert_almost_equal(norm(A, -2), 0.19456584790481812)
-
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
+
+class _TestNorm2D(_TestNormBase):
+ # Define the part for 2d arrays separately, so we can subclass this
+ # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
+ array = np.array
+
+ def test_matrix_empty(self):
+ assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
+
+ def test_matrix_return_type(self):
+ a = self.array([[1, 0, 1], [0, 1, 1]])
+
+ exact_types = np.typecodes['AllInteger']
+
+ # float32, complex64, float64, complex128 types are the only types
+ # allowed by `linalg`, which performs the matrix operations used
+ # within `norm`.
+ inexact_types = 'fdFD'
+
+ all_types = exact_types + inexact_types
+
+ for each_inexact_types in all_types:
+ at = a.astype(each_inexact_types)
+
+ an = norm(at, -np.inf)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "divide by zero encountered")
+ an = norm(at, -1)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, 1)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 2)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 3.0**(1.0/2.0))
+
+ an = norm(at, -2)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, np.inf)
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'fro')
+ assert_(issubclass(an.dtype.type, np.floating))
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'nuc')
+ assert_(issubclass(an.dtype.type, np.floating))
+ # Lower bar needed to support low precision floats.
+ # They end up being off by 1 in the 7th place.
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
+
+ def test_matrix_2x2(self):
+ A = self.array([[1, 3], [5, 7]], dtype=self.dt)
+ assert_almost_equal(norm(A), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 10.0)
+ assert_almost_equal(norm(A, inf), 12.0)
+ assert_almost_equal(norm(A, -inf), 4.0)
+ assert_almost_equal(norm(A, 1), 10.0)
+ assert_almost_equal(norm(A, -1), 6.0)
+ assert_almost_equal(norm(A, 2), 9.1231056256176615)
+ assert_almost_equal(norm(A, -2), 0.87689437438234041)
+
+ assert_raises(ValueError, norm, A, 'nofro')
+ assert_raises(ValueError, norm, A, -3)
+ assert_raises(ValueError, norm, A, 0)
+
+ def test_matrix_3x3(self):
+ # This test has been added because the 2x2 example
+ # happened to have equal nuclear norm and induced 1-norm.
+ # The 1/10 scaling factor accommodates the absolute tolerance
+ # used in assert_almost_equal.
+ A = (1 / 10) * \
+ self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
+ assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
+ assert_almost_equal(norm(A, inf), 1.1)
+ assert_almost_equal(norm(A, -inf), 0.6)
+ assert_almost_equal(norm(A, 1), 1.0)
+ assert_almost_equal(norm(A, -1), 0.4)
+ assert_almost_equal(norm(A, 2), 0.88722940323461277)
+ assert_almost_equal(norm(A, -2), 0.19456584790481812)
+
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
- A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
+ A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
assert_raises(ValueError, norm, B, None, (0, 1, 2))
+class _TestNorm(_TestNorm2D, _TestNormGeneral):
+ pass
+
+
class TestNorm_NonSystematic(object):
def test_longdouble_norm(self):
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
-class TestNormDouble(_TestNorm):
+# Separate definitions so we can use them for matrix tests.
+class _TestNormDoubleBase(_TestNormBase):
dt = np.double
dec = 12
-class TestNormSingle(_TestNorm):
+class _TestNormSingleBase(_TestNormBase):
dt = np.float32
dec = 6
-class TestNormInt64(_TestNorm):
+class _TestNormInt64Base(_TestNormBase):
dt = np.int64
dec = 12
+class TestNormDouble(_TestNorm, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingle(_TestNorm, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64(_TestNorm, _TestNormInt64Base):
+ pass
+
+
class TestMatrixRank(object):
def test_matrix_rank(self):
class TestQR(object):
+    # Define the array class here, so we can run these tests on matrices elsewhere.
+ array = np.array
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
- a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
+ a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
# Test double
h, tau = linalg.qr(a, mode='raw')
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
- a = array([[1, 2], [3, 4]])
- b = array([[1, 2], [3, 4], [5, 6]])
+ a = self.array([[1, 2], [3, 4]])
+ b = self.array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
- self.check_qr(matrix(m1))
+
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
- self.check_qr(matrix(m1))
def test_0_size(self):
# There may be good ways to do (some of this) reasonably:
raise SkipTest('Numpy xerbla not linked in.')
+def test_sdot_bug_8577():
+    # Regression test that loading certain other libraries does not
+    # lead to wrong results in float32 linear algebra.
+ #
+ # There's a bug gh-8577 on OSX that can trigger this, and perhaps
+ # there are also other situations in which it occurs.
+ #
+ # Do the check in a separate process.
+
+ bad_libs = ['PyQt5.QtWidgets', 'IPython']
+
+ template = textwrap.dedent("""
+ import sys
+ {before}
+ try:
+ import {bad_lib}
+ except ImportError:
+ sys.exit(0)
+ {after}
+ x = np.ones(2, dtype=np.float32)
+ sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
+ """)
+
+ for bad_lib in bad_libs:
+ code = template.format(before="import numpy as np", after="",
+ bad_lib=bad_lib)
+ subprocess.check_call([sys.executable, "-c", code])
+
+ # Swapped import order
+ code = template.format(after="import numpy as np", before="",
+ bad_lib=bad_lib)
+ subprocess.check_call([sys.executable, "-c", code])
+
+
class TestMultiDot(object):
def test_basic_function_with_three_arguments(self):
def test_too_few_input_arrays(self):
assert_raises(ValueError, multi_dot, [])
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy import linalg, arange, float64, array, dot, transpose
from numpy.testing import (
- run_module_suite, assert_, assert_raises, assert_equal, assert_array_equal,
+ assert_, assert_raises, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_array_less
)
u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
# check results just in case
assert_array_almost_equal(u_lstsq, u)
-
-
-if __name__ == '__main__':
- run_module_suite()
int *info);
extern int
+FNAME(sgelsd)(int *m, int *n, int *nrhs,
+ float a[], int *lda, float b[], int *ldb,
+ float s[], float *rcond, int *rank,
+ float work[], int *lwork, int iwork[],
+ int *info);
+extern int
FNAME(dgelsd)(int *m, int *n, int *nrhs,
double a[], int *lda, double b[], int *ldb,
double s[], double *rcond, int *rank,
double work[], int *lwork, int iwork[],
int *info);
extern int
+FNAME(cgelsd)(int *m, int *n, int *nrhs,
+ f2c_complex a[], int *lda,
+ f2c_complex b[], int *ldb,
+ float s[], float *rcond, int *rank,
+ f2c_complex work[], int *lwork,
+ float rwork[], int iwork[],
+ int *info);
+extern int
FNAME(zgelsd)(int *m, int *n, int *nrhs,
f2c_doublecomplex a[], int *lda,
f2c_doublecomplex b[], int *ldb,
*****************************************************************************
*/
-static NPY_INLINE void *
-offset_ptr(void* ptr, ptrdiff_t offset)
-{
- return (void*)((npy_uint8*)ptr + offset);
-}
-
static NPY_INLINE int
get_fp_invalid_and_clear(void)
{
int status;
- status = npy_clear_floatstatus();
+ status = npy_clear_floatstatus_barrier((char*)&status);
return !!(status & NPY_FPE_INVALID);
}
npy_set_floatstatus_invalid();
}
else {
- npy_clear_floatstatus();
+ npy_clear_floatstatus_barrier((char*)&error_occurred);
}
}
* columns: number of columns in the matrix
 * row_strides: the number of bytes between consecutive rows.
* column_strides: the number of bytes between consecutive columns.
+ * output_lead_dim: BLAS/LAPACK-side leading dimension, in elements
*/
typedef struct linearize_data_struct
{
npy_intp columns;
npy_intp row_strides;
npy_intp column_strides;
+ npy_intp output_lead_dim;
} LINEARIZE_DATA_t;
+static NPY_INLINE void
+init_linearize_data_ex(LINEARIZE_DATA_t *lin_data,
+ npy_intp rows,
+ npy_intp columns,
+ npy_intp row_strides,
+ npy_intp column_strides,
+ npy_intp output_lead_dim)
+{
+ lin_data->rows = rows;
+ lin_data->columns = columns;
+ lin_data->row_strides = row_strides;
+ lin_data->column_strides = column_strides;
+ lin_data->output_lead_dim = output_lead_dim;
+}
+
static NPY_INLINE void
init_linearize_data(LINEARIZE_DATA_t *lin_data,
npy_intp rows,
npy_intp row_strides,
npy_intp column_strides)
{
- lin_data->rows = rows;
- lin_data->columns = columns;
- lin_data->row_strides = row_strides;
- lin_data->column_strides = column_strides;
+ init_linearize_data_ex(
+ lin_data, rows, columns, row_strides, column_strides, columns);
}
static NPY_INLINE void
params->row_strides, params->column_strides);
}
-
-static NPY_INLINE float
-FLOAT_add(float op1, float op2)
-{
- return op1 + op2;
-}
-
-static NPY_INLINE double
-DOUBLE_add(double op1, double op2)
-{
- return op1 + op2;
-}
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_add(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0] + op2.array[0];
- result.array[1] = op1.array[1] + op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_add(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0] + op2.array[0];
- result.array[1] = op1.array[1] + op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE float
-FLOAT_mul(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE double
-DOUBLE_mul(double op1, double op2)
-{
- return op1*op2;
-}
-
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_mul(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
- result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_mul(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
- result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
-
- return result;
-}
-
-static NPY_INLINE float
-FLOAT_mulc(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE double
-DOUBLE_mulc(float op1, float op2)
-{
- return op1*op2;
-}
-
-static NPY_INLINE COMPLEX_t
-CFLOAT_mulc(COMPLEX_t op1, COMPLEX_t op2)
-{
- COMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
- result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
-
- return result;
-}
-
-static NPY_INLINE DOUBLECOMPLEX_t
-CDOUBLE_mulc(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
-{
- DOUBLECOMPLEX_t result;
- result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
- result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
-
- return result;
-}
-
static NPY_INLINE void
print_FLOAT(npy_float s)
{
INIT_OUTER_LOOP_5\
npy_intp s5 = *steps++;
+#define INIT_OUTER_LOOP_7 \
+ INIT_OUTER_LOOP_6\
+ npy_intp s6 = *steps++;
+
#define BEGIN_OUTER_LOOP_2 \
for (N_ = 0;\
N_ < dN;\
args[4] += s4,\
args[5] += s5) {
+#define BEGIN_OUTER_LOOP_7 \
+ for (N_ = 0;\
+ N_ < dN;\
+ N_++, args[0] += s0,\
+ args[1] += s1,\
+ args[2] += s2,\
+ args[3] += s3,\
+ args[4] += s4,\
+ args[5] += s5,\
+ args[6] += s6) {
+
#define END_OUTER_LOOP }
static NPY_INLINE void
#typ = float, double, COMPLEX_t, DOUBLECOMPLEX_t#
#copy = scopy, dcopy, ccopy, zcopy#
#nan = s_nan, d_nan, c_nan, z_nan#
+ #zero = s_zero, d_zero, c_zero, z_zero#
*/
static NPY_INLINE void *
linearize_@TYPE@_matrix(void *dst_in,
}
}
src += data->row_strides/sizeof(@typ@);
- dst += data->columns;
+ dst += data->output_lead_dim;
}
return rv;
} else {
sizeof(@typ@));
}
}
- src += data->columns;
+ src += data->output_lead_dim;
dst += data->row_strides/sizeof(@typ@);
}
}
}
+static NPY_INLINE void
+zero_@TYPE@_matrix(void *dst_in, const LINEARIZE_DATA_t* data)
+{
+ @typ@ *dst = (@typ@ *) dst_in;
+
+ int i, j;
+ for (i = 0; i < data->rows; i++) {
+ @typ@ *cp = dst;
+ ptrdiff_t cs = data->column_strides/sizeof(@typ@);
+ for (j = 0; j < data->columns; ++j) {
+ *cp = @zero@;
+ cp += cs;
+ }
+ dst += data->row_strides/sizeof(@typ@);
+ }
+}
+
/**end repeat**/
/* identity square matrix generation */
/**end repeat**/
+
+/* -------------------------------------------------------------------------- */
+ /* least squares */
+
+typedef struct gelsd_params_struct
+{
+ fortran_int M;
+ fortran_int N;
+ fortran_int NRHS;
+ void *A;
+ fortran_int LDA;
+ void *B;
+ fortran_int LDB;
+ void *S;
+ void *RCOND;
+ fortran_int RANK;
+ void *WORK;
+ fortran_int LWORK;
+ void *RWORK;
+ void *IWORK;
+} GELSD_PARAMS_t;
+
+
+static NPY_INLINE void
+dump_gelsd_params(const char *name,
+ GELSD_PARAMS_t *params)
+{
+ TRACE_TXT("\n%s:\n"\
+
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+
+ "%14s: %18p\n",
+
+ name,
+
+ "A", params->A,
+ "B", params->B,
+ "S", params->S,
+ "WORK", params->WORK,
+ "RWORK", params->RWORK,
+ "IWORK", params->IWORK,
+
+ "M", (int)params->M,
+ "N", (int)params->N,
+ "NRHS", (int)params->NRHS,
+ "LDA", (int)params->LDA,
+ "LDB", (int)params->LDB,
+ "LWORK", (int)params->LWORK,
+ "RANK", (int)params->RANK,
+
+ "RCOND", params->RCOND);
+}
+
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE#
+ #lapack_func=sgelsd,dgelsd#
+ #ftyp=fortran_real,fortran_doublereal#
+ */
+
+static NPY_INLINE fortran_int
+call_@lapack_func@(GELSD_PARAMS_t *params)
+{
+ fortran_int rv;
+    LAPACK(@lapack_func@)(&params->M, &params->N, &params->NRHS,
+                          params->A, &params->LDA,
+                          params->B, &params->LDB,
+                          params->S,
+                          params->RCOND, &params->RANK,
+                          params->WORK, &params->LWORK,
+ params->IWORK,
+ &rv);
+ return rv;
+}
+
+static NPY_INLINE int
+init_@lapack_func@(GELSD_PARAMS_t *params,
+ fortran_int m,
+ fortran_int n,
+ fortran_int nrhs)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *mem_buff2 = NULL;
+ npy_uint8 *a, *b, *s, *work, *iwork;
+ fortran_int min_m_n = fortran_int_min(m, n);
+ fortran_int max_m_n = fortran_int_max(m, n);
+ size_t safe_min_m_n = min_m_n;
+ size_t safe_max_m_n = max_m_n;
+ size_t safe_m = m;
+ size_t safe_n = n;
+ size_t safe_nrhs = nrhs;
+
+ size_t a_size = safe_m * safe_n * sizeof(@ftyp@);
+ size_t b_size = safe_max_m_n * safe_nrhs * sizeof(@ftyp@);
+ size_t s_size = safe_min_m_n * sizeof(@ftyp@);
+
+ fortran_int work_count;
+ size_t work_size;
+ size_t iwork_size;
+ fortran_int lda = fortran_int_max(1, m);
+ fortran_int ldb = fortran_int_max(1, fortran_int_max(m,n));
+
+ mem_buff = malloc(a_size + b_size + s_size);
+
+ if (!mem_buff)
+ goto error;
+
+ a = mem_buff;
+ b = a + a_size;
+ s = b + b_size;
+
+
+ params->M = m;
+ params->N = n;
+ params->NRHS = nrhs;
+ params->A = a;
+ params->B = b;
+ params->S = s;
+ params->LDA = lda;
+ params->LDB = ldb;
+
+ {
+ /* compute optimal work size */
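+        /* Per the LAPACK convention, calling ?gelsd with LWORK == -1
+         * performs a workspace query: the optimal LWORK is returned in
+         * WORK[0] and the required IWORK size in IWORK[0]. */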
+ @ftyp@ work_size_query;
+ fortran_int iwork_size_query;
+
+ params->WORK = &work_size_query;
+ params->IWORK = &iwork_size_query;
+ params->RWORK = NULL;
+ params->LWORK = -1;
+
+ if (call_@lapack_func@(params) != 0)
+ goto error;
+
+ work_count = (fortran_int)work_size_query;
+
+ work_size = (size_t) work_size_query * sizeof(@ftyp@);
+ iwork_size = (size_t)iwork_size_query * sizeof(fortran_int);
+ }
+
+ mem_buff2 = malloc(work_size + iwork_size);
+ if (!mem_buff2)
+ goto error;
+
+ work = mem_buff2;
+ iwork = work + work_size;
+
+ params->WORK = work;
+ params->RWORK = NULL;
+ params->IWORK = iwork;
+ params->LWORK = work_count;
+
+ return 1;
+ error:
+ TRACE_TXT("%s failed init\n", __FUNCTION__);
+ free(mem_buff);
+ free(mem_buff2);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+/**end repeat**/
+
+/**begin repeat
+ #TYPE=CFLOAT,CDOUBLE#
+ #ftyp=fortran_complex,fortran_doublecomplex#
+ #frealtyp=fortran_real,fortran_doublereal#
+ #typ=COMPLEX_t,DOUBLECOMPLEX_t#
+ #lapack_func=cgelsd,zgelsd#
+ */
+
+static NPY_INLINE fortran_int
+call_@lapack_func@(GELSD_PARAMS_t *params)
+{
+ fortran_int rv;
+    LAPACK(@lapack_func@)(&params->M, &params->N, &params->NRHS,
+                          params->A, &params->LDA,
+                          params->B, &params->LDB,
+                          params->S,
+                          params->RCOND, &params->RANK,
+                          params->WORK, &params->LWORK,
+ params->RWORK, params->IWORK,
+ &rv);
+ return rv;
+}
+
+static NPY_INLINE int
+init_@lapack_func@(GELSD_PARAMS_t *params,
+ fortran_int m,
+ fortran_int n,
+ fortran_int nrhs)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *mem_buff2 = NULL;
+ npy_uint8 *a, *b, *s, *work, *iwork, *rwork;
+ fortran_int min_m_n = fortran_int_min(m, n);
+ fortran_int max_m_n = fortran_int_max(m, n);
+ size_t safe_min_m_n = min_m_n;
+ size_t safe_max_m_n = max_m_n;
+ size_t safe_m = m;
+ size_t safe_n = n;
+ size_t safe_nrhs = nrhs;
+
+ size_t a_size = safe_m * safe_n * sizeof(@ftyp@);
+ size_t b_size = safe_max_m_n * safe_nrhs * sizeof(@ftyp@);
+ size_t s_size = safe_min_m_n * sizeof(@frealtyp@);
+
+ fortran_int work_count;
+ size_t work_size, rwork_size, iwork_size;
+ fortran_int lda = fortran_int_max(1, m);
+ fortran_int ldb = fortran_int_max(1, fortran_int_max(m,n));
+
+ mem_buff = malloc(a_size + b_size + s_size);
+
+ if (!mem_buff)
+ goto error;
+
+ a = mem_buff;
+ b = a + a_size;
+ s = b + b_size;
+
+
+ params->M = m;
+ params->N = n;
+ params->NRHS = nrhs;
+ params->A = a;
+ params->B = b;
+ params->S = s;
+ params->LDA = lda;
+ params->LDB = ldb;
+
+ {
+ /* compute optimal work size */
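+        /* As above, LWORK == -1 requests a workspace query; the complex
+         * drivers also return the required RWORK size in RWORK[0]. */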
+ @ftyp@ work_size_query;
+ @frealtyp@ rwork_size_query;
+ fortran_int iwork_size_query;
+
+ params->WORK = &work_size_query;
+ params->IWORK = &iwork_size_query;
+ params->RWORK = &rwork_size_query;
+ params->LWORK = -1;
+
+ if (call_@lapack_func@(params) != 0)
+ goto error;
+
+ work_count = (fortran_int)work_size_query.r;
+
+        work_size = (size_t)work_size_query.r * sizeof(@ftyp@);
+ rwork_size = (size_t)rwork_size_query * sizeof(@frealtyp@);
+ iwork_size = (size_t)iwork_size_query * sizeof(fortran_int);
+ }
+
+ mem_buff2 = malloc(work_size + rwork_size + iwork_size);
+ if (!mem_buff2)
+ goto error;
+
+ work = mem_buff2;
+ rwork = work + work_size;
+ iwork = rwork + rwork_size;
+
+ params->WORK = work;
+ params->RWORK = rwork;
+ params->IWORK = iwork;
+ params->LWORK = work_count;
+
+ return 1;
+ error:
+ TRACE_TXT("%s failed init\n", __FUNCTION__);
+ free(mem_buff);
+ free(mem_buff2);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+/**end repeat**/
+
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #REALTYPE=FLOAT,DOUBLE,FLOAT,DOUBLE#
+ #lapack_func=sgelsd,dgelsd,cgelsd,zgelsd#
+ #dot_func=sdot,ddot,cdotc,zdotc#
+ #typ = npy_float, npy_double, npy_cfloat, npy_cdouble#
+ #basetyp = npy_float, npy_double, npy_float, npy_double#
+ #ftyp = fortran_real, fortran_doublereal,
+ fortran_complex, fortran_doublecomplex#
+ #cmplx = 0, 0, 1, 1#
+ */
+static NPY_INLINE void
+release_@lapack_func@(GELSD_PARAMS_t* params)
+{
+ /* A and WORK contain allocated blocks */
+ free(params->A);
+ free(params->WORK);
+ memset(params, 0, sizeof(*params));
+}
+
+/** Compute the squared l2 norm of a contiguous vector */
+static @basetyp@
+@TYPE@_abs2(@typ@ *p, npy_intp n) {
+ npy_intp i;
+ @basetyp@ res = 0;
+ for (i = 0; i < n; i++) {
+ @typ@ el = p[i];
+#if @cmplx@
+ res += el.real*el.real + el.imag*el.imag;
+#else
+ res += el*el;
+#endif
+ }
+ return res;
+}
+
+static void
+@TYPE@_lstsq(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ GELSD_PARAMS_t params;
+ int error_occurred = get_fp_invalid_and_clear();
+ fortran_int n, m, nrhs;
+ fortran_int excess;
+
+ INIT_OUTER_LOOP_7
+
+ m = (fortran_int)dimensions[0];
+ n = (fortran_int)dimensions[1];
+ nrhs = (fortran_int)dimensions[2];
+ excess = m - n;
+
+    if (init_@lapack_func@(&params, m, n, nrhs)) {
+ LINEARIZE_DATA_t a_in, b_in, x_out, s_out, r_out;
+
+ init_linearize_data(&a_in, n, m, steps[1], steps[0]);
+ init_linearize_data_ex(&b_in, nrhs, m, steps[3], steps[2], fortran_int_max(n, m));
+ init_linearize_data_ex(&x_out, nrhs, n, steps[5], steps[4], fortran_int_max(n, m));
+ init_linearize_data(&r_out, 1, nrhs, 1, steps[6]);
+ init_linearize_data(&s_out, 1, fortran_int_min(n, m), 1, steps[7]);
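+
+        /* ?gelsd overwrites B in place: on exit its first n rows hold the
+         * solution X and, when m > n, rows n..m-1 hold the residual
+         * components; hence b_in and x_out share the leading dimension
+         * max(m, n). */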
+
+ BEGIN_OUTER_LOOP_7
+ int not_ok;
+ linearize_@TYPE@_matrix(params.A, args[0], &a_in);
+ linearize_@TYPE@_matrix(params.B, args[1], &b_in);
+ params.RCOND = args[2];
+            not_ok = call_@lapack_func@(&params);
+ if (!not_ok) {
+ delinearize_@TYPE@_matrix(args[3], params.B, &x_out);
+ *(npy_int*) args[5] = params.RANK;
+ delinearize_@REALTYPE@_matrix(args[6], params.S, &s_out);
+
+ /* Note that linalg.lstsq discards this when excess == 0 */
+ if (excess >= 0 && params.RANK == n) {
+ /* Compute the residuals as the square sum of each column */
+ int i;
+ char *resid = args[4];
+ @ftyp@ *components = (@ftyp@ *)params.B + n;
+ for (i = 0; i < nrhs; i++) {
+ @ftyp@ *vector = components + i*m;
+ /* Numpy and fortran floating types are the same size,
+ * so this cast is safe */
+ @basetyp@ abs2 = @TYPE@_abs2((@typ@ *)vector, excess);
+ memcpy(
+ resid + i*r_out.column_strides,
+ &abs2, sizeof(abs2));
+ }
+ }
+ else {
+ /* Note that this is always discarded by linalg.lstsq */
+ nan_@REALTYPE@_matrix(args[4], &r_out);
+ }
+ } else {
+ error_occurred = 1;
+ nan_@TYPE@_matrix(args[3], &x_out);
+ nan_@REALTYPE@_matrix(args[4], &r_out);
+ *(npy_int*) args[5] = -1;
+ nan_@REALTYPE@_matrix(args[6], &s_out);
+ }
+ END_OUTER_LOOP
+
+        release_@lapack_func@(&params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+
+/**end repeat**/
+
#pragma GCC diagnostic pop
/* -------------------------------------------------------------------------- */
GUFUNC_FUNC_ARRAY_REAL_COMPLEX(svd_N);
GUFUNC_FUNC_ARRAY_REAL_COMPLEX(svd_S);
GUFUNC_FUNC_ARRAY_REAL_COMPLEX(svd_A);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(lstsq);
GUFUNC_FUNC_ARRAY_EIG(eig);
GUFUNC_FUNC_ARRAY_EIG(eigvals);
NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE
};
+/* A, b, rcond, x, resid, rank, s, */
+static char lstsq_types[] = {
+ NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_INT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_INT, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_INT, NPY_FLOAT,
+ NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_INT, NPY_DOUBLE,
+};
+
typedef struct gufunc_descriptor_struct {
char *name;
char *signature;
"eigvals",
"(m,m)->(m)",
"eigvals on the last two dimension and broadcast to the rest. \n"\
- "Results in a vector of eigenvalues. \n"\
- " \"(m,m)->(m),(m,m)\" \n",
+ "Results in a vector of eigenvalues. \n",
3, 1, 1,
FUNC_ARRAY_NAME(eigvals),
eigvals_types
},
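+    /* The gufunc signature language cannot express an output of length
+       min(m, n), so lstsq is registered twice: lstsq_m writes the
+       singular values into an (m)-shaped output and lstsq_n into an
+       (n)-shaped one; linalg.lstsq dispatches on whether m <= n. */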
+ {
+ "lstsq_m",
+ "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(m)",
+ "least squares on the last two dimensions and broadcast to the rest. \n"\
+ "For m <= n. \n",
+ 4, 3, 4,
+ FUNC_ARRAY_NAME(lstsq),
+ lstsq_types
+ },
+ {
+ "lstsq_n",
+ "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(n)",
+ "least squares on the last two dimensions and broadcast to the rest. \n"\
+ "For m >= n, meaning that residuals are produced. \n",
+ 4, 3, 4,
+ FUNC_ARRAY_NAME(lstsq),
+ lstsq_types
+ }
};
static void
#endif
#if defined(NPY_PY3K)
-#define RETVAL m
+#define RETVAL(x) x
PyObject *PyInit__umath_linalg(void)
#else
-#define RETVAL
+#define RETVAL(x)
PyMODINIT_FUNC
init_umath_linalg(void)
#endif
m = Py_InitModule(UMATH_LINALG_MODULE_NAME, UMath_LinAlgMethods);
#endif
if (m == NULL) {
- return RETVAL;
+ return RETVAL(NULL);
}
import_array();
if (PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"cannot load _umath_linalg module.");
+ return RETVAL(NULL);
}
- return RETVAL;
+ return RETVAL(m);
}
__all__ += core.__all__
__all__ += extras.__all__
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
from __future__ import division, print_function
import timeit
mask = np.isclose(xnew, value, atol=atol, rtol=rtol)
else:
mask = umath.equal(xnew, value)
- return masked_array(
- xnew, mask=mask, copy=copy, fill_value=value, shrink=shrink)
+ ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value)
+ if shrink:
+ ret.shrink_mask()
+ return ret
def masked_invalid(a, copy=True):
# FIXME _sharedmask is never used.
_sharedmask = True
# Process mask.
- # Number of named fields (or zero if none)
- names_ = _data.dtype.names or ()
# Type of the mask
- if names_:
- mdtype = make_mask_descr(_data.dtype)
- else:
- mdtype = MaskType
+ mdtype = make_mask_descr(_data.dtype)
if mask is nomask:
# Case 1. : no mask in input.
_data._mask = mask
_data._sharedmask = False
else:
+ _data._sharedmask = not copy
if copy:
_data._mask = _data._mask.copy()
- _data._sharedmask = False
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
- else:
- _data._sharedmask = True
else:
# Case 2. : With a mask in input.
# If mask is boolean, create an array of True or False
_data._mask = mask
_data._sharedmask = not copy
else:
- if names_:
+ if _data.dtype.names:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
_recursive_or(af, bf)
else:
af |= bf
- return
+
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
order = "K"
_mask = _mask.astype(_mask_dtype, order)
-
+ else:
+ # Take a view so shape changes, etc., do not propagate back.
+ _mask = _mask.view()
else:
_mask = nomask
returned object (this is equivalent to setting the ``type``
parameter).
type : Python type, optional
- Type of the returned view, e.g., ndarray or matrix. Again, the
+ Type of the returned view, either ndarray or a subclass. The
default None results in type preservation.
Notes
_mask[indx] = mindx
return
- def __setattr__(self, attr, value):
- super(MaskedArray, self).__setattr__(attr, value)
- if attr == 'dtype' and self._mask is not nomask:
- self._mask = self._mask.view(make_mask_descr(value), ndarray)
- # Try to reset the shape of the mask (if we don't have a void)
- # This raises a ValueError if the dtype change won't work
+ # Define so that we can overwrite the setter.
+ @property
+ def dtype(self):
+ return super(MaskedArray, self).dtype
+
+ @dtype.setter
+ def dtype(self, dtype):
+ super(MaskedArray, type(self)).dtype.__set__(self, dtype)
+ if self._mask is not nomask:
+ self._mask = self._mask.view(make_mask_descr(dtype), ndarray)
+ # Try to reset the shape of the mask (if we don't have a void).
+ # This raises a ValueError if the dtype change won't work.
try:
self._mask.shape = self.shape
except (AttributeError, TypeError):
pass
+ @property
+ def shape(self):
+ return super(MaskedArray, self).shape
+
+ @shape.setter
+ def shape(self, shape):
+ super(MaskedArray, type(self)).shape.__set__(self, shape)
+ # Cannot use self._mask, since it may not (yet) exist when a
+ # masked matrix sets the shape.
+ if getmask(self) is not nomask:
+ self._mask.shape = self.shape
+
def __setmask__(self, mask, copy=False):
"""
Set the mask.
>>> type(x.filled())
<type 'numpy.ndarray'>
- Subclassing is preserved. This means that if the data part of the masked
- array is a matrix, `filled` returns a matrix:
-
- >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
- >>> x.filled()
- matrix([[ 1, 999999],
- [999999, 4]])
+ Subclassing is preserved. This means that if, e.g., the data part of
+    the masked array is a recarray, `filled` returns a recarray:
+
+ >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
+ >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
+ >>> m.filled()
+ rec.array([(999999, 2), ( -3, 999999)],
+ dtype=[('f0', '<i8'), ('f1', '<i8')])
"""
m = self._mask
if m is nomask:
originally intended.
Until then, the axis should be given explicitly when
``arr.ndim > 1``, to avoid a FutureWarning.
- kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
axis : int, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
- kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
sidx = self.argsort(axis=axis, kind=kind, order=order,
fill_value=fill_value, endwith=endwith)
- # save memory for 1d arrays
- if self.ndim == 1:
- idx = sidx
- else:
- idx = list(np.ix_(*[np.arange(x) for x in self.shape]))
- idx[axis] = sidx
- idx = tuple(idx)
-
- self[...] = self[idx]
+ self[...] = np.take_along_axis(self, sidx, axis=axis)
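# A minimal sketch of what the simplified ``sort`` above relies on:
# ``np.take_along_axis`` (new in numpy 1.15) applies an ``argsort`` index
# array along a single axis.
import numpy as np

a = np.array([[3, 1, 2],
              [9, 7, 8]])
idx = np.argsort(a, axis=1)
# Equivalent to np.sort(a, axis=1), with no np.ix_ mesh grid needed.
assert np.array_equal(np.take_along_axis(a, idx, axis=1),
                      np.sort(a, axis=1))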
def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
"""
# precedent for this with `np.bool_` scalars.
return self
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
def __setattr__(self, attr, value):
if not self.__has_singleton():
# allow the singleton to be initialized
return np.ma.minimum_fill_value(asorted)
return s
- counts = count(asorted, axis=axis)
+ counts = count(asorted, axis=axis, keepdims=True)
h = counts // 2
- # create indexing mesh grid for all but reduced axis
- axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape)
- if i != axis]
- ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij')
+ # duplicate high if odd number of elements so mean does nothing
+ odd = counts % 2 == 1
+ l = np.where(odd, h, h-1)
- # insert indices of low and high median
- ind.insert(axis, h - 1)
- low = asorted[tuple(ind)]
- ind[axis] = np.minimum(h, asorted.shape[axis] - 1)
- high = asorted[tuple(ind)]
+ lh = np.concatenate([l,h], axis=axis)
+
+ # get low and high median
+ low_high = np.take_along_axis(asorted, lh, axis=axis)
def replace_masked(s):
        # Replace masked entries with minimum_fill_value unless all values
        # are masked.  This is required because the sort order of values
        # equal to or larger than the fill value is undefined, so a valid
        # value may end up placed after a masked one, e.g. [4, --, inf].
if np.ma.is_masked(s):
- rep = (~np.all(asorted.mask, axis=axis)) & s.mask
+ rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
s.data[rep] = np.ma.minimum_fill_value(asorted)
s.mask[rep] = False
- replace_masked(low)
- replace_masked(high)
-
- # duplicate high if odd number of elements so mean does nothing
- odd = counts % 2 == 1
- np.copyto(low, high, where=odd)
- # not necessary for scalar True/False masks
- try:
- np.copyto(low.mask, high.mask, where=odd)
- except Exception:
- pass
+ replace_masked(low_high)
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
- s = np.ma.sum([low, high], axis=0, out=out)
+ s = np.ma.sum(low_high, axis=axis, out=out)
np.true_divide(s.data, 2., casting='unsafe', out=s.data)
s = np.lib.utils._median_nancheck(asorted, s, axis, out)
else:
- s = np.ma.mean([low, high], axis=0, out=out)
+ s = np.ma.mean(low_high, axis=axis, out=out)
return s
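# A minimal sketch of the low/high median selection above, mimicked here for
# a plain ndarray; for an odd count both indices coincide, so taking the
# mean changes nothing.
import numpy as np

a = np.array([[1., 3., 2., 5.],
              [4., 0., 6., 8.]])
asorted = np.sort(a, axis=1)
counts = np.full((2, 1), a.shape[1])          # stand-in for keepdims counts
h = counts // 2                               # index of the high median
l = np.where(counts % 2 == 1, h, h - 1)       # index of the low median
low_high = np.take_along_axis(asorted, np.concatenate([l, h], axis=1), axis=1)
assert np.allclose(low_high.mean(axis=1), np.median(a, axis=1))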
"""
concatenate = staticmethod(concatenate)
- @staticmethod
- def makemat(arr):
- return array(arr.data.view(np.matrix), mask=arr.mask)
+ @classmethod
+ def makemat(cls, arr):
+ # There used to be a view as np.matrix here, but we may eventually
+ # deprecate that class. In preparation, we use the unmasked version
+ # to construct the matrix (with copy=False for backwards compatibility
+        # with the .view).
+ data = super(MAxisConcatenator, cls).makemat(arr.data, copy=False)
+ return array(data, mask=arr.mask)
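# A minimal sketch, assuming the behavior the tests in this diff assert:
# ``makemat`` backs the 'r'/'c' directives of ``np.ma.mr_``, the masked
# counterpart of ``np.r_``.  The wrapper stays a masked array while the
# data part is still an np.matrix.
import numpy as np

actual = np.ma.mr_['r', 1, 2, 3]
assert isinstance(actual, np.ma.MaskedArray)
assert isinstance(actual.data, np.matrix)
assert actual.shape == (1, 3)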
def __getitem__(self, key):
# matrix builder syntax, like 'a, b; c, d'
import pickle
import operator
import itertools
import sys
import textwrap
+import pytest
+
from functools import reduce
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import (
- run_module_suite, assert_raises, assert_warns, suppress_warnings, dec
+ assert_raises, assert_warns, suppress_warnings
)
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
-from numpy.testing import dec
pi = np.pi
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
- def test_matrix_indexing(self):
- # Tests conversions and indexing
- x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
- x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
- x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
- x4 = array(x1)
- # test conversion to strings
- str(x2) # raises?
- repr(x2) # raises?
- # tests of indexing
- assert_(type(x2[1, 0]) is type(x1[1, 0]))
- assert_(x1[1, 0] == x2[1, 0])
- assert_(x2[1, 1] is masked)
- assert_equal(x1[0, 2], x2[0, 2])
- assert_equal(x1[0, 1:], x2[0, 1:])
- assert_equal(x1[:, 2], x2[:, 2])
- assert_equal(x1[:], x2[:])
- assert_equal(x1[1:], x3[1:])
- x1[0, 2] = 9
- x2[0, 2] = 9
- assert_equal(x1, x2)
- x1[0, 1:] = 99
- x2[0, 1:] = 99
- assert_equal(x1, x2)
- x2[0, 1] = masked
- assert_equal(x1, x2)
- x2[0, 1:] = masked
- assert_equal(x1, x2)
- x2[0, :] = x1[0, :]
- x2[0, 1] = masked
- assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
- x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
- assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
- assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
- x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
- assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
- assert_(allequal(x4[1], array([1, 2, 3])))
- x1 = np.matrix(np.arange(5) * 1.0)
- x2 = masked_values(x1, 3.0)
- assert_equal(x1, x2)
- assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
- assert_equal(3.0, x2.fill_value)
-
@suppress_copy_mask_on_assignment
def test_copy(self):
# Tests of some subtle points of copying and sizing.
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
+ # Default for masked array is not to copy; see gh-10318.
assert_(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
- assert_(y1a.mask is y1.mask)
+ assert_(y1a._mask.__array_interface__ ==
+ y1._mask.__array_interface__)
y2 = array(x1, mask=m3)
assert_(y2._data.__array_interface__ == x1.__array_interface__)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
- a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
+ x = np.array([(1.0, 2), (3.0, 4)],
+ dtype=[('x', float), ('y', int)]).view(np.recarray)
+ a = masked_array(x, mask=[(True, False), (False, True)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
- assert_(isinstance(a_pickled._data, np.matrix))
+ assert_(isinstance(a_pickled._data, np.recarray))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
assert_(result is output)
assert_(output[0] is masked)
- def test_count_mean_with_matrix(self):
- m = np.ma.array(np.matrix([[1,2],[3,4]]), mask=np.zeros((2,2)))
-
- assert_equal(m.count(axis=0).shape, (1,2))
- assert_equal(m.count(axis=1).shape, (2,1))
-
- #make sure broadcasting inside mean and var work
- assert_equal(m.mean(axis=0), [[2., 3.]])
- assert_equal(m.mean(axis=1), [[1.5], [3.5]])
-
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
- # test simple access
- test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
- assert_equal(test.flat[1], 2)
- assert_equal(test.flat[2], masked)
- assert_(np.all(test.flat[0:2] == test[0, 0:2]))
- # Test flat on masked_matrices
- test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
- test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
- control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
- assert_equal(test, control)
- # Test setting
- test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
- testflat = test.flat
- testflat[:] = testflat[[2, 1, 0]]
- assert_equal(test, control)
- testflat[0] = 9
- assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
if i >= x.shape[-1]:
i = 0
j += 1
- # test that matrices keep the correct shape (#4615)
- a = masked_array(np.matrix(np.eye(2)), mask=0)
- b = a.flat
- b01 = b[:2]
- assert_equal(b01.data, array([[1., 0.]]))
- assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
- def test_allany_onmatrices(self):
- x = np.array([[0.13, 0.26, 0.90],
- [0.28, 0.33, 0.63],
- [0.31, 0.87, 0.70]])
- X = np.matrix(x)
- m = np.array([[True, False, False],
- [False, False, False],
- [True, True, False]], dtype=np.bool_)
- mX = masked_array(X, mask=m)
- mXbig = (mX > 0.5)
- mXsmall = (mX < 0.5)
-
- assert_(not mXbig.all())
- assert_(mXbig.any())
- assert_equal(mXbig.all(0), np.matrix([False, False, True]))
- assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
- assert_equal(mXbig.any(0), np.matrix([False, False, True]))
- assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
-
- assert_(not mXsmall.all())
- assert_(mXsmall.any())
- assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
- assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
- assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
- assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
-
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
b = a.compressed()
assert_equal(b, [2, 3, 4])
- a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
- b = a.compressed()
- assert_equal(b, a)
- assert_(isinstance(b, np.matrix))
- a[0, 0] = masked
- b = a.compressed()
- assert_equal(b, [[2, 3, 4]])
-
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
- a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
- aravel = a.ravel()
- assert_equal(aravel.shape, (1, 5))
- assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
+ def test_stable_sort(self):
+ x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8)
+ expected = array([0, 3, 1, 4, 2, 5])
+ computed = argsort(x, kind='stable')
+ assert_equal(computed, expected)
+
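# A minimal sketch of the guarantee the test above checks: kind='stable'
# (new in numpy 1.15) keeps equal elements in their original relative order,
# so the argsort of a repeating sequence is fully determined.
import numpy as np

x = np.array([1, 2, 3, 1, 2, 3], dtype=np.uint8)
idx = np.argsort(x, kind='stable')
assert list(idx) == [0, 3, 1, 4, 2, 5]   # ties broken by original position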
def test_argsort_matches_sort(self):
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
assert_equal(test.mask, mask_first.mask)
# Test sort on dtype with subarray (gh-8069)
+        # Just check that the sort does not error; structured array
+        # subarrays are treated as byte strings, which leads to differing
+        # behavior depending on endianness and `endwith`.
dt = np.dtype([('v', int, 2)])
a = a.view(dt)
- mask_last = mask_last.view(dt)
- mask_first = mask_first.view(dt)
-
test = sort(a)
- assert_equal(test, mask_last)
- assert_equal(test.mask, mask_last.mask)
-
test = sort(a, endwith=False)
- assert_equal(test, mask_first)
- assert_equal(test.mask, mask_first.mask)
def test_argsort(self):
# Test argsort
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
- @dec.knownfailureif(sys.platform=='win32' and sys.version_info < (3, 6),
- msg='Fails on Python < 3.6 (Issue #9671)')
+ @pytest.mark.skipif(sys.platform=='win32' and sys.version_info < (3, 6),
+ reason='Fails on Python < 3.6 on Windows, gh-9671')
@suppress_copy_mask_on_assignment
def test_varstd_specialcases(self):
# Test a special case for var
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
- test = a.view((float, 2), np.matrix)
- assert_equal(test, data)
- assert_(isinstance(test, np.matrix))
-
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
- test = a.view((float, 2), np.matrix)
+ test = a.view((float, 2), np.recarray)
assert_equal(test, data)
- assert_(isinstance(test, np.matrix))
+ assert_(isinstance(test, np.recarray))
assert_(not isinstance(test, MaskedArray))
+
class TestOptionalArgs(object):
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
np.ma.masked.copy() is np.ma.masked,
np.True_.copy() is np.True_)
+ def test__copy(self):
+ import copy
+ assert_(
+ copy.copy(np.ma.masked) is np.ma.masked)
+
+ def test_deepcopy(self):
+ import copy
+ assert_(
+ copy.deepcopy(np.ma.masked) is np.ma.masked)
+
def test_immutable(self):
orig = np.ma.masked
assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1)
assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked)
assert_raises(MaskError, int, np.ma.masked)
- @dec.skipif(sys.version_info.major == 3, "long doesn't exist in Python 3")
+ @pytest.mark.skipif(sys.version_info.major == 3,
+ reason="long doesn't exist in Python 3")
def test_coercion_long(self):
assert_raises(MaskError, long, np.ma.masked)
assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked)
assert_(np.isnan(a_f[()]))
- @dec.knownfailureif(True, "See gh-9750")
+ @pytest.mark.xfail(reason="See gh-9750")
def test_coercion_unicode(self):
a_u = np.zeros((), 'U10')
a_u[()] = np.ma.masked
assert_equal(a_u[()], u'--')
- @dec.knownfailureif(True, "See gh-9750")
+ @pytest.mark.xfail(reason="See gh-9750")
def test_coercion_bytes(self):
a_b = np.zeros((), 'S10')
a_b[()] = np.ma.masked
res = np.ma.masked_values(np.inf, -np.inf)
assert_equal(res.mask, False)
+ res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True)
+ assert_(res.mask is np.ma.nomask)
+
+ res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False)
+ assert_equal(res.mask, [False] * 4)
+
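# A minimal sketch of what the test above exercises: ``masked_values`` masks
# entries approximately equal to the given value (within rtol/atol for
# floats), and with shrink=True an all-False mask collapses to the nomask
# singleton.
import numpy as np

res = np.ma.masked_values([1.0, 1.1, 1.1 + 1e-7], 1.1)
assert list(res.mask) == [False, True, True]    # near-match is masked too
assert np.ma.masked_values([1, 2], 5).mask is np.ma.nomask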
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True)
assert_(x_f2.flags.f_contiguous)
assert_(x_f2.mask.flags.f_contiguous)
-
-
-###############################################################################
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import run_module_suite, assert_warns
+from numpy.testing import assert_warns
from numpy.ma.testutils import assert_equal
from numpy.ma.core import MaskedArrayFutureWarning
result = ma_max(data1d)
assert_equal(result, ma_max(data1d, axis=None))
assert_equal(result, ma_max(data1d, axis=0))
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.testing import (
- run_module_suite, assert_warns, suppress_warnings, assert_raises,
+ assert_warns, suppress_warnings, assert_raises,
)
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal
assert_array_equal(d[5:,:], b_2)
assert_array_equal(d.mask, np.r_[m_1, m_2])
- def test_matrix_builder(self):
- assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
-
- def test_matrix(self):
- actual = mr_['r', 1, 2, 3]
- expected = np.ma.array(np.r_['r', 1, 2, 3])
- assert_array_equal(actual, expected)
-
- # outer type is masked array, inner type is matrix
- assert_equal(type(actual), type(expected))
- assert_equal(type(actual.data), type(expected.data))
-
def test_masked_constant(self):
actual = mr_[np.ma.masked, 1]
assert_equal(actual.mask, [True, False])
assert_equal(c.shape, c_shp)
assert_array_equal(a1.mask, c[..., 0].mask)
assert_array_equal(a2.mask, c[..., 1].mask)
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy.ma as ma
from numpy import recarray
from numpy.ma import masked, nomask
-from numpy.testing import run_module_suite, temppath
+from numpy.testing import temppath
from numpy.core.records import (
fromrecords as recfromrecords, fromarrays as recfromarrays
)
dtype=[('a', int), ('b', object)])
# getting an item used to fail
y[1]
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy.core.umath as umath
import numpy.core.fromnumeric as fromnumeric
from numpy.testing import (
- run_module_suite, assert_, assert_raises, assert_equal,
+ assert_, assert_raises, assert_equal,
)
from numpy.ma.testutils import assert_array_equal
from numpy.ma import (
assert_(y1.mask is m)
y1a = array(y1, copy=0)
- assert_(y1a.mask is y1.mask)
+        # For copy=False, one might expect that the array would just be
+        # passed on, i.e., that it would be "is" instead of "==".
+        # See gh-4043 for discussion.
+ assert_(y1a._mask.__array_interface__ ==
+ y1._mask.__array_interface__)
y2 = array(x1, mask=m3, copy=0)
assert_(y2.mask is m3)
if m2 is nomask:
return m1 is nomask
return (m1 == m2).all()
-
-if __name__ == "__main__":
- run_module_suite()
import numpy as np
from numpy.testing import (
- assert_, assert_array_equal, assert_allclose, run_module_suite,
- suppress_warnings
+ assert_, assert_array_equal, assert_allclose, suppress_warnings
)
# ddof should not have an effect (it gets cancelled out)
assert_allclose(r0.data, r1.data)
-if __name__ == "__main__":
- run_module_suite()
+ def test_mask_not_backmangled(self):
+ # See gh-10314. Test case taken from gh-3140.
+ a = np.ma.MaskedArray([1., 2.], mask=[False, False])
+ assert_(a.mask.shape == (2,))
+ b = np.tile(a, (2, 1))
+        # Check that the above no longer changes a.mask.shape to (1, 2)
+ assert_(a.mask.shape == (2,))
+ assert_(b.shape == (2, 2))
+ assert_(b.mask.shape == (2, 2))
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import run_module_suite, assert_, assert_raises, dec
+from numpy.testing import assert_, assert_raises
from numpy.ma.testutils import assert_equal
from numpy.ma.core import (
array, arange, masked, MaskedArray, masked_array, log, add, hypot,
msubarray = MSubArray
-class MMatrix(MaskedArray, np.matrix,):
-
- def __new__(cls, data, mask=nomask):
- mat = np.matrix(data)
- _data = MaskedArray.__new__(cls, data=mat, mask=mask)
- return _data
-
- def __array_finalize__(self, obj):
- np.matrix.__array_finalize__(self, obj)
- MaskedArray.__array_finalize__(self, obj)
- return
-
- def _get_series(self):
- _view = self.view(MaskedArray)
- _view._sharedmask = False
- return _view
- _series = property(fget=_get_series)
-
-mmatrix = MMatrix
-
-
# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing
# setting to non-class values (and thus np.ma.core.masked_print_option)
# and overrides __array_wrap__, updating the info dict, to check that this
def setup(self):
x = np.arange(5, dtype='float')
- mx = mmatrix(x, mask=[0, 1, 0, 0, 0])
+ mx = msubarray(x, mask=[0, 1, 0, 0, 0])
self.data = (x, mx)
def test_data_subclassing(self):
def test_maskedarray_subclassing(self):
# Tests subclassing MaskedArray
(x, mx) = self.data
- assert_(isinstance(mx._data, np.matrix))
+ assert_(isinstance(mx._data, subarray))
def test_masked_unary_operations(self):
# Tests masked_unary_operation
(x, mx) = self.data
with np.errstate(divide='ignore'):
- assert_(isinstance(log(mx), mmatrix))
+ assert_(isinstance(log(mx), msubarray))
assert_equal(log(x), np.log(x))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
(x, mx) = self.data
- # Result should be a mmatrix
- assert_(isinstance(add(mx, mx), mmatrix))
- assert_(isinstance(add(mx, x), mmatrix))
+ # Result should be a msubarray
+ assert_(isinstance(add(mx, mx), msubarray))
+ assert_(isinstance(add(mx, x), msubarray))
# Result should work
assert_equal(add(mx, x), mx+x)
- assert_(isinstance(add(mx, mx)._data, np.matrix))
- assert_(isinstance(add.outer(mx, mx), mmatrix))
- assert_(isinstance(hypot(mx, mx), mmatrix))
- assert_(isinstance(hypot(mx, x), mmatrix))
+ assert_(isinstance(add(mx, mx)._data, subarray))
+ assert_(isinstance(add.outer(mx, mx), msubarray))
+ assert_(isinstance(hypot(mx, mx), msubarray))
+ assert_(isinstance(hypot(mx, x), msubarray))
def test_masked_binary_operations2(self):
# Tests domained_masked_binary_operation
(x, mx) = self.data
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
- assert_(isinstance(divide(mx, mx), mmatrix))
- assert_(isinstance(divide(mx, x), mmatrix))
+ assert_(isinstance(divide(mx, mx), msubarray))
+ assert_(isinstance(divide(mx, x), msubarray))
assert_equal(divide(mx, mx), divide(xmx, xmx))
def test_attributepropagation(self):
diff2 = arr1 - arr2
assert_('info' in diff2._optinfo)
assert_(diff2._optinfo['info'] == 'test')
-
-
-###############################################################################
-if __name__ == '__main__':
- run_module_suite()
import numpy.testing
from numpy.testing import (
assert_, assert_allclose, assert_array_almost_equal_nulp,
- assert_raises, build_err_msg, run_module_suite
+ assert_raises, build_err_msg
)
from .core import mask_or, getmask, masked_array, nomask, masked, filled
# masked arrays. But there was no way to tell before.
from unittest import TestCase
__some__from_testing = [
- 'TestCase', 'assert_', 'assert_allclose',
- 'assert_array_almost_equal_nulp', 'assert_raises', 'run_module_suite',
+ 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp',
+ 'assert_raises'
]
__all__ = __all__masked + __some__from_testing
__all__ = defmatrix.__all__
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
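# A short usage note, assuming PytestTester's documented interface: the
# instance makes ``test()`` on the enclosing package run pytest over its
# tests, replacing the removed nose-based ``_numpy_tester()``, e.g.:
#
#     >>> import numpy
#     >>> numpy.matrixlib.test(label='fast', verbose=1)  # doctest: +SKIP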
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
+import warnings
import ast
import numpy.core.numeric as N
-from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray
-from numpy.core.numerictypes import issubdtype
+from numpy.core.numeric import concatenate, isscalar
+# While not in __all__, matrix_power used to be defined here, so we import
+# it for backward compatibility.
+from numpy.linalg import matrix_power
+
def _convert_from_string(data):
for char in '[]':
"""
return matrix(data, dtype=dtype, copy=False)
-def matrix_power(M, n):
- """
- Raise a square matrix to the (integer) power `n`.
-
- For positive integers `n`, the power is computed by repeated matrix
- squarings and matrix multiplications. If ``n == 0``, the identity matrix
- of the same shape as M is returned. If ``n < 0``, the inverse
- is computed and then raised to the ``abs(n)``.
-
- Parameters
- ----------
- M : ndarray or matrix object
- Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
- with `m` a positive integer.
- n : int
- The exponent can be any integer or long integer, positive,
- negative, or zero.
-
- Returns
- -------
- M**n : ndarray or matrix object
- The return value is the same shape and type as `M`;
- if the exponent is positive or zero then the type of the
- elements is the same as those of `M`. If the exponent is
- negative the elements are floating-point.
-
- Raises
- ------
- LinAlgError
- If the matrix is not numerically invertible.
-
- See Also
- --------
- matrix
- Provides an equivalent function as the exponentiation operator
- (``**``, not ``^``).
-
- Examples
- --------
- >>> from numpy import linalg as LA
- >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
- >>> LA.matrix_power(i, 3) # should = -i
- array([[ 0, -1],
- [ 1, 0]])
- >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
- matrix([[ 0, -1],
- [ 1, 0]])
- >>> LA.matrix_power(i, 0)
- array([[1, 0],
- [0, 1]])
- >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
- array([[ 0., 1.],
- [-1., 0.]])
-
- Somewhat more sophisticated example
-
- >>> q = np.zeros((4, 4))
- >>> q[0:2, 0:2] = -i
- >>> q[2:4, 2:4] = i
- >>> q # one of the three quaternion units not equal to 1
- array([[ 0., -1., 0., 0.],
- [ 1., 0., 0., 0.],
- [ 0., 0., 0., 1.],
- [ 0., 0., -1., 0.]])
- >>> LA.matrix_power(q, 2) # = -np.eye(4)
- array([[-1., 0., 0., 0.],
- [ 0., -1., 0., 0.],
- [ 0., 0., -1., 0.],
- [ 0., 0., 0., -1.]])
-
- """
- M = asanyarray(M)
- if M.ndim != 2 or M.shape[0] != M.shape[1]:
- raise ValueError("input must be a square array")
- if not issubdtype(type(n), N.integer):
- raise TypeError("exponent must be an integer")
-
- from numpy.linalg import inv
-
- if n==0:
- M = M.copy()
- M[:] = identity(M.shape[0])
- return M
- elif n<0:
- M = inv(M)
- n *= -1
-
- result = M
- if n <= 3:
- for _ in range(n-1):
- result=N.dot(result, M)
- return result
-
- # binary decomposition to reduce the number of Matrix
- # multiplications for n > 3.
- beta = binary_repr(n)
- Z, q, t = M, 0, len(beta)
- while beta[t-q-1] == '0':
- Z = N.dot(Z, Z)
- q += 1
- result = Z
- for k in range(q+1, t):
- Z = N.dot(Z, Z)
- if beta[t-k-1] == '1':
- result = N.dot(result, Z)
- return result
-
-
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
+ .. note:: It is no longer recommended to use this class, even for linear
+ algebra. Instead use regular arrays. The class may be removed
+ in the future.
+
Returns a matrix from an array-like object, or from a string of data.
A matrix is a specialized 2-D array that retains its 2-D nature
through operations. It has certain special operators, such as ``*``
"""
__array_priority__ = 10.0
def __new__(subtype, data, dtype=None, copy=True):
+ warnings.warn('the matrix subclass is not the recommended way to '
+ 'represent matrices or deal with linear algebra (see '
+ 'https://docs.scipy.org/doc/numpy/user/'
+ 'numpy-for-matlab-users.html). '
+ 'Please adjust your code to use regular ndarray.',
+ PendingDeprecationWarning, stacklevel=2)
if isinstance(data, matrix):
dtype2 = data.dtype
if (dtype is None):
from __future__ import division, absolute_import, print_function
+# As we are testing matrices, we ignore its PendingDeprecationWarnings
try:
- # Accessing collections abstact classes from collections
+ import pytest
+ pytestmark = pytest.mark.filterwarnings(
+ 'ignore:the matrix subclass is not:PendingDeprecationWarning')
+except ImportError:
+ pass
+
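# A short note on the pattern above: a module-level ``pytestmark`` applies
# its mark to every test in the file, and this filterwarnings mark ignores
# only warnings whose message starts with the given prefix and whose
# category matches, i.e. the general form is
#
#     pytestmark = pytest.mark.filterwarnings(
#         'ignore:<message prefix>:<WarningCategory>')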
+try:
+ # Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import numpy as np
from numpy import matrix, asmatrix, bmat
from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_almost_equal,
- assert_array_equal, assert_array_almost_equal, assert_raises
-)
-from numpy.matrixlib.defmatrix import matrix_power
+ assert_, assert_equal, assert_almost_equal, assert_array_equal,
+ assert_array_almost_equal, assert_raises
+ )
+from numpy.linalg import matrix_power
from numpy.matrixlib import mat
class TestCtor(object):
def test_matrix_memory_sharing(self):
assert_(np.may_share_memory(self.m, self.m.ravel()))
assert_(not np.may_share_memory(self.m, self.m.flatten()))
-
-
-if __name__ == "__main__":
- run_module_suite()
--- /dev/null
+"""Tests of interaction of matrix with other parts of numpy.
+
+Note that tests with MaskedArray and linalg are done in separate files.
+"""
+from __future__ import division, absolute_import, print_function
+
+# As we are testing matrices, we ignore its PendingDeprecationWarnings
+try:
+ import pytest
+ pytestmark = pytest.mark.filterwarnings(
+ 'ignore:the matrix subclass is not:PendingDeprecationWarning')
+except ImportError:
+ pass
+
+import textwrap
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_, assert_equal, assert_raises,
+ assert_raises_regex, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal)
+
+
+def test_fancy_indexing():
+    # The matrix class messes with the shape. While this is always
+    # weird (getitem is not used, it does not define setitem, and it
+    # knows nothing about fancy indexing), this tests gh-3110.
+ # 2018-04-29: moved here from core.tests.test_index.
+ m = np.matrix([[1, 2], [3, 4]])
+
+ assert_(isinstance(m[[0, 1, 0], :], np.matrix))
+
+    # gh-3110. Note the transpose is currently needed because matrices
+    # do *not* support dimension fixing for fancy indexing correctly.
+ x = np.asmatrix(np.arange(50).reshape(5, 10))
+ assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
+
+
+def test_polynomial_mapdomain():
+    # test that polynomial preserves the matrix subtype.
+ # 2018-04-29: moved here from polynomial.tests.polyutils.
+ dom1 = [0, 4]
+ dom2 = [1, 3]
+ x = np.matrix([dom1, dom1])
+ res = np.polynomial.polyutils.mapdomain(x, dom1, dom2)
+ assert_(isinstance(res, np.matrix))
+
+
+def test_sort_matrix_none():
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ a = np.matrix([[2, 1, 0]])
+ actual = np.sort(a, axis=None)
+ expected = np.matrix([[0, 1, 2]])
+ assert_equal(actual, expected)
+    assert_(type(actual) is np.matrix)
+
+
+def test_partition_matrix_none():
+ # gh-4301
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ a = np.matrix([[2, 1, 0]])
+ actual = np.partition(a, 1, axis=None)
+ expected = np.matrix([[0, 1, 2]])
+ assert_equal(actual, expected)
+    assert_(type(actual) is np.matrix)
+
+
+def test_dot_scalar_and_matrix_of_objects():
+ # Ticket #2469
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.dot(arr, 3), desired)
+ assert_equal(np.dot(3, arr), desired)
+
+
+def test_inner_scalar_and_matrix():
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ sca = np.array(3, dtype=dt)[()]
+ arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
+ desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
+ assert_equal(np.inner(arr, sca), desired)
+ assert_equal(np.inner(sca, arr), desired)
+
+
+def test_inner_scalar_and_matrix_of_objects():
+ # Ticket #4482
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.inner(arr, 3), desired)
+ assert_equal(np.inner(3, arr), desired)
+
+
+def test_iter_allocate_output_subtype():
+ # Make sure that the subtype with priority wins
+ # 2018-04-29: moved here from core.tests.test_nditer, given the
+ # matrix specific shape test.
+
+ # matrix vs ndarray
+ a = np.matrix([[1, 2], [3, 4]])
+ b = np.arange(4).reshape(2, 2).T
+ i = np.nditer([a, b, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ assert_(type(i.operands[2]) is np.matrix)
+ assert_(type(i.operands[2]) is not np.ndarray)
+ assert_equal(i.operands[2].shape, (2, 2))
+
+ # matrix always wants things to be 2D
+ b = np.arange(4).reshape(1, 2, 2)
+ assert_raises(RuntimeError, np.nditer, [a, b, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ # but if subtypes are disabled, the result can still work
+ i = np.nditer([a, b, None], [],
+ [['readonly'], ['readonly'],
+ ['writeonly', 'allocate', 'no_subtype']])
+ assert_(type(i.operands[2]) is np.ndarray)
+ assert_(type(i.operands[2]) is not np.matrix)
+ assert_equal(i.operands[2].shape, (1, 2, 2))
+
+
+def test_like_functions():
+ # 2018-04-29: moved here from core.tests.test_numeric
+ a = np.matrix([[1, 2], [3, 4]])
+ for like_function in np.zeros_like, np.ones_like, np.empty_like:
+ b = like_function(a)
+ assert_(type(b) is np.matrix)
+
+ c = like_function(a, subok=False)
+ assert_(type(c) is not np.matrix)
+
+
+def test_array_astype():
+ # 2018-04-29: copied here from core.tests.test_api
+ # subok=True passes through a matrix
+ a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')
+ b = a.astype('f4', subok=True, copy=False)
+ assert_(a is b)
+
+ # subok=True is default, and creates a subtype on a cast
+ b = a.astype('i4', copy=False)
+ assert_equal(a, b)
+ assert_equal(type(b), np.matrix)
+
+ # subok=False never returns a matrix
+ b = a.astype('f4', subok=False, copy=False)
+ assert_equal(a, b)
+ assert_(not (a is b))
+ assert_(type(b) is not np.matrix)
+
+
+def test_stack():
+ # 2018-04-29: copied here from core.tests.test_shape_base
+ # check np.matrix cannot be stacked
+ m = np.matrix([[1, 2], [3, 4]])
+ assert_raises_regex(ValueError, 'shape too large to be a matrix',
+ np.stack, [m, m])
+
+
+def test_object_scalar_multiply():
+ # Tickets #2469 and #4482
+ # 2018-04-29: moved here from core.tests.test_ufunc
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.multiply(arr, 3), desired)
+ assert_equal(np.multiply(3, arr), desired)
+
+
+def test_nanfunctions_matrices():
+ # Check that it works and that type and
+ # shape are preserved
+ # 2018-04-29: moved here from core.tests.test_nanfunctions
+ mat = np.matrix(np.eye(3))
+ for f in [np.nanmin, np.nanmax]:
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 1))
+ res = f(mat)
+ assert_(np.isscalar(res))
+ # check that rows of nan are dealt with for subclasses (#4628)
+ mat[1] = np.nan
+ for f in [np.nanmin, np.nanmax]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(not np.any(np.isnan(res)))
+ assert_(len(w) == 0)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
+ and not np.isnan(res[2, 0]))
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat)
+ assert_(np.isscalar(res))
+            assert_(not np.isnan(res))
+ assert_(len(w) == 0)
+
+
+def test_nanfunctions_matrices_general():
+ # Check that it works and that type and
+ # shape are preserved
+ # 2018-04-29: moved here from core.tests.test_nanfunctions
+ mat = np.matrix(np.eye(3))
+ for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,
+ np.nanmean, np.nanvar, np.nanstd):
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 1))
+ res = f(mat)
+ assert_(np.isscalar(res))
+
+ for f in np.nancumsum, np.nancumprod:
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 3))
+ res = f(mat)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3*3))
+
+
+def test_average_matrix():
+ # 2018-04-29: moved here from core.tests.test_function_base.
+ y = np.matrix(np.random.rand(5, 5))
+ assert_array_equal(y.mean(0), np.average(y, 0))
+
+ a = np.matrix([[1, 2], [3, 4]])
+ w = np.matrix([[1, 2], [3, 4]])
+
+ r = np.average(a, axis=0, weights=w)
+ assert_equal(type(r), np.matrix)
+ assert_equal(r, [[2.5, 10.0/3]])
+
+
+def test_trapz_matrix():
+ # Test to make sure matrices give the same answer as ndarrays
+ # 2018-04-29: moved here from core.tests.test_function_base.
+ x = np.linspace(0, 5)
+ y = x * x
+ r = np.trapz(y, x)
+ mx = np.matrix(x)
+ my = np.matrix(y)
+ mr = np.trapz(my, mx)
+ assert_almost_equal(mr, r)
+
+
+def test_ediff1d_matrix():
+ # 2018-04-29: moved here from core.tests.test_arraysetops.
+    assert_(isinstance(np.ediff1d(np.matrix(1)), np.matrix))
+    assert_(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix))
+
+
+def test_apply_along_axis_matrix():
+ # this test is particularly malicious because matrix
+ # refuses to become 1d
+ # 2018-04-29: moved here from core.tests.test_shape_base.
+ def double(row):
+ return row * 2
+
+ m = np.matrix([[0, 1], [2, 3]])
+ expected = np.matrix([[0, 2], [4, 6]])
+
+ result = np.apply_along_axis(double, 0, m)
+ assert_(isinstance(result, np.matrix))
+ assert_array_equal(result, expected)
+
+ result = np.apply_along_axis(double, 1, m)
+ assert_(isinstance(result, np.matrix))
+ assert_array_equal(result, expected)
+
+
+def test_kron_matrix():
+ # 2018-04-29: moved here from core.tests.test_shape_base.
+ a = np.ones([2, 2])
+ m = np.asmatrix(a)
+ assert_equal(type(np.kron(a, a)), np.ndarray)
+ assert_equal(type(np.kron(m, m)), np.matrix)
+ assert_equal(type(np.kron(a, m)), np.matrix)
+ assert_equal(type(np.kron(m, a)), np.matrix)
+
+
+class TestConcatenatorMatrix(object):
+ # 2018-04-29: moved here from core.tests.test_index_tricks.
+ def test_matrix(self):
+ a = [1, 2]
+ b = [3, 4]
+
+ ab_r = np.r_['r', a, b]
+ ab_c = np.r_['c', a, b]
+
+ assert_equal(type(ab_r), np.matrix)
+ assert_equal(type(ab_c), np.matrix)
+
+ assert_equal(np.array(ab_r), [[1, 2, 3, 4]])
+ assert_equal(np.array(ab_c), [[1], [2], [3], [4]])
+
+ assert_raises(ValueError, lambda: np.r_['rc', a, b])
+
+ def test_matrix_scalar(self):
+ r = np.r_['r', [1, 2], 3]
+ assert_equal(type(r), np.matrix)
+ assert_equal(np.array(r), [[1, 2, 3]])
+
+ def test_matrix_builder(self):
+ a = np.array([1])
+ b = np.array([2])
+ c = np.array([3])
+ d = np.array([4])
+ actual = np.r_['a, b; c, d']
+ expected = np.bmat([[a, b], [c, d]])
+
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+
+def test_array_equal_error_message_matrix():
+ # 2018-04-29: moved here from testing.tests.test_utils.
+ try:
+ assert_equal(np.array([1, 2]), np.matrix([1, 2]))
+ except AssertionError as e:
+ msg = str(e)
+ msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
+ msg_reference = textwrap.dedent("""\
+
+ Arrays are not equal
+
+ (shapes (2,), (1, 2) mismatch)
+ x: array([1, 2])
+ y: matrix([[1, 2]])""")
+ try:
+ assert_equal(msg, msg_reference)
+ except AssertionError:
+ assert_equal(msg2, msg_reference)
+ else:
+ raise AssertionError("Did not raise")
+
+
+def test_array_almost_equal_matrix():
+ # Matrix slicing keeps things 2-D, while array does not necessarily.
+ # See gh-8452.
+ # 2018-04-29: moved here from testing.tests.test_utils.
+ m1 = np.matrix([[1., 2.]])
+ m2 = np.matrix([[1., np.nan]])
+ m3 = np.matrix([[1., -np.inf]])
+ m4 = np.matrix([[np.nan, np.inf]])
+ m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
+ for assert_func in assert_array_almost_equal, assert_almost_equal:
+ for m in m1, m2, m3, m4, m5:
+ assert_func(m, m)
+ a = np.array(m)
+ assert_func(a, m)
+ assert_func(m, a)
--- /dev/null
+from __future__ import division, absolute_import, print_function
+
+# As we are testing matrices, we ignore its PendingDeprecationWarnings
+try:
+ import pytest
+ pytestmark = pytest.mark.filterwarnings(
+ 'ignore:the matrix subclass is not:PendingDeprecationWarning')
+except ImportError:
+ pass
+
+import pickle
+
+import numpy as np
+from numpy.ma.testutils import (assert_, assert_equal, assert_raises,
+ assert_array_equal)
+from numpy.ma.core import (masked_array, masked_values, masked, allequal,
+ MaskType, getmask, MaskedArray, nomask,
+ log, add, hypot, divide)
+from numpy.ma.extras import mr_
+
+
+class MMatrix(MaskedArray, np.matrix,):
+
+ def __new__(cls, data, mask=nomask):
+ mat = np.matrix(data)
+ _data = MaskedArray.__new__(cls, data=mat, mask=mask)
+ return _data
+
+ def __array_finalize__(self, obj):
+ np.matrix.__array_finalize__(self, obj)
+ MaskedArray.__array_finalize__(self, obj)
+ return
+
+ def _get_series(self):
+ _view = self.view(MaskedArray)
+ _view._sharedmask = False
+ return _view
+ _series = property(fget=_get_series)
+
+
+class TestMaskedMatrix(object):
+ def test_matrix_indexing(self):
+ # Tests conversions and indexing
+ x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
+ x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]])
+ x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]])
+ x4 = masked_array(x1)
+ # test conversion to strings
+ str(x2) # raises?
+ repr(x2) # raises?
+ # tests of indexing
+ assert_(type(x2[1, 0]) is type(x1[1, 0]))
+ assert_(x1[1, 0] == x2[1, 0])
+ assert_(x2[1, 1] is masked)
+ assert_equal(x1[0, 2], x2[0, 2])
+ assert_equal(x1[0, 1:], x2[0, 1:])
+ assert_equal(x1[:, 2], x2[:, 2])
+ assert_equal(x1[:], x2[:])
+ assert_equal(x1[1:], x3[1:])
+ x1[0, 2] = 9
+ x2[0, 2] = 9
+ assert_equal(x1, x2)
+ x1[0, 1:] = 99
+ x2[0, 1:] = 99
+ assert_equal(x1, x2)
+ x2[0, 1] = masked
+ assert_equal(x1, x2)
+ x2[0, 1:] = masked
+ assert_equal(x1, x2)
+ x2[0, :] = x1[0, :]
+ x2[0, 1] = masked
+ assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
+ x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+ assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0])))
+ assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0])))
+ x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+ assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0])))
+ assert_(allequal(x4[1], masked_array([1, 2, 3])))
+ x1 = np.matrix(np.arange(5) * 1.0)
+ x2 = masked_values(x1, 3.0)
+ assert_equal(x1, x2)
+ assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType),
+ x2.mask))
+ assert_equal(3.0, x2.fill_value)
+
+ def test_pickling_subbaseclass(self):
+ # Test pickling w/ a subclass of ndarray
+ a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
+ a_pickled = pickle.loads(a.dumps())
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
+ assert_(isinstance(a_pickled._data, np.matrix))
+
+ def test_count_mean_with_matrix(self):
+ m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2)))
+
+ assert_equal(m.count(axis=0).shape, (1, 2))
+ assert_equal(m.count(axis=1).shape, (2, 1))
+
+ # Make sure broadcasting inside mean and var work
+ assert_equal(m.mean(axis=0), [[2., 3.]])
+ assert_equal(m.mean(axis=1), [[1.5], [3.5]])
+
+ def test_flat(self):
+ # Test that flat can return items even for matrices [#4585, #4615]
+ # test simple access
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ assert_equal(test.flat[1], 2)
+ assert_equal(test.flat[2], masked)
+ assert_(np.all(test.flat[0:2] == test[0, 0:2]))
+ # Test flat on masked_matrices
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
+ control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
+ assert_equal(test, control)
+ # Test setting
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ testflat = test.flat
+ testflat[:] = testflat[[2, 1, 0]]
+ assert_equal(test, control)
+        testflat[0] = 9
+        assert_equal(test[0, 0], 9)
+ # test that matrices keep the correct shape (#4615)
+ a = masked_array(np.matrix(np.eye(2)), mask=0)
+ b = a.flat
+ b01 = b[:2]
+ assert_equal(b01.data, np.array([[1., 0.]]))
+ assert_equal(b01.mask, np.array([[False, False]]))
+
+ def test_allany_onmatrices(self):
+ x = np.array([[0.13, 0.26, 0.90],
+ [0.28, 0.33, 0.63],
+ [0.31, 0.87, 0.70]])
+ X = np.matrix(x)
+ m = np.array([[True, False, False],
+ [False, False, False],
+ [True, True, False]], dtype=np.bool_)
+ mX = masked_array(X, mask=m)
+ mXbig = (mX > 0.5)
+ mXsmall = (mX < 0.5)
+
+ assert_(not mXbig.all())
+ assert_(mXbig.any())
+ assert_equal(mXbig.all(0), np.matrix([False, False, True]))
+ assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
+ assert_equal(mXbig.any(0), np.matrix([False, False, True]))
+ assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
+
+ assert_(not mXsmall.all())
+ assert_(mXsmall.any())
+ assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
+ assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
+ assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
+ assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
+
+ def test_compressed(self):
+ a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
+ b = a.compressed()
+ assert_equal(b, a)
+ assert_(isinstance(b, np.matrix))
+ a[0, 0] = masked
+ b = a.compressed()
+ assert_equal(b, [[2, 3, 4]])
+
+ def test_ravel(self):
+ a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
+ aravel = a.ravel()
+ assert_equal(aravel.shape, (1, 5))
+ assert_equal(aravel._mask.shape, a.shape)
+
+ def test_view(self):
+ # Test view w/ flexible dtype
+ iterator = list(zip(np.arange(10), np.random.rand(10)))
+ data = np.array(iterator)
+ a = masked_array(iterator, dtype=[('a', float), ('b', float)])
+ a.mask[0] = (1, 0)
+ test = a.view((float, 2), np.matrix)
+ assert_equal(test, data)
+ assert_(isinstance(test, np.matrix))
+ assert_(not isinstance(test, MaskedArray))
+
+
+class TestSubclassing(object):
+ # Test suite for masked subclasses of ndarray.
+
+ def setup(self):
+ x = np.arange(5, dtype='float')
+ mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
+ self.data = (x, mx)
+
+ def test_maskedarray_subclassing(self):
+ # Tests subclassing MaskedArray
+ (x, mx) = self.data
+ assert_(isinstance(mx._data, np.matrix))
+
+ def test_masked_unary_operations(self):
+ # Tests masked_unary_operation
+ (x, mx) = self.data
+ with np.errstate(divide='ignore'):
+ assert_(isinstance(log(mx), MMatrix))
+ assert_equal(log(x), np.log(x))
+
+ def test_masked_binary_operations(self):
+ # Tests masked_binary_operation
+ (x, mx) = self.data
+ # Result should be a MMatrix
+ assert_(isinstance(add(mx, mx), MMatrix))
+ assert_(isinstance(add(mx, x), MMatrix))
+ # Result should work
+ assert_equal(add(mx, x), mx+x)
+ assert_(isinstance(add(mx, mx)._data, np.matrix))
+ assert_(isinstance(add.outer(mx, mx), MMatrix))
+ assert_(isinstance(hypot(mx, mx), MMatrix))
+ assert_(isinstance(hypot(mx, x), MMatrix))
+
+ def test_masked_binary_operations2(self):
+ # Tests domained_masked_binary_operation
+ (x, mx) = self.data
+ xmx = masked_array(mx.data.__array__(), mask=mx.mask)
+ assert_(isinstance(divide(mx, mx), MMatrix))
+ assert_(isinstance(divide(mx, x), MMatrix))
+ assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+class TestConcatenator(object):
+ # Tests for mr_, the equivalent of r_ for masked arrays.
+
+ def test_matrix_builder(self):
+ assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
+
+ def test_matrix(self):
+ # Test consistency with unmasked version. If we ever deprecate
+ # matrix, this test should either still pass, or both actual and
+        # expected should fail to be built.
+ actual = mr_['r', 1, 2, 3]
+ expected = np.ma.array(np.r_['r', 1, 2, 3])
+ assert_array_equal(actual, expected)
+
+ # outer type is masked array, inner type is matrix
+ assert_equal(type(actual), type(expected))
+ assert_equal(type(actual.data), type(expected.data))
--- /dev/null
+""" Test functions for linalg module using the matrix class."""
+from __future__ import division, absolute_import, print_function
+
+# As we are testing matrices, we ignore its PendingDeprecationWarnings
+try:
+ import pytest
+ pytestmark = pytest.mark.filterwarnings(
+ 'ignore:the matrix subclass is not:PendingDeprecationWarning')
+except ImportError:
+ pass
+
+import numpy as np
+
+from numpy.linalg.tests.test_linalg import (
+ LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase,
+ _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base,
+ SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases,
+ PinvCases, DetCases, LstsqCases)
+
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+ LinalgCase("0x0_matrix",
+ np.empty((0, 0), dtype=np.double).view(np.matrix),
+ np.empty((0, 1), dtype=np.double).view(np.matrix),
+ tags={'size-0'}),
+ LinalgCase("matrix_b_only",
+ np.array([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+ LinalgCase("matrix_a_and_b",
+ np.matrix([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+])
+
+# hermitian test-cases
+CASES += apply_tag('hermitian', [
+ LinalgCase("hmatrix_a_and_b",
+ np.matrix([[1., 2.], [2., 1.]]),
+ None),
+])
+# No need to make generalized or strided cases for matrices.
+
+
+class MatrixTestCase(LinalgTestCase):
+ TEST_CASES = CASES
+
+
+class TestSolveMatrix(SolveCases, MatrixTestCase):
+ pass
+
+
+class TestInvMatrix(InvCases, MatrixTestCase):
+ pass
+
+
+class TestEigvalsMatrix(EigvalsCases, MatrixTestCase):
+ pass
+
+
+class TestEigMatrix(EigCases, MatrixTestCase):
+ pass
+
+
+class TestSVDMatrix(SVDCases, MatrixTestCase):
+ pass
+
+
+class TestCondMatrix(CondCases, MatrixTestCase):
+ pass
+
+
+class TestPinvMatrix(PinvCases, MatrixTestCase):
+ pass
+
+
+class TestDetMatrix(DetCases, MatrixTestCase):
+ pass
+
+
+class TestLstsqMatrix(LstsqCases, MatrixTestCase):
+ pass
+
+
+class _TestNorm2DMatrix(_TestNorm2D):
+ array = np.matrix
+
+
+class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base):
+ pass
+
+
+class TestQRMatrix(_TestQR):
+ array = np.matrix
from __future__ import division, absolute_import, print_function
+# As we are testing matrices, we ignore its PendingDeprecationWarnings
+try:
+ import pytest
+ pytestmark = pytest.mark.filterwarnings(
+ 'ignore:the matrix subclass is not:PendingDeprecationWarning')
+except ImportError:
+ pass
+
import numpy as np
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_array_equal
-)
+from numpy.testing import assert_, assert_equal, assert_array_equal
class TestView(object):
def test_type(self):
assert_(isinstance(y, np.matrix))
assert_equal(y.dtype, np.dtype('<i2'))
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
+# As we are testing matrices, we ignore its PendingDeprecationWarnings
+try:
+ import pytest
+ pytestmark = pytest.mark.filterwarnings(
+ 'ignore:the matrix subclass is not:PendingDeprecationWarning')
+except ImportError:
+ pass
+
import numpy as np
-from numpy.testing import assert_equal, run_module_suite
+from numpy.testing import assert_equal
class TestDot(object):
def test_matscalar(self):
assert_equal(b1.diagonal(), diag_b1)
assert_equal(np.diagonal(b1), array_b1)
assert_equal(np.diag(b1), array_b1)
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
+# As we are testing matrices, we ignore its PendingDeprecationWarnings
+try:
+ import pytest
+ pytestmark = pytest.mark.filterwarnings(
+ 'ignore:the matrix subclass is not:PendingDeprecationWarning')
+except ImportError:
+ pass
+
import numpy as np
-from numpy.testing import (
- run_module_suite, assert_, assert_equal, assert_raises
- )
+from numpy.testing import assert_, assert_equal, assert_raises
class TestRegression(object):
x = np.asmatrix(np.random.uniform(0, 1, (3, 3)))
assert_equal(x.std().shape, ())
assert_equal(x.argmax().shape, ())
-
-if __name__ == "__main__":
- run_module_suite()
from .hermite_e import HermiteE
from .laguerre import Laguerre
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- run_module_suite
)
assert_almost_equal(cheb.chebpts2(4), tgt)
tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]
assert_almost_equal(cheb.chebpts2(5), tgt)
-
-if __name__ == "__main__":
- run_module_suite()
import operator as op
from numbers import Number
+import pytest
import numpy as np
from numpy.polynomial import (
Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- run_module_suite)
+ )
from numpy.compat import long
+#
+# fixtures
+#
+
classes = (
Polynomial, Legendre, Chebyshev, Laguerre,
- Hermite, HermiteE)
-
-
-def test_class_methods():
- for Poly1 in classes:
- for Poly2 in classes:
- yield check_conversion, Poly1, Poly2
- yield check_cast, Poly1, Poly2
- for Poly in classes:
- yield check_call, Poly
- yield check_identity, Poly
- yield check_basis, Poly
- yield check_fromroots, Poly
- yield check_fit, Poly
- yield check_equal, Poly
- yield check_not_equal, Poly
- yield check_add, Poly
- yield check_sub, Poly
- yield check_mul, Poly
- yield check_floordiv, Poly
- yield check_truediv, Poly
- yield check_mod, Poly
- yield check_divmod, Poly
- yield check_pow, Poly
- yield check_integ, Poly
- yield check_deriv, Poly
- yield check_roots, Poly
- yield check_linspace, Poly
- yield check_mapparms, Poly
- yield check_degree, Poly
- yield check_copy, Poly
- yield check_cutdeg, Poly
- yield check_truncate, Poly
- yield check_trim, Poly
- yield check_ufunc_override, Poly
+ Hermite, HermiteE
+ )
+classids = tuple(cls.__name__ for cls in classes)
+@pytest.fixture(params=classes, ids=classids)
+def Poly(request):
+ return request.param
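# A minimal sketch of the conversion: a params/ids fixture replaces the old
# nose ``yield`` generators, and re-binding the fixture under new names
# (``Poly1 = Poly``/``Poly2 = Poly`` below) makes pytest collect it twice,
# so a two-argument test runs over the full cross product.  ``Kind``,
# ``Kind2`` and ``test_pair`` are hypothetical names for illustration.
import pytest

@pytest.fixture(params=[int, float], ids=['int', 'float'])
def Kind(request):
    return request.param

Kind2 = Kind   # second, independently parametrized fixture

def test_pair(Kind, Kind2):
    # collected four times, once per (Kind, Kind2) combination
    assert Kind(0) == Kind2(0)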
#
# helper functions
#
-# conversion methods that depend on two classes
+# Test conversion methods that depend on combinations of two classes.
#
+Poly1 = Poly
+Poly2 = Poly
-def check_conversion(Poly1, Poly2):
+
+def test_conversion(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
assert_almost_equal(p2(x), p1(x))
-def check_cast(Poly1, Poly2):
+def test_cast(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
#
-# methods that depend on one class
+# test methods that depend on one class
#
-def check_identity(Poly):
+def test_identity(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
x = np.linspace(d[0], d[1], 11)
assert_almost_equal(p(x), x)
-def check_basis(Poly):
+def test_basis(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.basis(5, domain=d, window=w)
assert_equal(p.coef, [0]*5 + [1])
-def check_fromroots(Poly):
+def test_fromroots(Poly):
# check that requested roots are zeros of a polynomial
# of correct degree, domain, and window.
d = Poly.domain + random((2,))*.25
assert_almost_equal(p2.coef[-1], 1)
-def check_fit(Poly):
+def test_fit(Poly):
def f(x):
return x*(x - 1)*(x - 2)
assert_almost_equal(p2(x), p3(x))
-def check_equal(Poly):
+def test_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
assert_(not p1 == p4)
-def check_not_equal(Poly):
+def test_not_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
assert_(p1 != p4)
-def check_add(Poly):
+def test_add(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
assert_raises(TypeError, op.add, p1, Polynomial([0]))
-def check_sub(Poly):
+def test_sub(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
assert_raises(TypeError, op.sub, p1, Polynomial([0]))
-def check_mul(Poly):
+def test_mul(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
assert_raises(TypeError, op.mul, p1, Polynomial([0]))
-def check_floordiv(Poly):
+def test_floordiv(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
assert_raises(TypeError, op.floordiv, p1, Polynomial([0]))
-def check_truediv(Poly):
+def test_truediv(Poly):
# true division is valid only if the denominator is a Number and
# not a python bool.
p1 = Poly([1,2,3])
assert_raises(TypeError, op.truediv, p2, ptype(1))
-def check_mod(Poly):
+def test_mod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
assert_raises(TypeError, op.mod, p1, Polynomial([0]))
-def check_divmod(Poly):
+def test_divmod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
assert_raises(TypeError, divmod, p1, Polynomial([0]))
-def check_roots(Poly):
+def test_roots(Poly):
d = Poly.domain * 1.25 + .25
w = Poly.window
tgt = np.linspace(d[0], d[1], 5)
assert_almost_equal(res, tgt)
-def check_degree(Poly):
+def test_degree(Poly):
p = Poly.basis(5)
assert_equal(p.degree(), 5)
-def check_copy(Poly):
+def test_copy(Poly):
p1 = Poly.basis(5)
p2 = p1.copy()
assert_(p1 == p2)
assert_(p1.window is not p2.window)
-def check_integ(Poly):
+def test_integ(Poly):
P = Polynomial
# Check defaults
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
-def check_deriv(Poly):
+def test_deriv(Poly):
    # Check that the derivative is the inverse of integration.  It is
    # assumed that the integration has been checked elsewhere.
d = Poly.domain + random((2,))*.25
assert_almost_equal(p2.deriv(2).coef, p1.coef)
-def check_linspace(Poly):
+def test_linspace(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly([1, 2, 3], domain=d, window=w)
assert_almost_equal(yres, ytgt)
-def check_pow(Poly):
+def test_pow(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
tgt = Poly([1], domain=d, window=w)
assert_raises(ValueError, op.pow, tgt, -1)
-def check_call(Poly):
+def test_call(Poly):
P = Polynomial
d = Poly.domain
x = np.linspace(d[0], d[1], 11)
assert_almost_equal(res, tgt)
-def check_cutdeg(Poly):
+def test_cutdeg(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.cutdeg, .5)
assert_raises(ValueError, p.cutdeg, -1)
assert_equal(len(p.cutdeg(0)), 1)
-def check_truncate(Poly):
+def test_truncate(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.truncate, .5)
assert_raises(ValueError, p.truncate, 0)
assert_equal(len(p.truncate(1)), 1)
-def check_trim(Poly):
+def test_trim(Poly):
c = [1, 1e-6, 1e-12, 0]
p = Poly(c)
assert_equal(p.trim().coef, c[:3])
assert_equal(p.trim(1e-5).coef, c[:1])
-def check_mapparms(Poly):
+def test_mapparms(Poly):
# check with defaults. Should be identity.
d = Poly.domain
w = Poly.window
assert_almost_equal([1, 2], p.mapparms())
-def check_ufunc_override(Poly):
+def test_ufunc_override(Poly):
p = Poly([1, 2, 3])
x = np.ones(3)
assert_raises(TypeError, np.add, p, x)
assert_raises(TypeError, np.add, x, p)
+#
+# Test class method that only exists for some classes
+#
+
+
class TestInterpolate(object):
def f(self, x):
for t in range(0, deg + 1):
p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,))
assert_almost_equal(p(x), powx(x, t), decimal=12)
-
-
-if __name__ == "__main__":
- run_module_suite()
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- run_module_suite
)
H0 = np.array([1])
tgt = np.exp(-x**2)
res = herm.hermweight(x)
assert_almost_equal(res, tgt)
-
-
-if __name__ == "__main__":
- run_module_suite()
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- run_module_suite
)
He0 = np.array([1])
tgt = np.exp(-.5*x**2)
res = herme.hermeweight(x)
assert_almost_equal(res, tgt)
-
-
-if __name__ == "__main__":
- run_module_suite()
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- run_module_suite
)
L0 = np.array([1])/1
tgt = np.exp(-x)
res = lag.lagweight(x)
assert_almost_equal(res, tgt)
-
-
-if __name__ == "__main__":
- run_module_suite()
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- run_module_suite
)
L0 = np.array([1])
tgt = 1.
res = leg.legweight(x)
assert_almost_equal(res, tgt)
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy.polynomial.polynomial as poly
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- run_module_suite
)
def test_polyline(self):
assert_equal(poly.polyline(3, 4), [3, 4])
-
-
-if __name__ == "__main__":
- run_module_suite()
import numpy.polynomial.polyutils as pu
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- run_module_suite
)
dom1 = [0, 4]
dom2 = [1, 3]
tgt = dom2
- res = pu. mapdomain(dom1, dom1, dom2)
+ res = pu.mapdomain(dom1, dom1, dom2)
assert_almost_equal(res, tgt)
# test for complex values
assert_almost_equal(res, tgt)
# test that subtypes are preserved.
+ class MyNDArray(np.ndarray):
+ pass
+
dom1 = [0, 4]
dom2 = [1, 3]
- x = np.matrix([dom1, dom1])
+ x = np.array([dom1, dom1]).view(MyNDArray)
res = pu.mapdomain(x, dom1, dom2)
- assert_(isinstance(res, np.matrix))
+ assert_(isinstance(res, MyNDArray))
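
A minimal sketch, not part of the patch: the test above relies on ndarray.view(subclass)
reinterpreting the same buffer as a subclass instance without copying, which is why
ufunc-based code such as mapdomain can preserve the subtype:

import numpy as np

class MyNDArray(np.ndarray):
    pass

x = np.arange(4.0).view(MyNDArray)        # same data, subclass type, no copy
assert isinstance(x, MyNDArray)
assert isinstance(x * 2 + 1, MyNDArray)   # ufuncs preserve the subtype
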
def test_mapparms(self):
# test for real values
tgt = [-1 + 1j, 1 - 1j]
res = pu.mapparms(dom1, dom2)
assert_almost_equal(res, tgt)
-
-
-if __name__ == "__main__":
- run_module_suite()
from __future__ import division, absolute_import, print_function
import numpy.polynomial as poly
-from numpy.testing import run_module_suite, assert_equal
+from numpy.testing import assert_equal
class TestStr(object):
res = repr(poly.Laguerre([0, 1]))
tgt = 'Laguerre([0., 1.], domain=[0, 1], window=[0, 1])'
assert_equal(res, tgt)
-
-
-#
-
-if __name__ == "__main__":
- run_module_suite()
"""
return RandomState(seed=0)
-from numpy.testing import _numpy_tester
-test = _numpy_tester().test
-bench = _numpy_tester().bench
+from numpy.testing._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
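
A minimal sketch, not part of the patch, of what this hook change means in practice:
each subpackage's ``test`` attribute becomes a PytestTester instance, so (assuming
pytest is installed) a module's tests can be run like this:

import numpy as np
np.random.test(label='fast', verbose=1)   # 'fast' deselects tests marked slow
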
probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 15, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 15, density=True)
>>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
>>> plt.show()
See Also
--------
- random.standard_normal : Similar, but takes a tuple as its argument.
+ standard_normal : Similar, but takes a tuple as its argument.
Notes
-----
See Also
--------
- random.randint : Similar to `random_integers`, only for the half-open
+ randint : Similar to `random_integers`, only for the half-open
interval [`low`, `high`), and 0 is the lowest value if `high` is
omitted.
Display results as a histogram:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(dsums, 11, normed=True)
+ >>> count, bins, ignored = plt.hist(dsums, 11, density=True)
>>> plt.show()
"""
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
>>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
... linewidth=2, color='r')
>>> import matplotlib.pyplot as plt
>>> import scipy.special as sps
- >>> count, bins, ignored = plt.hist(s, 50, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
>>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ \\
... (sps.gamma(shape) * scale**shape))
>>> plt.plot(bins, y, linewidth=2, color='r')
>>> import matplotlib.pyplot as plt
>>> import scipy.special as sps
- >>> count, bins, ignored = plt.hist(s, 50, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
>>> y = bins**(shape-1)*(np.exp(-bins/scale) /
... (sps.gamma(shape)*scale**shape))
>>> plt.plot(bins, y, linewidth=2, color='r')
>>> dfden = 20 # within groups degrees of freedom
>>> nonc = 3.0
>>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)
- >>> NF = np.histogram(nc_vals, bins=50, normed=True)
+ >>> NF = np.histogram(nc_vals, bins=50, density=True)
>>> c_vals = np.random.f(dfnum, dfden, 1000000)
- >>> F = np.histogram(c_vals, bins=50, normed=True)
+ >>> F = np.histogram(c_vals, bins=50, density=True)
>>> plt.plot(F[1][1:], F[0])
>>> plt.plot(NF[1][1:], NF[0])
>>> plt.show()
>>> import matplotlib.pyplot as plt
>>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),
- ... bins=200, normed=True)
+ ... bins=200, density=True)
>>> plt.show()
Draw values from a noncentral chisquare with very small noncentrality,
>>> plt.figure()
>>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),
- ... bins=np.arange(0., 25, .1), normed=True)
+ ... bins=np.arange(0., 25, .1), density=True)
>>> values2 = plt.hist(np.random.chisquare(3, 100000),
- ... bins=np.arange(0., 25, .1), normed=True)
+ ... bins=np.arange(0., 25, .1), density=True)
>>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')
>>> plt.show()
>>> plt.figure()
>>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),
- ... bins=200, normed=True)
+ ... bins=200, density=True)
>>> plt.show()
"""
>>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))
>>> import matplotlib.pyplot as plt
- >>> h = plt.hist(s, bins=100, normed=True)
+ >>> h = plt.hist(s, bins=100, density=True)
For a one-sided t-test, how far out in the distribution does the t
statistic appear?
>>> import matplotlib.pyplot as plt
>>> from scipy.special import i0
- >>> plt.hist(s, 50, normed=True)
+ >>> plt.hist(s, 50, density=True)
>>> x = np.linspace(-np.pi, np.pi, num=51)
>>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa))
>>> plt.plot(x, y, linewidth=2, color='r')
density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, _ = plt.hist(s, 100, normed=True)
+ >>> count, bins, _ = plt.hist(s, 100, density=True)
>>> fit = a*m**a / bins**(a+1)
>>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')
>>> plt.show()
>>> powpdf = stats.powerlaw.pdf(xx,5)
>>> plt.figure()
- >>> plt.hist(rvs, bins=50, normed=True)
+ >>> plt.hist(rvs, bins=50, density=True)
>>> plt.plot(xx,powpdf,'r-')
>>> plt.title('np.random.power(5)')
>>> plt.figure()
- >>> plt.hist(1./(1.+rvsp), bins=50, normed=True)
+ >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
>>> plt.plot(xx,powpdf,'r-')
>>> plt.title('inverse of 1 + np.random.pareto(5)')
>>> plt.figure()
- >>> plt.hist(1./(1.+rvsp), bins=50, normed=True)
+ >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
>>> plt.plot(xx,powpdf,'r-')
>>> plt.title('inverse of stats.pareto(5)')
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
>>> x = np.arange(-8., 8., .01)
>>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)
>>> plt.plot(x, pdf)
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
>>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
... * np.exp( -np.exp( -(bins - mu) /beta) ),
... linewidth=2, color='r')
... a = np.random.normal(mu, beta, 1000)
... means.append(a.mean())
... maxima.append(a.max())
- >>> count, bins, ignored = plt.hist(maxima, 30, normed=True)
+ >>> count, bins, ignored = plt.hist(maxima, 30, density=True)
>>> beta = np.std(maxima) * np.sqrt(6) / np.pi
>>> mu = np.mean(maxima) - 0.57721*beta
>>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 100, normed=True, align='mid')
+ >>> count, bins, ignored = plt.hist(s, 100, density=True, align='mid')
>>> x = np.linspace(min(bins), max(bins), 10000)
>>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
... b.append(np.product(a))
>>> b = np.array(b) / np.min(b) # scale values to be positive
- >>> count, bins, ignored = plt.hist(b, 100, normed=True, align='mid')
+ >>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
>>> sigma = np.std(np.log(b))
>>> mu = np.mean(np.log(b))
--------
Draw values from the distribution and plot the histogram
- >>> values = hist(np.random.rayleigh(3, 100000), bins=200, normed=True)
+ >>> values = hist(np.random.rayleigh(3, 100000), bins=200, density=True)
Wave heights tend to follow a Rayleigh distribution. If the mean wave
height is 1 meter, what fraction of waves are likely to be larger than 3
Draw values from the distribution and plot the histogram:
>>> import matplotlib.pyplot as plt
- >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, normed=True)
+ >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, density=True)
>>> plt.show()
"""
>>> import matplotlib.pyplot as plt
>>> h = plt.hist(np.random.triangular(-3, 0, 8, 100000), bins=200,
- ... normed=True)
+ ... density=True)
>>> plt.show()
"""
Draw samples from a negative binomial distribution.
Samples are drawn from a negative binomial distribution with specified
- parameters, `n` trials and `p` probability of success where `n` is an
+ parameters, `n` successes and `p` probability of success where `n` is an
integer > 0 and `p` is in the interval [0, 1].
Parameters
-------
out : ndarray or scalar
Drawn samples from the parameterized negative binomial distribution,
- where each sample is equal to N, the number of trials it took to
- achieve n - 1 successes, N - (n - 1) failures, and a success on the,
- (N + n)th trial.
+ where each sample is equal to N, the number of failures that
+ occurred before a total of n successes was reached.
Notes
-----
The probability density for the negative binomial distribution is
- .. math:: P(N;n,p) = \\binom{N+n-1}{n-1}p^{n}(1-p)^{N},
+ .. math:: P(N;n,p) = \\binom{N+n-1}{N}p^{n}(1-p)^{N},
- where :math:`n-1` is the number of successes, :math:`p` is the
- probability of success, and :math:`N+n-1` is the number of trials.
- The negative binomial distribution gives the probability of n-1
- successes and N failures in N+n-1 trials, and success on the (N+n)th
- trial.
+ where :math:`n` is the number of successes, :math:`p` is the
+ probability of success, and :math:`N+n` is the number of trials.
+ The negative binomial distribution gives the probability of N
+ failures given n successes, with a success on the last trial.
If one throws a die repeatedly until the third time a "1" appears,
then the probability distribution of the number of non-"1"s that
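
A quick numerical sanity check of the corrected PMF (an editor's sketch, not part of
the diff), using scipy.special.comb for the binomial coefficient:

import numpy as np
from scipy.special import comb

n, p, N = 3, 0.5, 4
samples = np.random.negative_binomial(n, p, 200000)
empirical = np.mean(samples == N)
pmf = comb(N + n - 1, N) * p**n * (1 - p)**N   # C(6, 4) * 0.5**7 ~= 0.117
assert abs(empirical - pmf) < 0.01
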
Display histogram of the sample:
>>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 14, normed=True)
+ >>> count, bins, ignored = plt.hist(s, 14, density=True)
>>> plt.show()
Draw each 100 values for lambda 100 and 500:
Truncate s values at 50 so plot is interesting:
- >>> count, bins, ignored = plt.hist(s[s<50], 50, normed=True)
+ >>> count, bins, ignored = plt.hist(s[s<50], 50, density=True)
>>> x = np.arange(1., 50.)
>>> y = x**(-a) / special.zetac(a)
>>> plt.plot(x, y/max(y), linewidth=2, color='r')
"""
if isinstance(x, (int, long, np.integer)):
arr = np.arange(x)
- else:
- arr = np.array(x)
- self.shuffle(arr)
- return arr
+ self.shuffle(arr)
+ return arr
+
+ arr = np.asarray(x)
+
+ # shuffle has fast-path for 1-d
+ if arr.ndim == 1:
+ # must return a copy
+ if arr is x:
+ arr = np.array(arr)
+ self.shuffle(arr)
+ return arr
+
+ # Shuffle index array, dtype to ensure fast path
+ idx = np.arange(arr.shape[0], dtype=np.intp)
+ self.shuffle(idx)
+ return arr[idx]
+
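A minimal sketch, not part of the patch, of the n-d branch added above: shuffling an
index array and fancy-indexing moves whole rows, so elements of a row are never mixed:

import numpy as np

rs = np.random.RandomState(0)
arr = np.arange(12).reshape(4, 3)
idx = np.arange(arr.shape[0], dtype=np.intp)   # intp dtype hits shuffle's fast path
rs.shuffle(idx)
out = arr[idx]                                 # rows reordered intact
assert sorted(map(tuple, out)) == sorted(map(tuple, arr))
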
_rand = RandomState()
seed = _rand.seed
('_LARGEFILE64_SOURCE', '1')]
if needs_mingw_ftime_workaround():
defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
+ # fix for 0.26 < cython < 0.29 and perhaps 0.28.5
+ # see https://github.com/cython/cython/issues/2494
+ defs.append(('CYTHON_SMALL_CODE', ''))
libs = []
# Configure mtrand
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_raises, assert_equal, assert_warns,
+ assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
-
-if __name__ == "__main__":
- run_module_suite()
import sys
from numpy.testing import (
- run_module_suite, assert_, assert_array_equal, assert_raises,
+ assert_, assert_array_equal, assert_raises,
)
from numpy import random
from numpy.compat import long
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
-
-if __name__ == "__main__":
- run_module_suite()
from unittest import TestCase
-from . import decorators as dec
-from .nosetester import run_module_suite, NoseTester as Tester, _numpy_tester
-from .utils import *
-test = _numpy_tester().test
+from ._private.utils import *
+from ._private import decorators as dec
+from ._private.nosetester import (
+ run_module_suite, NoseTester as Tester
+ )
+
+__all__ = _private.utils.__all__ + ['TestCase', 'run_module_suite']
+
+from ._private.pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
--- /dev/null
+"""
+Decorators for labeling and modifying behavior of test objects.
+
+Decorators that merely return a modified version of the original
+function object are straightforward. Decorators that return a new
+function object need to use
+::
+
+ nose.tools.make_decorator(original_function)(decorator)
+
+in returning the decorator, in order to preserve meta-data such as
+function name, setup and teardown functions and so on - see
+``nose.tools`` for more information.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+try:
+ # Accessing collections abstract classes from collections
+ # has been deprecated since Python 3.3
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc
+
+from .utils import SkipTest, assert_warns, HAS_REFCOUNT
+
+__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated',
+ 'parametrize', '_needs_refcount',]
+
+
+def slow(t):
+ """
+ Label a test as 'slow'.
+
+ The exact definition of a slow test is obviously both subjective and
+ hardware-dependent, but in general any individual test that requires more
+ than a second or two should be labeled as slow (the whole suite consists of
+ thousands of tests, so even a second is significant).
+
+ Parameters
+ ----------
+ t : callable
+ The test to label as slow.
+
+ Returns
+ -------
+ t : callable
+ The decorated test `t`.
+
+ Examples
+ --------
+ The `numpy.testing` module includes ``import decorators as dec``.
+ A test can be decorated as slow like this::
+
+ from numpy.testing import *
+
+ @dec.slow
+ def test_big(self):
+ print('Big, slow test')
+
+ """
+
+ t.slow = True
+ return t
+
+def setastest(tf=True):
+ """
+ Signals to nose that this function is or is not a test.
+
+ Parameters
+ ----------
+ tf : bool
+ If True, specifies that the decorated callable is a test.
+ If False, specifies that the decorated callable is not a test.
+ Default is True.
+
+ Notes
+ -----
+ This decorator can't use the nose namespace, because it can be
+ called from a non-test module. See also ``istest`` and ``nottest`` in
+ ``nose.tools``.
+
+ Examples
+ --------
+ `setastest` can be used in the following way::
+
+ from numpy.testing import dec
+
+ @dec.setastest(False)
+ def func_with_test_in_name(arg1, arg2):
+ pass
+
+ """
+ def set_test(t):
+ t.__test__ = tf
+ return t
+ return set_test
+
+def skipif(skip_condition, msg=None):
+ """
+ Make function raise SkipTest exception if a given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ skip_condition : bool or callable
+ Flag to determine whether to skip the decorated test.
+ msg : str, optional
+ Message to give on raising a SkipTest exception. Default is None.
+
+ Returns
+ -------
+ decorator : function
+ Decorator which, when applied to a function, causes SkipTest
+ to be raised when `skip_condition` is True, and the function
+ to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+
+ def skip_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ # Allow for both boolean or callable skip conditions.
+ if isinstance(skip_condition, collections_abc.Callable):
+ skip_val = lambda: skip_condition()
+ else:
+ skip_val = lambda: skip_condition
+
+ def get_msg(func,msg=None):
+ """Skip message with information about function being skipped."""
+ if msg is None:
+ out = 'Test skipped due to test condition'
+ else:
+ out = msg
+
+ return "Skipping test: %s: %s" % (func.__name__, out)
+
+ # We need to define *two* skippers because Python doesn't allow both
+ # return with value and yield inside the same function.
+ def skipper_func(*args, **kwargs):
+ """Skipper for normal test functions."""
+ if skip_val():
+ raise SkipTest(get_msg(f, msg))
+ else:
+ return f(*args, **kwargs)
+
+ def skipper_gen(*args, **kwargs):
+ """Skipper for test generators."""
+ if skip_val():
+ raise SkipTest(get_msg(f, msg))
+ else:
+ for x in f(*args, **kwargs):
+ yield x
+
+ # Choose the right skipper to use when building the actual decorator.
+ if nose.util.isgenerator(f):
+ skipper = skipper_gen
+ else:
+ skipper = skipper_func
+
+ return nose.tools.make_decorator(f)(skipper)
+
+ return skip_decorator
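
A hedged usage sketch for the decorator above (not part of the patch; it assumes nose
is installed, since skipif imports it at decoration time):

import sys
from numpy.testing import dec

@dec.skipif(sys.platform == 'win32', "POSIX-only behaviour")
def test_posix_only():
    assert True
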
+
+
+def knownfailureif(fail_condition, msg=None):
+ """
+    Make function raise KnownFailureException if a given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ fail_condition : bool or callable
+ Flag to determine whether to mark the decorated test as a known
+ failure (if True) or not (if False).
+ msg : str, optional
+ Message to give on raising a KnownFailureException exception.
+ Default is None.
+
+ Returns
+ -------
+ decorator : function
+ Decorator, which, when applied to a function, causes
+ KnownFailureException to be raised when `fail_condition` is True,
+ and the function to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+ if msg is None:
+ msg = 'Test skipped due to known failure'
+
+ # Allow for both boolean or callable known failure conditions.
+ if isinstance(fail_condition, collections_abc.Callable):
+ fail_val = lambda: fail_condition()
+ else:
+ fail_val = lambda: fail_condition
+
+ def knownfail_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+ from .noseclasses import KnownFailureException
+
+ def knownfailer(*args, **kwargs):
+ if fail_val():
+ raise KnownFailureException(msg)
+ else:
+ return f(*args, **kwargs)
+ return nose.tools.make_decorator(f)(knownfailer)
+
+ return knownfail_decorator
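
And a matching sketch for knownfailureif (again not part of the patch; the issue
reference below is a placeholder, not a real ticket):

from numpy.testing import dec

@dec.knownfailureif(True, "documented upstream bug (placeholder gh-NNNN)")
def test_known_bug():
    raise AssertionError("fails until the upstream fix lands")
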
+
+def deprecated(conditional=True):
+ """
+ Filter deprecation warnings while running the test suite.
+
+ This decorator can be used to filter DeprecationWarning's, to avoid
+ printing them during the test suite run, while checking that the test
+ actually raises a DeprecationWarning.
+
+ Parameters
+ ----------
+ conditional : bool or callable, optional
+ Flag to determine whether to mark test as deprecated or not. If the
+ condition is a callable, it is used at runtime to dynamically make the
+ decision. Default is True.
+
+ Returns
+ -------
+ decorator : function
+ The `deprecated` decorator itself.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+ def deprecate_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ def _deprecated_imp(*args, **kwargs):
+            # Check that calling f raises a DeprecationWarning.
+ with assert_warns(DeprecationWarning):
+ f(*args, **kwargs)
+
+ if isinstance(conditional, collections_abc.Callable):
+ cond = conditional()
+ else:
+ cond = conditional
+ if cond:
+ return nose.tools.make_decorator(f)(_deprecated_imp)
+ else:
+ return f
+ return deprecate_decorator
+
+
+def parametrize(vars, input):
+ """
+    Pytest compatibility helper. This implements the simplest level of
+ pytest.mark.parametrize for use in nose as an aid in making the transition
+ to pytest. It achieves that by adding a dummy var parameter and ignoring
+ the doc_func parameter of the base class. It does not support variable
+ substitution by name, nor does it support nesting or classes. See the
+ pytest documentation for usage.
+
+ .. versionadded:: 1.14.0
+
+ """
+ from .parameterized import parameterized
+
+ return parameterized(input)
+
+_needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
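
A short sketch, not part of the patch, of the parametrize shim defined above; each
tuple becomes one generated test case when collected by nose:

from numpy.testing import dec

@dec.parametrize('n', [(1,), (2,), (3,)])
def test_positive(n):
    assert n > 0
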
--- /dev/null
+# These classes implement a doctest runner plugin for nose, a "known failure"
+# error class, and a customized TestProgram for NumPy.
+
+# Because this module imports nose directly, it should not
+# be used except by nosetester.py to avoid a general NumPy
+# dependency on nose.
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import doctest
+import inspect
+
+import numpy
+import nose
+from nose.plugins import doctests as npd
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+from nose.plugins.base import Plugin
+from nose.util import src
+from .nosetester import get_package_name
+from .utils import KnownFailureException, KnownFailureTest
+
+
+# Some of the classes in this module begin with 'Numpy' to clearly distinguish
+# them from the plethora of very similar names from nose/unittest/doctest
+
+#-----------------------------------------------------------------------------
+# Modified version of the one in the stdlib, that fixes a python bug (doctests
+# not found in extension modules, http://bugs.python.org/issue3158)
+class NumpyDocTestFinder(doctest.DocTestFinder):
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.__globals__
+ elif inspect.isbuiltin(object):
+ return module.__name__ == object.__module__
+ elif inspect.isclass(object):
+ return module.__name__ == object.__module__
+ elif inspect.ismethod(object):
+ # This one may be a bug in cython that fails to correctly set the
+ # __module__ attribute of methods, but since the same error is easy
+ # to make by extension code writers, having this safety in place
+ # isn't such a bad idea
+ return module.__name__ == object.__self__.__class__.__module__
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+            return True # [XX] no way to be sure.
+ else:
+ raise ValueError("object must be a class or function")
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+
+ doctest.DocTestFinder._find(self, tests, obj, name, module,
+ source_lines, globs, seen)
+
+ # Below we re-run pieces of the above method with manual modifications,
+ # because the original code is buggy and fails to correctly identify
+ # doctests in extension modules.
+
+ # Local shorthands
+ from inspect import (
+ isroutine, isclass, ismodule, isfunction, ismethod
+ )
+
+ # Look for tests in a module's contained objects.
+ if ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ valname1 = '%s.%s' % (name, valname)
+ if ( (isroutine(val) or isclass(val))
+ and self._from_module(module, val)):
+
+ self._find(tests, val, valname1, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if isclass(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).__func__
+
+ # Recurse to methods, properties, and nested classes.
+ if ((isfunction(val) or isclass(val) or
+ ismethod(val) or isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = '%s.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+
+# second-chance checker; if the default comparison doesn't
+# pass, then see if the expected output string contains flags that
+# tell us to ignore the output
+class NumpyOutputChecker(doctest.OutputChecker):
+ def check_output(self, want, got, optionflags):
+ ret = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ if not ret:
+ if "#random" in want:
+ return True
+
+ # it would be useful to normalize endianness so that
+ # bigendian machines don't fail all the tests (and there are
+ # actually some bigendian examples in the doctests). Let's try
+ # making them all little endian
+ got = got.replace("'>", "'<")
+ want = want.replace("'>", "'<")
+
+ # try to normalize out 32 and 64 bit default int sizes
+ for sz in [4, 8]:
+ got = got.replace("'<i%d'" % sz, "int")
+ want = want.replace("'<i%d'" % sz, "int")
+
+ ret = doctest.OutputChecker.check_output(self, want,
+ got, optionflags)
+
+ return ret
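
An editor's sketch, not part of the patch: the "#random" escape hatch above makes a
doctest pass whenever that marker appears in the *expected* output, which is how
examples with nondeterministic results are written, e.g.:

>>> np.random.rand(2)
array([ 0.41,  0.72])  #random: actual values vary per run
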
+
+
+# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
+# its constructor that blocks non-default arguments from being passed
+# down into doctest.DocTestCase
+class NumpyDocTestCase(npd.DocTestCase):
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, obj=None, result_var='_'):
+ self._result_var = result_var
+ self._nose_obj = obj
+ doctest.DocTestCase.__init__(self, test,
+ optionflags=optionflags,
+ setUp=setUp, tearDown=tearDown,
+ checker=checker)
+
+
+print_state = numpy.get_printoptions()
+
+class NumpyDoctest(npd.Doctest):
+ name = 'numpydoctest' # call nosetests with --with-numpydoctest
+ score = 1000 # load late, after doctest builtin
+
+ # always use whitespace and ellipsis options for doctests
+ doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+
+ # files that should be ignored for doctests
+ doctest_ignore = ['generate_numpy_api.py',
+ 'setup.py']
+
+ # Custom classes; class variables to allow subclassing
+ doctest_case_class = NumpyDocTestCase
+ out_check_class = NumpyOutputChecker
+ test_finder_class = NumpyDocTestFinder
+
+ # Don't use the standard doctest option handler; hard-code the option values
+ def options(self, parser, env=os.environ):
+ Plugin.options(self, parser, env)
+ # Test doctests in 'test' files / directories. Standard plugin default
+ # is False
+ self.doctest_tests = True
+ # Variable name; if defined, doctest results stored in this variable in
+ # the top-level namespace. None is the standard default
+ self.doctest_result_var = None
+
+ def configure(self, options, config):
+ # parent method sets enabled flag from command line --with-numpydoctest
+ Plugin.configure(self, options, config)
+ self.finder = self.test_finder_class()
+ self.parser = doctest.DocTestParser()
+ if self.enabled:
+ # Pull standard doctest out of plugin list; there's no reason to run
+ # both. In practice the Unplugger plugin above would cover us when
+ # run from a standard numpy.test() call; this is just in case
+ # someone wants to run our plugin outside the numpy.test() machinery
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != 'doctest']
+
+ def set_test_context(self, test):
+ """ Configure `test` object to set test context
+
+ We set the numpy / scipy standard doctest namespace
+
+ Parameters
+ ----------
+ test : test object
+ with ``globs`` dictionary defining namespace
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ `test` object modified in place
+ """
+ # set the namespace for tests
+ pkg_name = get_package_name(os.path.dirname(test.filename))
+
+ # Each doctest should execute in an environment equivalent to
+ # starting Python and executing "import numpy as np", and,
+ # for SciPy packages, an additional import of the local
+ # package (so that scipy.linalg.basic.py's doctests have an
+        # implicit "from scipy import linalg" as well).
+ #
+ # Note: __file__ allows the doctest in NoseTester to run
+ # without producing an error
+ test.globs = {'__builtins__':__builtins__,
+ '__file__':'__main__',
+ '__name__':'__main__',
+ 'np':numpy}
+ # add appropriate scipy import for SciPy tests
+ if 'scipy' in pkg_name:
+ p = pkg_name.split('.')
+ p2 = p[-1]
+ test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
+
+ # Override test loading to customize test context (with set_test_context
+ # method), set standard docstring options, and install our own test output
+ # checker
+ def loadTestsFromModule(self, module):
+ if not self.matches(module.__name__):
+ npd.log.debug("Doctest doesn't want module %s", module)
+ return
+ try:
+ tests = self.finder.find(module)
+ except AttributeError:
+ # nose allows module.__test__ = False; doctest does not and
+ # throws AttributeError
+ return
+ if not tests:
+ return
+ tests.sort()
+ module_file = src(module.__file__)
+ for test in tests:
+ if not test.examples:
+ continue
+ if not test.filename:
+ test.filename = module_file
+ # Set test namespace; test altered in place
+ self.set_test_context(test)
+ yield self.doctest_case_class(test,
+ optionflags=self.doctest_optflags,
+ checker=self.out_check_class(),
+ result_var=self.doctest_result_var)
+
+ # Add an afterContext method to nose.plugins.doctests.Doctest in order
+ # to restore print options to the original state after each doctest
+ def afterContext(self):
+ numpy.set_printoptions(**print_state)
+
+ # Ignore NumPy-specific build files that shouldn't be searched for tests
+ def wantFile(self, file):
+ bn = os.path.basename(file)
+ if bn in self.doctest_ignore:
+ return False
+ return npd.Doctest.wantFile(self, file)
+
+
+class Unplugger(object):
+ """ Nose plugin to remove named plugin late in loading
+
+ By default it removes the "doctest" plugin.
+ """
+ name = 'unplugger'
+ enabled = True # always enabled
+ score = 4000 # load late in order to be after builtins
+
+ def __init__(self, to_unplug='doctest'):
+ self.to_unplug = to_unplug
+
+ def options(self, parser, env):
+ pass
+
+ def configure(self, options, config):
+ # Pull named plugin out of plugins list
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != self.to_unplug]
+
+
+class KnownFailurePlugin(ErrorClassPlugin):
+ '''Plugin that installs a KNOWNFAIL error class for the
+    KnownFailureException exception. When it is raised,
+ the exception will be logged in the knownfail attribute of the
+ result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
+ exception will not be counted as an error or failure.'''
+ enabled = True
+ knownfail = ErrorClass(KnownFailureException,
+ label='KNOWNFAIL',
+ isfailure=False)
+
+ def options(self, parser, env=os.environ):
+ env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
+ parser.add_option('--no-knownfail', action='store_true',
+ dest='noKnownFail', default=env.get(env_opt, False),
+ help='Disable special handling of KnownFailure '
+ 'exceptions')
+
+ def configure(self, options, conf):
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noKnownFail', False)
+ if disable:
+ self.enabled = False
+
+KnownFailure = KnownFailurePlugin # backwards compat
+
+
+class FPUModeCheckPlugin(Plugin):
+ """
+ Plugin that checks the FPU mode before and after each test,
+ raising failures if the test changed the mode.
+ """
+
+ def prepareTestCase(self, test):
+ from numpy.core._multiarray_tests import get_fpu_mode
+
+ def run(result):
+ old_mode = get_fpu_mode()
+ test.test(result)
+ new_mode = get_fpu_mode()
+
+ if old_mode != new_mode:
+ try:
+ raise AssertionError(
+ "FPU mode changed from {0:#x} to {1:#x} during the "
+ "test".format(old_mode, new_mode))
+ except AssertionError:
+ result.addFailure(test, sys.exc_info())
+
+ return run
+
+
+# Class allows us to save the results of the tests in runTests - see runTests
+# method docstring for details
+class NumpyTestProgram(nose.core.TestProgram):
+ def runTests(self):
+ """Run Tests. Returns true on success, false on failure, and
+ sets self.success to the same value.
+
+ Because nose currently discards the test result object, but we need
+ to return it to the user, override TestProgram.runTests to retain
+ the result
+ """
+ if self.testRunner is None:
+ self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
+ verbosity=self.config.verbosity,
+ config=self.config)
+ plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+ if plug_runner is not None:
+ self.testRunner = plug_runner
+ self.result = self.testRunner.run(self.test)
+ self.success = self.result.wasSuccessful()
+ return self.success
--- /dev/null
+"""
+Nose test running.
+
+This module implements ``test()`` and ``bench()`` functions for NumPy modules.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import warnings
+from numpy.compat import basestring
+import numpy as np
+
+from .utils import import_nose, suppress_warnings
+
+
+__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
+           '_numpy_tester', 'import_nose', 'suppress_warnings']
+
+
+def get_package_name(filepath):
+ """
+ Given a path where a package is installed, determine its name.
+
+ Parameters
+ ----------
+ filepath : str
+ Path to a file. If the determination fails, "numpy" is returned.
+
+ Examples
+ --------
+ >>> np.testing.nosetester.get_package_name('nonsense')
+ 'numpy'
+
+ """
+
+ fullpath = filepath[:]
+ pkg_name = []
+ while 'site-packages' in filepath or 'dist-packages' in filepath:
+ filepath, p2 = os.path.split(filepath)
+ if p2 in ('site-packages', 'dist-packages'):
+ break
+ pkg_name.append(p2)
+
+ # if package name determination failed, just default to numpy/scipy
+ if not pkg_name:
+ if 'scipy' in fullpath:
+ return 'scipy'
+ else:
+ return 'numpy'
+
+ # otherwise, reverse to get correct order and return
+ pkg_name.reverse()
+
+ # don't include the outer egg directory
+ if pkg_name[0].endswith('.egg'):
+ pkg_name.pop(0)
+
+ return '.'.join(pkg_name)
+
+
+def run_module_suite(file_to_run=None, argv=None):
+ """
+ Run a test module.
+
+ Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
+ the command line
+
+ Parameters
+ ----------
+ file_to_run : str, optional
+ Path to test module, or None.
+ By default, run the module from which this function is called.
+ argv : list of strings
+ Arguments to be passed to the nose test runner. ``argv[0]`` is
+ ignored. All command line arguments accepted by ``nosetests``
+ will work. If it is the default value None, sys.argv is used.
+
+ .. versionadded:: 1.9.0
+
+ Examples
+ --------
+ Adding the following::
+
+        if __name__ == "__main__":
+ run_module_suite(argv=sys.argv)
+
+ at the end of a test module will run the tests when that module is
+ called in the python interpreter.
+
+ Alternatively, calling::
+
+ >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
+
+    from an interpreter will run all the test routines in 'test_matlib.py'.
+ """
+ if file_to_run is None:
+ f = sys._getframe(1)
+ file_to_run = f.f_locals.get('__file__', None)
+ if file_to_run is None:
+ raise AssertionError
+
+ if argv is None:
+ argv = sys.argv + [file_to_run]
+ else:
+ argv = argv + [file_to_run]
+
+ nose = import_nose()
+ from .noseclasses import KnownFailurePlugin
+ nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
+
+
+class NoseTester(object):
+ """
+ Nose test runner.
+
+ This class is made available as numpy.testing.Tester, and a test function
+ is typically added to a package's __init__.py like so::
+
+ from numpy.testing import Tester
+ test = Tester().test
+
+ Calling this test function finds and runs all tests associated with the
+ package and all its sub-packages.
+
+ Attributes
+ ----------
+ package_path : str
+ Full path to the package to test.
+ package_name : str
+ Name of the package to test.
+
+ Parameters
+ ----------
+ package : module, str or None, optional
+ The package to test. If a string, this should be the full path to
+ the package. If None (default), `package` is set to the module from
+ which `NoseTester` is initialized.
+ raise_warnings : None, str or sequence of warnings, optional
+ This specifies which warnings to configure as 'raise' instead
+ of being shown once during the test execution. Valid strings are:
+
+ - "develop" : equals ``(Warning,)``
+ - "release" : equals ``()``, don't raise on any warnings.
+
+ Default is "release".
+ depth : int, optional
+ If `package` is None, then this can be used to initialize from the
+ module of the caller of (the caller of (...)) the code that
+ initializes `NoseTester`. Default of 0 means the module of the
+ immediate caller; higher values are useful for utility routines that
+ want to initialize `NoseTester` objects on behalf of other code.
+
+ """
+ def __init__(self, package=None, raise_warnings="release", depth=0,
+ check_fpu_mode=False):
+ # Back-compat: 'None' used to mean either "release" or "develop"
+ # depending on whether this was a release or develop version of
+ # numpy. Those semantics were fine for testing numpy, but not so
+ # helpful for downstream projects like scipy that use
+ # numpy.testing. (They want to set this based on whether *they* are a
+ # release or develop version, not whether numpy is.) So we continue to
+ # accept 'None' for back-compat, but it's now just an alias for the
+ # default "release".
+ if raise_warnings is None:
+ raise_warnings = "release"
+
+ package_name = None
+ if package is None:
+ f = sys._getframe(1 + depth)
+ package_path = f.f_locals.get('__file__', None)
+ if package_path is None:
+ raise AssertionError
+ package_path = os.path.dirname(package_path)
+ package_name = f.f_locals.get('__name__', None)
+ elif isinstance(package, type(os)):
+ package_path = os.path.dirname(package.__file__)
+ package_name = getattr(package, '__name__', None)
+ else:
+ package_path = str(package)
+
+ self.package_path = package_path
+
+ # Find the package name under test; this name is used to limit coverage
+ # reporting (if enabled).
+ if package_name is None:
+ package_name = get_package_name(package_path)
+ self.package_name = package_name
+
+ # Set to "release" in constructor in maintenance branches.
+ self.raise_warnings = raise_warnings
+
+ # Whether to check for FPU mode changes
+ self.check_fpu_mode = check_fpu_mode
+
+ def _test_argv(self, label, verbose, extra_argv):
+ ''' Generate argv for nosetest command
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ see ``test`` docstring
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+
+ Returns
+ -------
+ argv : list
+ command line arguments that will be passed to nose
+ '''
+ argv = [__file__, self.package_path, '-s']
+ if label and label != 'full':
+ if not isinstance(label, basestring):
+ raise TypeError('Selection label should be a string')
+ if label == 'fast':
+ label = 'not slow'
+ argv += ['-A', label]
+ argv += ['--verbosity', str(verbose)]
+
+ # When installing with setuptools, and also in some other cases, the
+ # test_*.py files end up marked +x executable. Nose, by default, does
+ # not run files marked with +x as they might be scripts. However, in
+ # our case nose only looks for test_*.py files under the package
+ # directory, which should be safe.
+ argv += ['--exe']
+
+ if extra_argv:
+ argv += extra_argv
+ return argv
+
+ def _show_system_info(self):
+ nose = import_nose()
+
+ import numpy
+ print("NumPy version %s" % numpy.__version__)
+ relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
+ print("NumPy relaxed strides checking option:", relaxed_strides)
+ npdir = os.path.dirname(numpy.__file__)
+ print("NumPy is installed in %s" % npdir)
+
+ if 'scipy' in self.package_name:
+ import scipy
+ print("SciPy version %s" % scipy.__version__)
+ spdir = os.path.dirname(scipy.__file__)
+ print("SciPy is installed in %s" % spdir)
+
+ pyversion = sys.version.replace('\n', '')
+ print("Python version %s" % pyversion)
+ print("nose version %d.%d.%d" % nose.__versioninfo__)
+
+ def _get_custom_doctester(self):
+ """ Return instantiated plugin for doctests
+
+ Allows subclassing of this class to override doctester
+
+ A return value of None means use the nose builtin doctest plugin
+ """
+ from .noseclasses import NumpyDoctest
+ return NumpyDoctest()
+
+ def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, timer=False):
+ """
+ Run tests for module using nose.
+
+ This method does the heavy lifting for the `test` method. It takes all
+ the same arguments, for details see `test`.
+
+ See Also
+ --------
+ test
+
+ """
+ # fail with nice error message if nose is not present
+ import_nose()
+ # compile argv
+ argv = self._test_argv(label, verbose, extra_argv)
+ # our way of doing coverage
+ if coverage:
+ argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
+ '--cover-tests', '--cover-erase']
+
+ if timer:
+ if timer is True:
+ argv += ['--with-timer']
+ elif isinstance(timer, int):
+ argv += ['--with-timer', '--timer-top-n', str(timer)]
+
+ # construct list of plugins
+ import nose.plugins.builtin
+ from nose.plugins import EntryPointPluginManager
+ from .noseclasses import (KnownFailurePlugin, Unplugger,
+ FPUModeCheckPlugin)
+ plugins = [KnownFailurePlugin()]
+ plugins += [p() for p in nose.plugins.builtin.plugins]
+ if self.check_fpu_mode:
+ plugins += [FPUModeCheckPlugin()]
+ argv += ["--with-fpumodecheckplugin"]
+ try:
+ # External plugins (like nose-timer)
+ entrypoint_manager = EntryPointPluginManager()
+ entrypoint_manager.loadPlugins()
+ plugins += [p for p in entrypoint_manager.plugins]
+ except ImportError:
+ # Relies on pkg_resources, not a hard dependency
+ pass
+
+ # add doctesting if required
+ doctest_argv = '--with-doctest' in argv
+ if doctests == False and doctest_argv:
+ doctests = True
+ plug = self._get_custom_doctester()
+ if plug is None:
+ # use standard doctesting
+ if doctests and not doctest_argv:
+ argv += ['--with-doctest']
+ else: # custom doctesting
+ if doctest_argv: # in fact the unplugger would take care of this
+ argv.remove('--with-doctest')
+ plugins += [Unplugger('doctest'), plug]
+ if doctests:
+ argv += ['--with-' + plug.name]
+ return argv, plugins
+
+ def test(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, raise_warnings=None,
+ timer=False):
+ """
+ Run tests for module using nose.
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ Identifies the tests to run. This can be a string to pass to
+ the nosetests executable with the '-A' option, or one of several
+ special values. Special values are:
+ * 'fast' - the default - which corresponds to the ``nosetests -A``
+ option of 'not slow'.
+ * 'full' - fast (as above) and slow tests as in the
+ 'no -A' option to nosetests - this is the same as ''.
+ * None or '' - run all tests.
+ attribute_identifier - string passed directly to nosetests as '-A'.
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+ doctests : bool, optional
+ If True, run doctests in module. Default is False.
+ coverage : bool, optional
+ If True, report coverage of NumPy code. Default is False.
+ (This requires the `coverage module:
+ <http://nedbatchelder.com/code/modules/coverage.html>`_).
+ raise_warnings : None, str or sequence of warnings, optional
+ This specifies which warnings to configure as 'raise' instead
+ of being shown once during the test execution. Valid strings are:
+
+ - "develop" : equals ``(Warning,)``
+ - "release" : equals ``()``, don't raise on any warnings.
+
+ The default is to use the class initialization value.
+ timer : bool or int, optional
+ Timing of individual tests with ``nose-timer`` (which needs to be
+ installed). If True, time tests and report on all of them.
+ If an integer (say ``N``), report timing results for ``N`` slowest
+ tests.
+
+ Returns
+ -------
+ result : object
+ Returns the result of running the tests as a
+ ``nose.result.TextTestResult`` object.
+
+ Notes
+ -----
+ Each NumPy module exposes `test` in its namespace to run all tests for it.
+ For example, to run all tests for numpy.lib:
+
+ >>> np.lib.test() #doctest: +SKIP
+
+ Examples
+ --------
+ >>> result = np.lib.test() #doctest: +SKIP
+ Running unit tests for numpy.lib
+ ...
+ Ran 976 tests in 3.933s
+
+ OK
+
+ >>> result.errors #doctest: +SKIP
+ []
+ >>> result.knownfail #doctest: +SKIP
+ []
+ """
+
+ # cap verbosity at 3 because nose becomes *very* verbose beyond that
+ verbose = min(verbose, 3)
+
+ from . import utils
+ utils.verbose = verbose
+
+ argv, plugins = self.prepare_test_args(
+ label, verbose, extra_argv, doctests, coverage, timer)
+
+ if doctests:
+ print("Running unit tests and doctests for %s" % self.package_name)
+ else:
+ print("Running unit tests for %s" % self.package_name)
+
+ self._show_system_info()
+
+ # reset doctest state on every run
+ import doctest
+ doctest.master = None
+
+ if raise_warnings is None:
+ raise_warnings = self.raise_warnings
+
+ _warn_opts = dict(develop=(Warning,),
+ release=())
+ if isinstance(raise_warnings, basestring):
+ raise_warnings = _warn_opts[raise_warnings]
+
+ with suppress_warnings("location") as sup:
+ # Reset the warning filters to the default state,
+ # so that running the tests is more repeatable.
+ warnings.resetwarnings()
+ # Set all warnings to 'warn', this is because the default 'once'
+ # has the bad property of possibly shadowing later warnings.
+ warnings.filterwarnings('always')
+ # Force the requested warnings to raise
+ for warningtype in raise_warnings:
+ warnings.filterwarnings('error', category=warningtype)
+ # Filter out annoying import messages.
+ sup.filter(message='Not importing directory')
+ sup.filter(message="numpy.dtype size changed")
+ sup.filter(message="numpy.ufunc size changed")
+ sup.filter(category=np.ModuleDeprecationWarning)
+ # Filter out boolean '-' deprecation messages. This allows
+ # older versions of scipy to test without a flood of messages.
+ sup.filter(message=".*boolean negative.*")
+ sup.filter(message=".*boolean subtract.*")
+ # Filter out distutils cpu warnings (could be localized to
+ # distutils tests). ASV has problems with top level import,
+ # so fetch module for suppression here.
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ from ...distutils import cpuinfo
+ sup.filter(category=UserWarning, module=cpuinfo)
+ # See #7949: Filter out deprecation warnings due to the -3 flag to
+ # python 2
+ if sys.version_info.major == 2 and sys.py3kwarning:
+ # This is very specific, so using the fragile module filter
+ # is fine
+ import threading
+ sup.filter(DeprecationWarning,
+ r"sys\.exc_clear\(\) not supported in 3\.x",
+ module=threading)
+ sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
+ sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
+ sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
+ sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
+ sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
+ # Filter out some deprecation warnings inside nose 1.3.7 when run
+ # on python 3.5b2. See
+ # https://github.com/nose-devs/nose/issues/929
+ # Note: it is hard to filter based on module for sup (lineno could
+ # be implemented).
+ warnings.filterwarnings("ignore", message=".*getargspec.*",
+ category=DeprecationWarning,
+ module=r"nose\.")
+
+ from .noseclasses import NumpyTestProgram
+
+ t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
+
+ return t.result
+
+ def bench(self, label='fast', verbose=1, extra_argv=None):
+ """
+ Run benchmarks for module using nose.
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ Identifies the benchmarks to run. This can be a string to pass to
+ the nosetests executable with the '-A' option, or one of several
+ special values. Special values are:
+ * 'fast' - the default - which corresponds to the ``nosetests -A``
+ option of 'not slow'.
+ * 'full' - fast (as above) and slow benchmarks as in the
+ 'no -A' option to nosetests - this is the same as ''.
+ * None or '' - run all tests.
+ attribute_identifier - string passed directly to nosetests as '-A'.
+ verbose : int, optional
+ Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+
+ Returns
+ -------
+ success : bool
+ Returns True if running the benchmarks works, False if an error
+ occurred.
+
+ Notes
+ -----
+ Benchmarks are like tests, but have names starting with "bench" instead
+ of "test", and can be found under the "benchmarks" sub-directory of the
+ module.
+
+ Each NumPy module exposes `bench` in its namespace to run all benchmarks
+ for it.
+
+ Examples
+ --------
+ >>> success = np.lib.bench() #doctest: +SKIP
+ Running benchmarks for numpy.lib
+ ...
+ using 562341 items:
+ unique:
+ 0.11
+ unique1d:
+ 0.11
+ ratio: 1.0
+ nUnique: 56230 == 56230
+ ...
+ OK
+
+ >>> success #doctest: +SKIP
+ True
+
+ """
+
+ print("Running benchmarks for %s" % self.package_name)
+ self._show_system_info()
+
+ argv = self._test_argv(label, verbose, extra_argv)
+ argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
+
+ # import nose or make informative error
+ nose = import_nose()
+
+ # get plugin to disable doctests
+ from .noseclasses import Unplugger
+ add_plugins = [Unplugger('doctest')]
+
+ return nose.run(argv=argv, addplugins=add_plugins)
+
+
+def _numpy_tester():
+ if hasattr(np, "__version__") and ".dev0" in np.__version__:
+ mode = "develop"
+ else:
+ mode = "release"
+ return NoseTester(raise_warnings=mode, depth=1,
+ check_fpu_mode=True)
--- /dev/null
+"""
+tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
+
+Unless stated otherwise in the source files, all code is copyright 2010 David
+Wolever <david@wolever.net>. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of David Wolever.
+
+"""
+import re
+import sys
+import inspect
+import warnings
+from functools import wraps
+from types import MethodType as MethodType
+from collections import namedtuple
+
+try:
+ from collections import OrderedDict as MaybeOrderedDict
+except ImportError:
+ MaybeOrderedDict = dict
+
+from unittest import TestCase
+
+PY3 = sys.version_info[0] == 3
+PY2 = sys.version_info[0] == 2
+
+
+if PY3:
+ # Python 3 doesn't have an InstanceType, so just use a dummy type.
+ class InstanceType():
+ pass
+ lzip = lambda *a: list(zip(*a))
+ text_type = str
+ string_types = str,
+ bytes_type = bytes
+ def make_method(func, instance, type):
+ if instance is None:
+ return func
+ return MethodType(func, instance)
+else:
+ from types import InstanceType
+ lzip = zip
+ text_type = unicode
+ bytes_type = str
+ string_types = basestring,
+ def make_method(func, instance, type):
+ return MethodType(func, instance, type)
+
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+ """ Represents a single parameter to a test case.
+
+ For example::
+
+ >>> p = param("foo", bar=16)
+ >>> p
+ param("foo", bar=16)
+ >>> p.args
+ ('foo', )
+ >>> p.kwargs
+ {'bar': 16}
+
+ Intended to be used as an argument to ``@parameterized``::
+
+ @parameterized([
+ param("foo", bar=16),
+ ])
+ def test_stuff(foo, bar=16):
+ pass
+ """
+
+    def __new__(cls, *args, **kwargs):
+ return _param.__new__(cls, args, kwargs)
+
+ @classmethod
+ def explicit(cls, args=None, kwargs=None):
+ """ Creates a ``param`` by explicitly specifying ``args`` and
+ ``kwargs``::
+
+ >>> param.explicit([1,2,3])
+ param(*(1, 2, 3))
+ >>> param.explicit(kwargs={"foo": 42})
+        param(*(), **{"foo": 42})
+ """
+ args = args or ()
+ kwargs = kwargs or {}
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_decorator(cls, args):
+ """ Returns an instance of ``param()`` for ``@parameterized`` argument
+ ``args``::
+
+ >>> param.from_decorator((42, ))
+ param(args=(42, ), kwargs={})
+ >>> param.from_decorator("foo")
+ param(args=("foo", ), kwargs={})
+ """
+ if isinstance(args, param):
+ return args
+ elif isinstance(args, string_types):
+ args = (args, )
+ try:
+ return cls(*args)
+ except TypeError as e:
+ if "after * must be" not in str(e):
+ raise
+ raise TypeError(
+ "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
+ %(args, args),
+ )
+
+ def __repr__(self):
+ return "param(*%r, **%r)" %self
+
+
+class QuietOrderedDict(MaybeOrderedDict):
+ """ When OrderedDict is available, use it to make sure that the kwargs in
+ doc strings are consistently ordered. """
+ __str__ = dict.__str__
+ __repr__ = dict.__repr__
+
+
+def parameterized_argument_value_pairs(func, p):
+ """Return tuples of parameterized arguments and their values.
+
+ This is useful if you are writing your own doc_func
+ function and need to know the values for each parameter name::
+
+ >>> def func(a, foo=None, bar=42, **kwargs): pass
+ >>> p = param(1, foo=7, extra=99)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
+
+ If the function's first argument is named ``self`` then it will be
+ ignored::
+
+ >>> def func(self, a): pass
+ >>> p = param(1)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("a", 1)]
+
+ Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
+
+ >>> def func(foo, *args): pass
+ >>> p = param(1)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("foo", 1)]
+ >>> p = param(1, 16)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("foo", 1), ("*args", (16, ))]
+ """
+ argspec = inspect.getargspec(func)
+ arg_offset = 1 if argspec.args[:1] == ["self"] else 0
+
+ named_args = argspec.args[arg_offset:]
+
+ result = lzip(named_args, p.args)
+ named_args = argspec.args[len(result) + arg_offset:]
+ varargs = p.args[len(result):]
+
+ result.extend([
+ (name, p.kwargs.get(name, default))
+ for (name, default)
+ in zip(named_args, argspec.defaults or [])
+ ])
+
+ seen_arg_names = set([ n for (n, _) in result ])
+ keywords = QuietOrderedDict(sorted([
+ (name, p.kwargs[name])
+ for name in p.kwargs
+ if name not in seen_arg_names
+ ]))
+
+ if varargs:
+ result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))
+
+ if keywords:
+ result.append(("**%s" %(argspec.keywords, ), keywords))
+
+ return result
+
+def short_repr(x, n=64):
+ """ A shortened repr of ``x`` which is guaranteed to be ``unicode``::
+
+ >>> short_repr("foo")
+ u"foo"
+ >>> short_repr("123456789", n=4)
+ u"12...89"
+ """
+
+ x_repr = repr(x)
+ if isinstance(x_repr, bytes_type):
+ try:
+ x_repr = text_type(x_repr, "utf-8")
+ except UnicodeDecodeError:
+ x_repr = text_type(x_repr, "latin1")
+ if len(x_repr) > n:
+ x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
+ return x_repr
+
+def default_doc_func(func, num, p):
+ if func.__doc__ is None:
+ return None
+
+ all_args_with_values = parameterized_argument_value_pairs(func, p)
+
+ # Assumes that the function passed is a bound method.
+ descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values]
+
+ # The documentation might be a multiline string, so split it
+ # and just work with the first string, ignoring the period
+ # at the end if there is one.
+ first, nl, rest = func.__doc__.lstrip().partition("\n")
+ suffix = ""
+ if first.endswith("."):
+ suffix = "."
+ first = first[:-1]
+ args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
+ return "".join([first.rstrip(), args, suffix, nl, rest])
+
+def default_name_func(func, num, p):
+ base_name = func.__name__
+ name_suffix = "_%s" %(num, )
+ if len(p.args) > 0 and isinstance(p.args[0], string_types):
+ name_suffix += "_" + parameterized.to_safe_name(p.args[0])
+ return base_name + name_suffix
+
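+# Illustration (not part of the upstream module): for
+#     @parameterized([param("one", 1)])
+#     def test_add(self, name, x): ...
+# default_name_func(test_add, 0, param("one", 1)) yields "test_add_0_one":
+# the ordinal suffix, then the first positional arg run through to_safe_name.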
+
+# force nose for numpy purposes.
+_test_runner_override = 'nose'
+_test_runner_guess = False
+_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
+_test_runner_aliases = {
+ "_pytest": "pytest",
+}
+
+def set_test_runner(name):
+ global _test_runner_override
+ if name not in _test_runners:
+ raise TypeError(
+ "Invalid test runner: %r (must be one of: %s)"
+ %(name, ", ".join(_test_runners)),
+ )
+ _test_runner_override = name
+
+def detect_runner():
+ """ Guess which test runner we're using by traversing the stack and looking
+ for the first matching module. This *should* be reasonably safe, as
+        it's done during test discovery where the test runner should be the
+ stack frame immediately outside. """
+ if _test_runner_override is not None:
+ return _test_runner_override
+ global _test_runner_guess
+ if _test_runner_guess is False:
+ stack = inspect.stack()
+ for record in reversed(stack):
+ frame = record[0]
+ module = frame.f_globals.get("__name__").partition(".")[0]
+ if module in _test_runner_aliases:
+ module = _test_runner_aliases[module]
+ if module in _test_runners:
+ _test_runner_guess = module
+ break
+ if record[1].endswith("python2.6/unittest.py"):
+ _test_runner_guess = "unittest"
+ break
+ else:
+ _test_runner_guess = None
+ return _test_runner_guess
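+
+# Note: _test_runner_override is hard-coded to 'nose' above for NumPy, so
+# detect_runner() always returns "nose" here; the stack-walking fallback only
+# runs when the override is None (as in the upstream parameterized package).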
+
+class parameterized(object):
+ """ Parameterize a test case::
+
+ class TestInt(object):
+ @parameterized([
+ ("A", 10),
+ ("F", 15),
+ param("10", 42, base=42)
+ ])
+ def test_int(self, input, expected, base=16):
+ actual = int(input, base=base)
+ assert_equal(actual, expected)
+
+ @parameterized([
+            (2, 3, 5),
+ (3, 5, 8),
+ ])
+ def test_add(a, b, expected):
+ assert_equal(a + b, expected)
+ """
+
+ def __init__(self, input, doc_func=None):
+ self.get_input = self.input_as_callable(input)
+ self.doc_func = doc_func or default_doc_func
+
+ def __call__(self, test_func):
+ self.assert_not_in_testcase_subclass()
+
+ @wraps(test_func)
+ def wrapper(test_self=None):
+ test_cls = test_self and type(test_self)
+ if test_self is not None:
+ if issubclass(test_cls, InstanceType):
+ raise TypeError((
+ "@parameterized can't be used with old-style classes, but "
+ "%r has an old-style class. Consider using a new-style "
+ "class, or '@parameterized.expand' "
+ "(see http://stackoverflow.com/q/54867/71522 for more "
+ "information on old-style classes)."
+ ) %(test_self, ))
+
+ original_doc = wrapper.__doc__
+ for num, args in enumerate(wrapper.parameterized_input):
+ p = param.from_decorator(args)
+ unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
+ try:
+ wrapper.__doc__ = nose_tuple[0].__doc__
+ # Nose uses `getattr(instance, test_func.__name__)` to get
+ # a method bound to the test instance (as opposed to a
+ # method bound to the instance of the class created when
+ # tests were being enumerated). Set a value here to make
+ # sure nose can get the correct test method.
+ if test_self is not None:
+ setattr(test_cls, test_func.__name__, unbound_func)
+ yield nose_tuple
+ finally:
+ if test_self is not None:
+ delattr(test_cls, test_func.__name__)
+ wrapper.__doc__ = original_doc
+ wrapper.parameterized_input = self.get_input()
+ wrapper.parameterized_func = test_func
+ test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )
+ return wrapper
+
+ def param_as_nose_tuple(self, test_self, func, num, p):
+ nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
+ nose_func.__doc__ = self.doc_func(func, num, p)
+ # Track the unbound function because we need to setattr the unbound
+ # function onto the class for nose to work (see comments above), and
+ # Python 3 doesn't let us pull the function out of a bound method.
+ unbound_func = nose_func
+ if test_self is not None:
+ # Under nose on Py2 we need to return an unbound method to make
+ # sure that the `self` in the method is properly shared with the
+ # `self` used in `setUp` and `tearDown`. But only there. Everyone
+ # else needs a bound method.
+ func_self = (
+ None if PY2 and detect_runner() == "nose" else
+ test_self
+ )
+ nose_func = make_method(nose_func, func_self, type(test_self))
+ return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
+
+ def assert_not_in_testcase_subclass(self):
+ parent_classes = self._terrible_magic_get_defining_classes()
+ if any(issubclass(cls, TestCase) for cls in parent_classes):
+ raise Exception("Warning: '@parameterized' tests won't work "
+ "inside subclasses of 'TestCase' - use "
+ "'@parameterized.expand' instead.")
+
+ def _terrible_magic_get_defining_classes(self):
+ """ Returns the set of parent classes of the class currently being defined.
+ Will likely only work if called from the ``parameterized`` decorator.
+ This function is entirely @brandon_rhodes's fault, as he suggested
+ the implementation: http://stackoverflow.com/a/8793684/71522
+ """
+ stack = inspect.stack()
+ if len(stack) <= 4:
+ return []
+ frame = stack[4]
+ code_context = frame[4] and frame[4][0].strip()
+ if not (code_context and code_context.startswith("class ")):
+ return []
+ _, _, parents = code_context.partition("(")
+ parents, _, _ = parents.partition(")")
+ return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+ @classmethod
+ def input_as_callable(cls, input):
+ if callable(input):
+ return lambda: cls.check_input_values(input())
+ input_values = cls.check_input_values(input)
+ return lambda: input_values
+
+ @classmethod
+ def check_input_values(cls, input_values):
+ # Explicitly convert non-list inputs to a list so that:
+ # 1. A helpful exception will be raised if they aren't iterable, and
+ # 2. Generators are unwrapped exactly once (otherwise `nosetests
+ # --processes=n` has issues; see:
+ # https://github.com/wolever/nose-parameterized/pull/31)
+ if not isinstance(input_values, list):
+ input_values = list(input_values)
+ return [ param.from_decorator(p) for p in input_values ]
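+    # For illustration: check_input_values(x for x in [(1,), (2,)]) unwraps
+    # the generator exactly once and returns [param(1), param(2)].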
+
+ @classmethod
+ def expand(cls, input, name_func=None, doc_func=None, **legacy):
+ """ A "brute force" method of parameterizing test cases. Creates new
+ test cases and injects them into the namespace that the wrapped
+ function is being defined in. Useful for parameterizing tests in
+            subclasses of 'TestCase', where Nose test generators don't work.
+
+ >>> @parameterized.expand([("foo", 1, 2)])
+ ... def test_add1(name, input, expected):
+ ... actual = add1(input)
+ ... assert_equal(actual, expected)
+ ...
+ >>> locals()
+ ... 'test_add1_foo_0': <function ...> ...
+ >>>
+ """
+
+ if "testcase_func_name" in legacy:
+ warnings.warn("testcase_func_name= is deprecated; use name_func=",
+ DeprecationWarning, stacklevel=2)
+ if not name_func:
+ name_func = legacy["testcase_func_name"]
+
+ if "testcase_func_doc" in legacy:
+ warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
+ DeprecationWarning, stacklevel=2)
+ if not doc_func:
+ doc_func = legacy["testcase_func_doc"]
+
+ doc_func = doc_func or default_doc_func
+ name_func = name_func or default_name_func
+
+ def parameterized_expand_wrapper(f, instance=None):
+ stack = inspect.stack()
+ frame = stack[1]
+ frame_locals = frame[0].f_locals
+
+ parameters = cls.input_as_callable(input)()
+ for num, p in enumerate(parameters):
+ name = name_func(f, num, p)
+ frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+ frame_locals[name].__doc__ = doc_func(f, num, p)
+
+ f.__test__ = False
+ return parameterized_expand_wrapper
+
+ @classmethod
+ def param_as_standalone_func(cls, p, func, name):
+ @wraps(func)
+ def standalone_func(*a):
+ return func(*(a + p.args), **p.kwargs)
+ standalone_func.__name__ = name
+
+ # place_as is used by py.test to determine what source file should be
+ # used for this test.
+ standalone_func.place_as = func
+
+ # Remove __wrapped__ because py.test will try to look at __wrapped__
+ # to determine which parameters should be used with this test case,
+ # and obviously we don't need it to do any parameterization.
+ try:
+ del standalone_func.__wrapped__
+ except AttributeError:
+ pass
+ return standalone_func
+
+ @classmethod
+ def to_safe_name(cls, s):
+ return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
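+
+# e.g. to_safe_name("foo bar!") == "foo_bar_" (illustrative): runs of
+# characters outside [a-zA-Z0-9_] collapse to single underscores.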
--- /dev/null
+"""
+Pytest test running.
+
+This module implements the ``test()`` function for NumPy modules. The usual
+boilerplate for doing that is to put the following in the module
+``__init__.py`` file::
+
+ from numpy.testing import PytestTester
+ test = PytestTester(__name__).test
+ del PytestTester
+
+
+Warnings filtering and other runtime settings should be dealt with in the
+``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
+whether or not that file is found as follows:
+
+* ``pytest.ini`` is present (develop mode)
+    All warnings except those explicitly filtered out are raised as errors.
+* ``pytest.ini`` is absent (release mode)
+ DeprecationWarnings and PendingDeprecationWarnings are ignored, other
+ warnings are passed through.
+
+In practice, tests run from the numpy repo are run in develop mode. That
+includes the standard ``python runtests.py`` invocation.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import os
+
+__all__ = ['PytestTester']
+
+
+def _show_numpy_info():
+ import numpy as np
+
+ print("NumPy version %s" % np.__version__)
+ relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous
+ print("NumPy relaxed strides checking option:", relaxed_strides)
+
+
+class PytestTester(object):
+ """
+ Pytest test runner.
+
+ This class is made available in ``numpy.testing``, and a test function
+ is typically added to a package's __init__.py like so::
+
+ from numpy.testing import PytestTester
+ test = PytestTester(__name__).test
+ del PytestTester
+
+ Calling this test function finds and runs all tests associated with the
+ module and all its sub-modules.
+
+ Attributes
+ ----------
+ module_name : str
+ Full path to the package to test.
+
+ Parameters
+ ----------
+    module_name : str
+ The name of the module to test.
+
+ """
+ def __init__(self, module_name):
+ self.module_name = module_name
+
+ def __call__(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, durations=-1, tests=None):
+ """
+ Run tests for module using pytest.
+
+ Parameters
+ ----------
+ label : {'fast', 'full'}, optional
+ Identifies the tests to run. When set to 'fast', tests decorated
+ with `pytest.mark.slow` are skipped, when 'full', the slow marker
+ is ignored.
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-3. Default is 1.
+ extra_argv : list, optional
+            List with any extra arguments to pass to pytest.
+ doctests : bool, optional
+ .. note:: Not supported
+ coverage : bool, optional
+ If True, report coverage of NumPy code. Default is False.
+ Requires installation of (pip) pytest-cov.
+ durations : int, optional
+            If < 0, do nothing; if 0, report the time of all tests; if > 0,
+            report the time of the `durations` slowest tests. Default is -1.
+ tests : test or list of tests
+ Tests to be executed with pytest '--pyargs'
+
+ Returns
+ -------
+ result : bool
+            Return True on success, False otherwise.
+
+ Notes
+ -----
+ Each NumPy module exposes `test` in its namespace to run all tests for it.
+ For example, to run all tests for numpy.lib:
+
+ >>> np.lib.test() #doctest: +SKIP
+
+ Examples
+ --------
+ >>> result = np.lib.test() #doctest: +SKIP
+        ...
+        1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
+        >>> result
+        True
+
+ """
+ import pytest
+ import warnings
+
+ #FIXME This is no longer needed? Assume it was for use in tests.
+ # cap verbosity at 3, which is equivalent to the pytest '-vv' option
+ #from . import utils
+ #verbose = min(int(verbose), 3)
+ #utils.verbose = verbose
+ #
+
+ module = sys.modules[self.module_name]
+ module_path = os.path.abspath(module.__path__[0])
+
+ # setup the pytest arguments
+ pytest_args = ["-l"]
+
+ # offset verbosity. The "-q" cancels a "-v".
+ pytest_args += ["-q"]
+
+ # Filter out distutils cpu warnings (could be localized to
+ # distutils tests). ASV has problems with top level import,
+ # so fetch module for suppression here.
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ from numpy.distutils import cpuinfo
+
+ # Filter out annoying import messages. Want these in both develop and
+ # release mode.
+ pytest_args += [
+ "-W ignore:Not importing directory",
+ "-W ignore:numpy.dtype size changed",
+ "-W ignore:numpy.ufunc size changed",
+ "-W ignore::UserWarning:cpuinfo",
+ ]
+
+ if doctests:
+ raise ValueError("Doctests not supported")
+
+ if extra_argv:
+ pytest_args += list(extra_argv)
+
+ if verbose > 1:
+ pytest_args += ["-" + "v"*(verbose - 1)]
+
+ if coverage:
+ pytest_args += ["--cov=" + module_path]
+
+ if label == "fast":
+ pytest_args += ["-m", "not slow"]
+ elif label != "full":
+ pytest_args += ["-m", label]
+
+ if durations >= 0:
+ pytest_args += ["--durations=%s" % durations]
+
+ if tests is None:
+ tests = [self.module_name]
+
+ pytest_args += ["--pyargs"] + list(tests)
+
+ # run tests.
+ _show_numpy_info()
+
+ try:
+ code = pytest.main(pytest_args)
+ except SystemExit as exc:
+ code = exc.code
+
+ return code == 0
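+
+# Illustrative usage (assuming an installed numpy):
+#
+#     import numpy as np
+#     ok = np.lib.test(label='full', verbose=2, extra_argv=['-x'])
+#     # ok is True iff the pytest run exited with code 0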
--- /dev/null
+"""
+Utility function to facilitate testing.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import re
+import gc
+import operator
+import warnings
+from functools import partial, wraps
+import shutil
+import contextlib
+from tempfile import mkdtemp, mkstemp
+from unittest.case import SkipTest
+from warnings import WarningMessage
+import pprint
+
+from numpy.core import (
+ float32, empty, arange, array_repr, ndarray, isnat, array)
+from numpy.lib.utils import deprecate
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+__all__ = [
+ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
+ 'assert_array_equal', 'assert_array_less', 'assert_string_equal',
+ 'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
+ 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
+ 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
+ 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
+ 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
+ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
+ 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
+ '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
+ ]
+
+
+class KnownFailureException(Exception):
+ '''Raise this exception to mark a test as a known failing test.'''
+ pass
+
+
+KnownFailureTest = KnownFailureException # backwards compat
+verbose = 0
+
+IS_PYPY = '__pypy__' in sys.modules
+HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
+
+
+def import_nose():
+ """ Import nose only when needed.
+ """
+ nose_is_good = True
+ minimum_nose_version = (1, 0, 0)
+ try:
+ import nose
+ except ImportError:
+ nose_is_good = False
+ else:
+ if nose.__versioninfo__ < minimum_nose_version:
+ nose_is_good = False
+
+ if not nose_is_good:
+ msg = ('Need nose >= %d.%d.%d for tests - see '
+ 'http://nose.readthedocs.io' %
+ minimum_nose_version)
+ raise ImportError(msg)
+
+ return nose
+
+
+def assert_(val, msg=''):
+ """
+ Assert that works in release mode.
+ Accepts callable msg to allow deferring evaluation until failure.
+
+ The Python built-in ``assert`` does not work when executing code in
+ optimized mode (the ``-O`` flag) - no byte-code is generated for it.
+
+ For documentation on usage, refer to the Python documentation.
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ if not val:
+ try:
+ smsg = msg()
+ except TypeError:
+ smsg = msg
+ raise AssertionError(smsg)
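+
+# Example (illustrative): assert_ accepts a callable msg so that building an
+# expensive message is deferred until the assertion actually fails:
+#
+#     assert_(a.shape == b.shape,
+#             msg=lambda: "shape mismatch: %r vs %r" % (a.shape, b.shape))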
+
+
+def gisnan(x):
+ """like isnan, but always raise an error if type not supported instead of
+ returning a TypeError object.
+
+ Notes
+ -----
+ isnan and other ufunc sometimes return a NotImplementedType object instead
+ of raising any exception. This function is a wrapper to make sure an
+ exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isnan
+ st = isnan(x)
+ if isinstance(st, type(NotImplemented)):
+ raise TypeError("isnan not supported for this type")
+ return st
+
+
+def gisfinite(x):
+ """like isfinite, but always raise an error if type not supported instead of
+ returning a TypeError object.
+
+ Notes
+ -----
+ isfinite and other ufunc sometimes return a NotImplementedType object instead
+ of raising any exception. This function is a wrapper to make sure an
+ exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isfinite, errstate
+ with errstate(invalid='ignore'):
+ st = isfinite(x)
+ if isinstance(st, type(NotImplemented)):
+ raise TypeError("isfinite not supported for this type")
+ return st
+
+
+def gisinf(x):
+ """like isinf, but always raise an error if type not supported instead of
+ returning a TypeError object.
+
+ Notes
+ -----
+ isinf and other ufunc sometimes return a NotImplementedType object instead
+ of raising any exception. This function is a wrapper to make sure an
+ exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isinf, errstate
+ with errstate(invalid='ignore'):
+ st = isinf(x)
+ if isinstance(st, type(NotImplemented)):
+ raise TypeError("isinf not supported for this type")
+ return st
+
+
+@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
+ "Use numpy.random.rand instead.")
+def rand(*args):
+ """Returns an array of random numbers with the given shape.
+
+ This only uses the standard library, so it is useful for testing purposes.
+ """
+ import random
+ from numpy.core import zeros, float64
+ results = zeros(args, float64)
+ f = results.flat
+ for i in range(len(f)):
+ f[i] = random.random()
+ return results
+
+
+if os.name == 'nt':
+ # Code "stolen" from enthought/debug/memusage.py
+ def GetPerformanceAttributes(object, counter, instance=None,
+ inum=-1, format=None, machine=None):
+ # NOTE: Many counters require 2 samples to give accurate results,
+ # including "% Processor Time" (as by definition, at any instant, a
+ # thread's CPU usage is either 0 or 100). To read counters like this,
+ # you should copy this function, but keep the counter open, and call
+ # CollectQueryData() each time you need to know.
+ # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
+ # My older explanation for this was that the "AddCounter" process forced
+ # the CPU to 100%, but the above makes more sense :)
+ import win32pdh
+ if format is None:
+ format = win32pdh.PDH_FMT_LONG
+ path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
+ hq = win32pdh.OpenQuery()
+ try:
+ hc = win32pdh.AddCounter(hq, path)
+ try:
+ win32pdh.CollectQueryData(hq)
+ type, val = win32pdh.GetFormattedCounterValue(hc, format)
+ return val
+ finally:
+ win32pdh.RemoveCounter(hc)
+ finally:
+ win32pdh.CloseQuery(hq)
+
+ def memusage(processName="python", instance=0):
+ # from win32pdhutil, part of the win32all package
+ import win32pdh
+ return GetPerformanceAttributes("Process", "Virtual Bytes",
+ processName, instance,
+ win32pdh.PDH_FMT_LONG, None)
+elif sys.platform[:5] == 'linux':
+
+ def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
+ """
+ Return virtual memory size in bytes of the running python.
+
+ """
+ try:
+ f = open(_proc_pid_stat, 'r')
+ l = f.readline().split(' ')
+ f.close()
+ return int(l[22])
+ except Exception:
+ return
+else:
+ def memusage():
+ """
+ Return memory usage of running python. [Not implemented]
+
+ """
+ raise NotImplementedError
+
+
+if sys.platform[:5] == 'linux':
+ def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
+ _load_time=[]):
+ """
+ Return number of jiffies elapsed.
+
+ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. See man 5 proc.
+
+ """
+ import time
+ if not _load_time:
+ _load_time.append(time.time())
+ try:
+ f = open(_proc_pid_stat, 'r')
+ l = f.readline().split(' ')
+ f.close()
+ return int(l[13])
+ except Exception:
+ return int(100*(time.time()-_load_time[0]))
+else:
+    # os.getpid is not available on all platforms.
+ # Using time is safe but inaccurate, especially when process
+ # was suspended or sleeping.
+ def jiffies(_load_time=[]):
+ """
+ Return number of jiffies elapsed.
+
+ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. See man 5 proc.
+
+ """
+ import time
+ if not _load_time:
+ _load_time.append(time.time())
+ return int(100*(time.time()-_load_time[0]))
+
+
+def build_err_msg(arrays, err_msg, header='Items are not equal:',
+ verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
+ msg = ['\n' + header]
+ if err_msg:
+ if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
+ msg = [msg[0] + ' ' + err_msg]
+ else:
+ msg.append(err_msg)
+ if verbose:
+ for i, a in enumerate(arrays):
+
+ if isinstance(a, ndarray):
+ # precision argument is only needed if the objects are ndarrays
+ r_func = partial(array_repr, precision=precision)
+ else:
+ r_func = repr
+
+ try:
+ r = r_func(a)
+ except Exception as exc:
+ r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
+ if r.count('\n') > 3:
+ r = '\n'.join(r.splitlines()[:3])
+ r += '...'
+ msg.append(' %s: %s' % (names[i], r))
+ return '\n'.join(msg)
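+
+# Illustration (hypothetical values): build_err_msg([1, 2], 'note') returns
+# '\nItems are not equal: note\n ACTUAL: 1\n DESIRED: 2'.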
+
+
+def assert_equal(actual, desired, err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two objects are not equal.
+
+ Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
+ check that all elements of these objects are equal. An exception is raised
+    at the first conflicting value.
+
+ Parameters
+ ----------
+ actual : array_like
+ The object to check.
+ desired : array_like
+ The expected object.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal.
+
+ Examples
+ --------
+ >>> np.testing.assert_equal([4,5], [4,6])
+ ...
+ <type 'exceptions.AssertionError'>:
+ Items are not equal:
+ item=1
+ ACTUAL: 5
+ DESIRED: 6
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ if isinstance(desired, dict):
+ if not isinstance(actual, dict):
+ raise AssertionError(repr(type(actual)))
+ assert_equal(len(actual), len(desired), err_msg, verbose)
+ for k, i in desired.items():
+ if k not in actual:
+ raise AssertionError(repr(k))
+ assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
+ return
+ if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+ assert_equal(len(actual), len(desired), err_msg, verbose)
+ for k in range(len(desired)):
+ assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
+ return
+ from numpy.core import ndarray, isscalar, signbit
+ from numpy.lib import iscomplexobj, real, imag
+ if isinstance(actual, ndarray) or isinstance(desired, ndarray):
+ return assert_array_equal(actual, desired, err_msg, verbose)
+ msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
+
+ # Handle complex numbers: separate into real/imag to handle
+ # nan/inf/negative zero correctly
+ # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
+ try:
+ usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+ except ValueError:
+ usecomplex = False
+
+ if usecomplex:
+ if iscomplexobj(actual):
+ actualr = real(actual)
+ actuali = imag(actual)
+ else:
+ actualr = actual
+ actuali = 0
+ if iscomplexobj(desired):
+ desiredr = real(desired)
+ desiredi = imag(desired)
+ else:
+ desiredr = desired
+ desiredi = 0
+ try:
+ assert_equal(actualr, desiredr)
+ assert_equal(actuali, desiredi)
+ except AssertionError:
+ raise AssertionError(msg)
+
+ # isscalar test to check cases such as [np.nan] != np.nan
+ if isscalar(desired) != isscalar(actual):
+ raise AssertionError(msg)
+
+ # Inf/nan/negative zero handling
+ try:
+ isdesnan = gisnan(desired)
+ isactnan = gisnan(actual)
+ if isdesnan and isactnan:
+ return # both nan, so equal
+
+ # handle signed zero specially for floats
+ if desired == 0 and actual == 0:
+ if not signbit(desired) == signbit(actual):
+ raise AssertionError(msg)
+
+ except (TypeError, ValueError, NotImplementedError):
+ pass
+
+ try:
+ isdesnat = isnat(desired)
+ isactnat = isnat(actual)
+ dtypes_match = array(desired).dtype.type == array(actual).dtype.type
+ if isdesnat and isactnat:
+ # If both are NaT (and have the same dtype -- datetime or
+ # timedelta) they are considered equal.
+ if dtypes_match:
+ return
+ else:
+ raise AssertionError(msg)
+
+ except (TypeError, ValueError, NotImplementedError):
+ pass
+
+ try:
+ # Explicitly use __eq__ for comparison, gh-2552
+ if not (desired == actual):
+ raise AssertionError(msg)
+
+ except (DeprecationWarning, FutureWarning) as e:
+ # this handles the case when the two types are not even comparable
+ if 'elementwise == comparison' in e.args[0]:
+ raise AssertionError(msg)
+ else:
+ raise
+
+
+def print_assert_equal(test_string, actual, desired):
+ """
+ Test if two objects are equal, and print an error message if test fails.
+
+ The test is performed with ``actual == desired``.
+
+ Parameters
+ ----------
+ test_string : str
+ The message supplied to AssertionError.
+ actual : object
+ The object to test for equality against `desired`.
+ desired : object
+ The expected result.
+
+ Examples
+ --------
+ >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
+ >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
+ Traceback (most recent call last):
+ ...
+ AssertionError: Test XYZ of func xyz failed
+ ACTUAL:
+ [0, 1]
+ DESIRED:
+ [0, 2]
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ import pprint
+
+ if not (actual == desired):
+ msg = StringIO()
+ msg.write(test_string)
+ msg.write(' failed\nACTUAL: \n')
+ pprint.pprint(actual, msg)
+ msg.write('DESIRED: \n')
+ pprint.pprint(desired, msg)
+ raise AssertionError(msg.getvalue())
+
+
+def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
+ """
+ Raises an AssertionError if two items are not equal up to desired
+ precision.
+
+ .. note:: It is recommended to use one of `assert_allclose`,
+ `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+ instead of this function for more consistent floating point
+ comparisons.
+
+ The test verifies that the elements of ``actual`` and ``desired`` satisfy.
+
+ ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+ That is a looser test than originally documented, but agrees with what the
+ actual implementation in `assert_array_almost_equal` did up to rounding
+ vagaries. An exception is raised at conflicting values. For ndarrays this
+    delegates to assert_array_almost_equal.
+
+ Parameters
+ ----------
+ actual : array_like
+ The object to check.
+ desired : array_like
+ The expected object.
+ decimal : int, optional
+ Desired precision, default is 7.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal up to specified precision.
+
+ See Also
+ --------
+ assert_allclose: Compare two array_like objects for equality with desired
+ relative and/or absolute precision.
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+ Examples
+ --------
+ >>> import numpy.testing as npt
+ >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
+ >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
+ ...
+ <type 'exceptions.AssertionError'>:
+ Items are not equal:
+ ACTUAL: 2.3333333333333002
+ DESIRED: 2.3333333399999998
+
+ >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
+ ... np.array([1.0,2.33333334]), decimal=9)
+ ...
+ <type 'exceptions.AssertionError'>:
+ Arrays are not almost equal
+ <BLANKLINE>
+ (mismatch 50.0%)
+ x: array([ 1. , 2.33333333])
+ y: array([ 1. , 2.33333334])
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ from numpy.core import ndarray
+ from numpy.lib import iscomplexobj, real, imag
+
+ # Handle complex numbers: separate into real/imag to handle
+ # nan/inf/negative zero correctly
+ # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
+ try:
+ usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+ except ValueError:
+ usecomplex = False
+
+ def _build_err_msg():
+ header = ('Arrays are not almost equal to %d decimals' % decimal)
+ return build_err_msg([actual, desired], err_msg, verbose=verbose,
+ header=header)
+
+ if usecomplex:
+ if iscomplexobj(actual):
+ actualr = real(actual)
+ actuali = imag(actual)
+ else:
+ actualr = actual
+ actuali = 0
+ if iscomplexobj(desired):
+ desiredr = real(desired)
+ desiredi = imag(desired)
+ else:
+ desiredr = desired
+ desiredi = 0
+ try:
+ assert_almost_equal(actualr, desiredr, decimal=decimal)
+ assert_almost_equal(actuali, desiredi, decimal=decimal)
+ except AssertionError:
+ raise AssertionError(_build_err_msg())
+
+ if isinstance(actual, (ndarray, tuple, list)) \
+ or isinstance(desired, (ndarray, tuple, list)):
+ return assert_array_almost_equal(actual, desired, decimal, err_msg)
+ try:
+ # If one of desired/actual is not finite, handle it specially here:
+ # check that both are nan if any is a nan, and test for equality
+ # otherwise
+ if not (gisfinite(desired) and gisfinite(actual)):
+ if gisnan(desired) or gisnan(actual):
+ if not (gisnan(desired) and gisnan(actual)):
+ raise AssertionError(_build_err_msg())
+ else:
+ if not desired == actual:
+ raise AssertionError(_build_err_msg())
+ return
+ except (NotImplementedError, TypeError):
+ pass
+ if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
+ raise AssertionError(_build_err_msg())
+
+
+def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
+ """
+ Raises an AssertionError if two items are not equal up to significant
+ digits.
+
+ .. note:: It is recommended to use one of `assert_allclose`,
+ `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+ instead of this function for more consistent floating point
+ comparisons.
+
+ Given two numbers, check that they are approximately equal.
+ Approximately equal is defined as the number of significant digits
+ that agree.
+
+ Parameters
+ ----------
+ actual : scalar
+ The object to check.
+ desired : scalar
+ The expected object.
+ significant : int, optional
+ Desired precision, default is 7.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal up to specified precision.
+
+ See Also
+ --------
+ assert_allclose: Compare two array_like objects for equality with desired
+ relative and/or absolute precision.
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+ Examples
+ --------
+ >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
+    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
+    ...                                significant=8)
+    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
+    ...                                significant=8)
+ ...
+ <type 'exceptions.AssertionError'>:
+ Items are not equal to 8 significant digits:
+ ACTUAL: 1.234567e-021
+ DESIRED: 1.2345672000000001e-021
+
+    The evaluated condition that raises the exception is
+
+ >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
+ True
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ import numpy as np
+
+ (actual, desired) = map(float, (actual, desired))
+ if desired == actual:
+ return
+    # Normalize the numbers to be in range (-10.0,10.0)
+ # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+ with np.errstate(invalid='ignore'):
+ scale = 0.5*(np.abs(desired) + np.abs(actual))
+ scale = np.power(10, np.floor(np.log10(scale)))
+ try:
+ sc_desired = desired/scale
+ except ZeroDivisionError:
+ sc_desired = 0.0
+ try:
+ sc_actual = actual/scale
+ except ZeroDivisionError:
+ sc_actual = 0.0
+ msg = build_err_msg([actual, desired], err_msg,
+ header='Items are not equal to %d significant digits:' %
+ significant,
+ verbose=verbose)
+ try:
+ # If one of desired/actual is not finite, handle it specially here:
+ # check that both are nan if any is a nan, and test for equality
+ # otherwise
+ if not (gisfinite(desired) and gisfinite(actual)):
+ if gisnan(desired) or gisnan(actual):
+ if not (gisnan(desired) and gisnan(actual)):
+ raise AssertionError(msg)
+ else:
+ if not desired == actual:
+ raise AssertionError(msg)
+ return
+ except (TypeError, NotImplementedError):
+ pass
+ if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
+ raise AssertionError(msg)
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
+ header='', precision=6, equal_nan=True,
+ equal_inf=True):
+ __tracebackhide__ = True # Hide traceback for py.test
+ from numpy.core import array, isnan, inf, bool_
+ from numpy.core.fromnumeric import all as npall
+
+ x = array(x, copy=False, subok=True)
+ y = array(y, copy=False, subok=True)
+
+ def isnumber(x):
+ return x.dtype.char in '?bhilqpBHILQPefdgFDG'
+
+ def istime(x):
+ return x.dtype.char in "Mm"
+
+ def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
+ """Handling nan/inf.
+
+ Combine results of running func on x and y, checking that they are True
+ at the same locations.
+
+ """
+ # Both the != True comparison here and the cast to bool_ at the end are
+ # done to deal with `masked`, which cannot be compared usefully, and
+ # for which np.all yields masked. The use of the function np.all is
+ # for back compatibility with ndarray subclasses that changed the
+ # return values of the all method. We are not committed to supporting
+ # such subclasses, but some used to work.
+ x_id = func(x)
+ y_id = func(y)
+ if npall(x_id == y_id) != True:
+ msg = build_err_msg([x, y],
+ err_msg + '\nx and y %s location mismatch:'
+ % (hasval), verbose=verbose, header=header,
+ names=('x', 'y'), precision=precision)
+ raise AssertionError(msg)
+        # If either input is a scalar, its flag applies everywhere in the
+        # other array, so we should return the scalar flag.
+ if x_id.ndim == 0:
+ return bool_(x_id)
+ elif y_id.ndim == 0:
+ return bool_(y_id)
+ else:
+ return y_id
+
+ try:
+ cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
+ if not cond:
+ msg = build_err_msg([x, y],
+ err_msg
+ + '\n(shapes %s, %s mismatch)' % (x.shape,
+ y.shape),
+ verbose=verbose, header=header,
+ names=('x', 'y'), precision=precision)
+ raise AssertionError(msg)
+
+ flagged = bool_(False)
+ if isnumber(x) and isnumber(y):
+ if equal_nan:
+ flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
+
+ if equal_inf:
+ flagged |= func_assert_same_pos(x, y,
+ func=lambda xy: xy == +inf,
+ hasval='+inf')
+ flagged |= func_assert_same_pos(x, y,
+ func=lambda xy: xy == -inf,
+ hasval='-inf')
+
+ elif istime(x) and istime(y):
+ # If one is datetime64 and the other timedelta64 there is no point
+ if equal_nan and x.dtype.type == y.dtype.type:
+ flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
+
+ if flagged.ndim > 0:
+ x, y = x[~flagged], y[~flagged]
+ # Only do the comparison if actual values are left
+ if x.size == 0:
+ return
+ elif flagged:
+ # no sense doing comparison if everything is flagged.
+ return
+
+ val = comparison(x, y)
+
+ if isinstance(val, bool):
+ cond = val
+ reduced = [0]
+ else:
+ reduced = val.ravel()
+ cond = reduced.all()
+ reduced = reduced.tolist()
+ # The below comparison is a hack to ensure that fully masked
+ # results, for which val.ravel().all() returns np.ma.masked,
+ # do not trigger a failure (np.ma.masked != True evaluates as
+ # np.ma.masked, which is falsy).
+ if cond != True:
+ match = 100-100.0*reduced.count(1)/len(reduced)
+ msg = build_err_msg([x, y],
+ err_msg
+ + '\n(mismatch %s%%)' % (match,),
+ verbose=verbose, header=header,
+ names=('x', 'y'), precision=precision)
+ raise AssertionError(msg)
+ except ValueError:
+ import traceback
+ efmt = traceback.format_exc()
+ header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
+
+ msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
+ names=('x', 'y'), precision=precision)
+ raise ValueError(msg)
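+
+# assert_array_compare is the shared core: assert_array_equal passes
+# operator.__eq__ and assert_array_less passes operator.__lt__ (see below).
+# A custom elementwise check can be layered the same way (illustrative):
+#
+#     assert_array_compare(lambda a, b: abs(a - b) < 0.5, x, y,
+#                          header='Arrays differ by 0.5 or more')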
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two array_like objects are not equal.
+
+ Given two array_like objects, check that the shape is equal and all
+ elements of these objects are equal. An exception is raised at
+ shape mismatch or conflicting values. In contrast to the standard usage
+ in numpy, NaNs are compared like numbers, no assertion is raised if
+ both objects have NaNs in the same positions.
+
+ The usual caution for verifying equality with floating point numbers is
+ advised.
+
+ Parameters
+ ----------
+ x : array_like
+ The actual object to check.
+ y : array_like
+ The desired, expected object.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired objects are not equal.
+
+ See Also
+ --------
+ assert_allclose: Compare two array_like objects for equality with desired
+ relative and/or absolute precision.
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+ Examples
+ --------
+ The first assert does not raise an exception:
+
+ >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
+ ... [np.exp(0),2.33333, np.nan])
+
+    The assert fails because of numerical imprecision with floats:
+
+ >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
+ ... [1, np.sqrt(np.pi)**2, np.nan])
+ ...
+ <type 'exceptions.ValueError'>:
+ AssertionError:
+ Arrays are not equal
+ <BLANKLINE>
+ (mismatch 50.0%)
+ x: array([ 1. , 3.14159265, NaN])
+ y: array([ 1. , 3.14159265, NaN])
+
+ Use `assert_allclose` or one of the nulp (number of floating point values)
+ functions for these cases instead:
+
+ >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
+ ... [1, np.sqrt(np.pi)**2, np.nan],
+ ... rtol=1e-10, atol=0)
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
+ verbose=verbose, header='Arrays are not equal')
+
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two objects are not equal up to desired
+ precision.
+
+ .. note:: It is recommended to use one of `assert_allclose`,
+ `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+ instead of this function for more consistent floating point
+ comparisons.
+
+ The test verifies identical shapes and that the elements of ``actual`` and
+ ``desired`` satisfy.
+
+ ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+ That is a looser test than originally documented, but agrees with what the
+ actual implementation did up to rounding vagaries. An exception is raised
+ at shape mismatch or conflicting values. In contrast to the standard usage
+ in numpy, NaNs are compared like numbers, no assertion is raised if both
+ objects have NaNs in the same positions.
+
+ Parameters
+ ----------
+ x : array_like
+ The actual object to check.
+ y : array_like
+ The desired, expected object.
+ decimal : int, optional
+ Desired precision, default is 6.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal up to specified precision.
+
+ See Also
+ --------
+ assert_allclose: Compare two array_like objects for equality with desired
+ relative and/or absolute precision.
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+ Examples
+ --------
+    The first assert does not raise an exception:
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
+    ...                                      [1.0,2.333,np.nan])
+
+ >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+ ... [1.0,2.33339,np.nan], decimal=5)
+ ...
+ <type 'exceptions.AssertionError'>:
+ AssertionError:
+ Arrays are not almost equal
+ <BLANKLINE>
+ (mismatch 50.0%)
+ x: array([ 1. , 2.33333, NaN])
+ y: array([ 1. , 2.33339, NaN])
+
+ >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+ ... [1.0,2.33333, 5], decimal=5)
+ <type 'exceptions.ValueError'>:
+ ValueError:
+ Arrays are not almost equal
+ x: array([ 1. , 2.33333, NaN])
+ y: array([ 1. , 2.33333, 5. ])
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ from numpy.core import around, number, float_, result_type, array
+ from numpy.core.numerictypes import issubdtype
+ from numpy.core.fromnumeric import any as npany
+
+ def compare(x, y):
+ try:
+            if npany(gisinf(x)) or npany(gisinf(y)):
+ xinfid = gisinf(x)
+ yinfid = gisinf(y)
+ if not (xinfid == yinfid).all():
+ return False
+ # if one item, x and y is +- inf
+ if x.size == y.size == 1:
+ return x == y
+ x = x[~xinfid]
+ y = y[~yinfid]
+ except (TypeError, NotImplementedError):
+ pass
+
+ # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+ # casting of x later.
+ dtype = result_type(y, 1.)
+ y = array(y, dtype=dtype, copy=False, subok=True)
+ z = abs(x - y)
+
+ if not issubdtype(z.dtype, number):
+ z = z.astype(float_) # handle object arrays
+
+ return z < 1.5 * 10.0**(-decimal)
+
+ assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+ header=('Arrays are not almost equal to %d decimals' % decimal),
+ precision=decimal)
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two array_like objects are not ordered by less
+ than.
+
+ Given two array_like objects, check that the shape is equal and all
+ elements of the first object are strictly smaller than those of the
+ second object. An exception is raised at shape mismatch or incorrectly
+ ordered values. Shape mismatch does not raise if an object has zero
+ dimension. In contrast to the standard usage in numpy, NaNs are
+ compared, no assertion is raised if both objects have NaNs in the same
+ positions.
+
+ Parameters
+ ----------
+ x : array_like
+ The smaller object to check.
+ y : array_like
+ The larger object to compare.
+ err_msg : string
+ The error message to be printed in case of failure.
+ verbose : bool
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired objects are not equal.
+
+ See Also
+ --------
+ assert_array_equal: tests objects for equality
+ assert_array_almost_equal: test objects for equality up to precision
+
+ Examples
+ --------
+ >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
+ >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
+ ...
+ <type 'exceptions.ValueError'>:
+ Arrays are not less-ordered
+ (mismatch 50.0%)
+ x: array([ 1., 1., NaN])
+ y: array([ 1., 2., NaN])
+
+ >>> np.testing.assert_array_less([1.0, 4.0], 3)
+ ...
+ <type 'exceptions.ValueError'>:
+ Arrays are not less-ordered
+ (mismatch 50.0%)
+ x: array([ 1., 4.])
+ y: array(3)
+
+ >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
+ ...
+ <type 'exceptions.ValueError'>:
+ Arrays are not less-ordered
+ (shapes (3,), (1,) mismatch)
+ x: array([ 1., 2., 3.])
+ y: array([4])
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
+ verbose=verbose,
+ header='Arrays are not less-ordered',
+ equal_inf=False)
+
+
+def runstring(astr, dict):
+ exec(astr, dict)
+
+
+def assert_string_equal(actual, desired):
+ """
+ Test if two strings are equal.
+
+ If the given strings are equal, `assert_string_equal` does nothing.
+ If they are not equal, an AssertionError is raised, and the diff
+ between the strings is shown.
+
+ Parameters
+ ----------
+ actual : str
+ The string to test for equality against the expected string.
+ desired : str
+ The expected string.
+
+ Examples
+ --------
+ >>> np.testing.assert_string_equal('abc', 'abc')
+ >>> np.testing.assert_string_equal('abc', 'abcd')
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ...
+ AssertionError: Differences in strings:
+ - abc+ abcd? +
+
+ """
+ # delay import of difflib to reduce startup time
+ __tracebackhide__ = True # Hide traceback for py.test
+ import difflib
+
+ if not isinstance(actual, str):
+ raise AssertionError(repr(type(actual)))
+ if not isinstance(desired, str):
+ raise AssertionError(repr(type(desired)))
+ if re.match(r'\A'+desired+r'\Z', actual, re.M):
+ return
+
+ diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
+ diff_list = []
+ while diff:
+ d1 = diff.pop(0)
+ if d1.startswith(' '):
+ continue
+ if d1.startswith('- '):
+ l = [d1]
+ d2 = diff.pop(0)
+ if d2.startswith('? '):
+ l.append(d2)
+ d2 = diff.pop(0)
+ if not d2.startswith('+ '):
+ raise AssertionError(repr(d2))
+ l.append(d2)
+ if diff:
+ d3 = diff.pop(0)
+ if d3.startswith('? '):
+ l.append(d3)
+ else:
+ diff.insert(0, d3)
+ if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
+ continue
+ diff_list.extend(l)
+ continue
+ raise AssertionError(repr(d1))
+ if not diff_list:
+ return
+ msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
+ if actual != desired:
+ raise AssertionError(msg)
+
+
+def rundocs(filename=None, raise_on_error=True):
+ """
+ Run doctests found in the given file.
+
+ By default `rundocs` raises an AssertionError on failure.
+
+ Parameters
+ ----------
+ filename : str
+ The path to the file for which the doctests are run.
+ raise_on_error : bool
+ Whether to raise an AssertionError when a doctest fails. Default is
+ True.
+
+ Notes
+ -----
+ The doctests can be run by the user/developer by adding the ``doctests``
+ argument to the ``test()`` call. For example, to run all tests (including
+ doctests) for `numpy.lib`:
+
+ >>> np.lib.test(doctests=True) #doctest: +SKIP
+ """
+ from numpy.compat import npy_load_module
+ import doctest
+ if filename is None:
+ f = sys._getframe(1)
+ filename = f.f_globals['__file__']
+ name = os.path.splitext(os.path.basename(filename))[0]
+ m = npy_load_module(name, filename)
+
+ tests = doctest.DocTestFinder().find(m)
+ runner = doctest.DocTestRunner(verbose=False)
+
+ msg = []
+ if raise_on_error:
+ out = lambda s: msg.append(s)
+ else:
+ out = None
+
+ for test in tests:
+ runner.run(test, out=out)
+
+ if runner.failures > 0 and raise_on_error:
+ raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
+
+
+def raises(*args):
+ """Decorator to check for raised exceptions.
+
+ The decorated test function must raise one of the passed exceptions to
+ pass. If you want to test many assertions about exceptions in a single
+ test, you may want to use `assert_raises` instead.
+
+ .. warning::
+ This decorator is nose specific, do not use it if you are using a
+ different test framework.
+
+ Parameters
+ ----------
+ args : exceptions
+ The test passes if any of the passed exceptions is raised.
+
+ Raises
+ ------
+ AssertionError
+
+ Examples
+ --------
+
+ Usage::
+
+ @raises(TypeError, ValueError)
+ def test_raises_type_error():
+ raise TypeError("This test passes")
+
+ @raises(Exception)
+ def test_that_fails_by_passing():
+ pass
+
+ """
+ nose = import_nose()
+ return nose.tools.raises(*args)
+
+#
+# assert_raises and assert_raises_regex are taken from unittest.
+#
+import unittest
+
+
+class _Dummy(unittest.TestCase):
+ def nop(self):
+ pass
+
+_d = _Dummy('nop')
+
+def assert_raises(*args, **kwargs):
+ """
+ assert_raises(exception_class, callable, *args, **kwargs)
+ assert_raises(exception_class)
+
+ Fail unless an exception of class exception_class is thrown
+ by callable when invoked with arguments args and keyword
+ arguments kwargs. If a different type of exception is
+ thrown, it will not be caught, and the test case will be
+ deemed to have suffered an error, exactly as for an
+ unexpected exception.
+
+ Alternatively, `assert_raises` can be used as a context manager:
+
+ >>> from numpy.testing import assert_raises
+ >>> with assert_raises(ZeroDivisionError):
+ ... 1 / 0
+
+ is equivalent to
+
+ >>> def div(x, y):
+ ... return x / y
+ >>> assert_raises(ZeroDivisionError, div, 1, 0)
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ return _d.assertRaises(*args,**kwargs)
+
+
+def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
+ """
+ assert_raises_regex(exception_class, expected_regexp, callable, *args,
+ **kwargs)
+ assert_raises_regex(exception_class, expected_regexp)
+
+ Fail unless an exception of class exception_class and with message that
+ matches expected_regexp is thrown by callable when invoked with arguments
+ args and keyword arguments kwargs.
+
+ Alternatively, can be used as a context manager like `assert_raises`.
+
+ Name of this function adheres to Python 3.2+ reference, but should work in
+ all versions down to 2.6.
+
+ Notes
+ -----
+ .. versionadded:: 1.9.0
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+
+ if sys.version_info.major >= 3:
+ funcname = _d.assertRaisesRegex
+ else:
+ # Only present in Python 2.7, missing from unittest in 2.6
+ funcname = _d.assertRaisesRegexp
+
+ return funcname(exception_class, expected_regexp, *args, **kwargs)
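+
+# Illustrative use, mirroring assert_raises:
+#
+#     with assert_raises_regex(ValueError, 'invalid literal'):
+#         int('not-a-number')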
+
+
+def decorate_methods(cls, decorator, testmatch=None):
+ """
+ Apply a decorator to all methods in a class matching a regular expression.
+
+ The given decorator is applied to all public methods of `cls` that are
+ matched by the regular expression `testmatch`
+ (``testmatch.search(methodname)``). Methods that are private, i.e. start
+ with an underscore, are ignored.
+
+ Parameters
+ ----------
+ cls : class
+ Class whose methods to decorate.
+ decorator : function
+ Decorator to apply to methods
+ testmatch : compiled regexp or str, optional
+ The regular expression. Default value is None, in which case the
+ nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
+ is used.
+ If `testmatch` is a string, it is compiled to a regular expression
+ first.
+
+ """
+ if testmatch is None:
+ testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+ else:
+ testmatch = re.compile(testmatch)
+ cls_attr = cls.__dict__
+
+ # delayed import to reduce startup time
+ from inspect import isfunction
+
+ methods = [_m for _m in cls_attr.values() if isfunction(_m)]
+ for function in methods:
+ try:
+ if hasattr(function, 'compat_func_name'):
+ funcname = function.compat_func_name
+ else:
+ funcname = function.__name__
+ except AttributeError:
+ # not a function
+ continue
+ if testmatch.search(funcname) and not funcname.startswith('_'):
+ setattr(cls, funcname, decorator(function))
+ return
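+
+# For example (names illustrative): decorate_methods(TestMyClass, my_decorator)
+# wraps every public method of TestMyClass whose name matches the nose test
+# pattern with my_decorator.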
+
+
+def measure(code_str,times=1,label=None):
+ """
+ Return elapsed time for executing code in the namespace of the caller.
+
+ The supplied code string is compiled with the Python builtin ``compile``.
+    The precision of the timing is 10 milliseconds. If the code will execute
+ fast on this timescale, it can be executed many times to get reasonable
+ timing accuracy.
+
+ Parameters
+ ----------
+ code_str : str
+ The code to be timed.
+ times : int, optional
+ The number of times the code is executed. Default is 1. The code is
+ only compiled once.
+ label : str, optional
+ A label to identify `code_str` with. This is passed into ``compile``
+ as the second argument (for run-time error messages).
+
+ Returns
+ -------
+ elapsed : float
+ Total elapsed time in seconds for executing `code_str` `times` times.
+
+ Examples
+ --------
+    >>> times = 10
+    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
+    ...                            times=times)
+ >>> print("Time for a single execution : ", etime / times, "s")
+ Time for a single execution : 0.005 s
+
+ """
+ frame = sys._getframe(1)
+ locs, globs = frame.f_locals, frame.f_globals
+
+ code = compile(code_str,
+ 'Test name: %s ' % label,
+ 'exec')
+ i = 0
+ elapsed = jiffies()
+ while i < times:
+ i += 1
+ exec(code, globs, locs)
+ elapsed = jiffies() - elapsed
+ return 0.01*elapsed
+
+
+def _assert_valid_refcount(op):
+ """
+ Check that ufuncs don't mishandle refcount of object `1`.
+ Used in a few regression tests.
+ """
+ if not HAS_REFCOUNT:
+ return True
+ import numpy as np, gc
+
+ b = np.arange(100*100).reshape(100, 100)
+ c = b
+ i = 1
+
+ gc.disable()
+ try:
+ rc = sys.getrefcount(i)
+ for j in range(15):
+ d = op(b, c)
+ assert_(sys.getrefcount(i) >= rc)
+ finally:
+ gc.enable()
+ del d # for pyflakes
+
+
+def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
+ err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two objects are not equal up to desired
+ tolerance.
+
+ The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
+ It compares the difference between `actual` and `desired` to
+ ``atol + rtol * abs(desired)``.
+
+ .. versionadded:: 1.5.0
+
+ Parameters
+ ----------
+ actual : array_like
+ Array obtained.
+ desired : array_like
+ Array desired.
+ rtol : float, optional
+ Relative tolerance.
+ atol : float, optional
+ Absolute tolerance.
+ equal_nan : bool, optional.
+ If True, NaNs will compare equal.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal up to specified precision.
+
+ See Also
+ --------
+ assert_array_almost_equal_nulp, assert_array_max_ulp
+
+ Examples
+ --------
+ >>> x = [1e-5, 1e-3, 1e-1]
+ >>> y = np.arccos(np.cos(x))
+ >>> assert_allclose(x, y, rtol=1e-5, atol=0)
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ import numpy as np
+
+ def compare(x, y):
+ return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
+ equal_nan=equal_nan)
+
+ actual, desired = np.asanyarray(actual), np.asanyarray(desired)
+ header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
+ assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
+ verbose=verbose, header=header, equal_nan=equal_nan)
+
+
+def assert_array_almost_equal_nulp(x, y, nulp=1):
+ """
+ Compare two arrays relatively to their spacing.
+
+ This is a relatively robust method to compare two arrays whose amplitude
+ is variable.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Input arrays.
+ nulp : int, optional
+ The maximum number of units in the last place for tolerance (see Notes).
+ Default is 1.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ AssertionError
+ If the spacing between `x` and `y` for one or more elements is larger
+ than `nulp`.
+
+ See Also
+ --------
+ assert_array_max_ulp : Check that all items of arrays differ in at most
+ N Units in the Last Place.
+ spacing : Return the distance between x and the nearest adjacent number.
+
+ Notes
+ -----
+ An assertion is raised if the following condition is not met::
+
+ abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
+
+ Examples
+ --------
+ >>> x = np.array([1., 1e-10, 1e-20])
+ >>> eps = np.finfo(x.dtype).eps
+ >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
+
+ >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
+ Traceback (most recent call last):
+ ...
+ AssertionError: X and Y are not equal to 1 ULP (max is 2)
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ import numpy as np
+ ax = np.abs(x)
+ ay = np.abs(y)
+ ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
+ if not np.all(np.abs(x-y) <= ref):
+ if np.iscomplexobj(x) or np.iscomplexobj(y):
+ msg = "X and Y are not equal to %d ULP" % nulp
+ else:
+ max_nulp = np.max(nulp_diff(x, y))
+ msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
+ raise AssertionError(msg)
+
+
+def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
+ """
+ Check that all items of arrays differ in at most N Units in the Last Place.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Input arrays to be compared.
+ maxulp : int, optional
+ The maximum number of units in the last place that elements of `a` and
+ `b` can differ. Default is 1.
+ dtype : dtype, optional
+ Data-type to convert `a` and `b` to if given. Default is None.
+
+ Returns
+ -------
+ ret : ndarray
+ Array containing number of representable floating point numbers between
+ items in `a` and `b`.
+
+ Raises
+ ------
+ AssertionError
+ If one or more elements differ by more than `maxulp`.
+
+ See Also
+ --------
+ assert_array_almost_equal_nulp : Compare two arrays relatively to their
+ spacing.
+
+ Examples
+ --------
+ >>> a = np.linspace(0., 1., 100)
+ >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ import numpy as np
+ ret = nulp_diff(a, b, dtype)
+ if not np.all(ret <= maxulp):
+ raise AssertionError("Arrays are not almost equal up to %g ULP" %
+ maxulp)
+ return ret
+
+
+def nulp_diff(x, y, dtype=None):
+ """For each item in x and y, return the number of representable floating
+ points between them.
+
+ Parameters
+ ----------
+ x : array_like
+ first input array
+ y : array_like
+ second input array
+ dtype : dtype, optional
+ Data-type to convert `x` and `y` to if given. Default is None.
+
+ Returns
+ -------
+ nulp : array_like
+ number of representable floating point numbers between each item in x
+ and y.
+
+ Examples
+ --------
+ # By definition, epsilon is the smallest number such that 1 + eps != 1,
+ # so there should be exactly one ULP between 1 and 1 + eps
+ >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
+ 1.0
+ """
+ import numpy as np
+ if dtype:
+ x = np.array(x, dtype=dtype)
+ y = np.array(y, dtype=dtype)
+ else:
+ x = np.array(x)
+ y = np.array(y)
+
+ t = np.common_type(x, y)
+ if np.iscomplexobj(x) or np.iscomplexobj(y):
+ raise NotImplementedError("_nulp not implemented for complex array")
+
+ x = np.array(x, dtype=t)
+ y = np.array(y, dtype=t)
+
+ if not x.shape == y.shape:
+ raise ValueError("x and y do not have the same shape: %s - %s" %
+ (x.shape, y.shape))
+
+ def _diff(rx, ry, vdt):
+ diff = np.array(rx-ry, dtype=vdt)
+ return np.abs(diff)
+
+ rx = integer_repr(x)
+ ry = integer_repr(y)
+ return _diff(rx, ry, t)
+
+
+def _integer_repr(x, vdt, comp):
+ # Reinterpret binary representation of the float as sign-magnitude:
+ # take into account two's-complement representation
+ # See also
+ # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+ rx = x.view(vdt)
+ if not (rx.size == 1):
+ rx[rx < 0] = comp - rx[rx < 0]
+ else:
+ if rx < 0:
+ rx = comp - rx
+
+ return rx
+
+
+def integer_repr(x):
+ """Return the signed-magnitude interpretation of the binary representation of
+ x."""
+ import numpy as np
+ if x.dtype == np.float16:
+ return _integer_repr(x, np.int16, np.int16(-2**15))
+ elif x.dtype == np.float32:
+ return _integer_repr(x, np.int32, np.int32(-2**31))
+ elif x.dtype == np.float64:
+ return _integer_repr(x, np.int64, np.int64(-2**63))
+ else:
+ raise ValueError("Unsupported dtype %s" % x.dtype)
+
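+ # A small worked example of the sign-magnitude mapping above (a sketch,
+ # assuming float64 inputs; meant for an interpreter session, not import time):
+ #
+ #     >>> import numpy as np
+ #     >>> x = np.array([1.0, np.nextafter(1.0, 2.0)])
+ #     >>> r = integer_repr(x)
+ #     >>> int(r[1] - r[0])  # adjacent floats map to adjacent integers
+ #     1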
+
+@contextlib.contextmanager
+def _assert_warns_context(warning_class, name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+ with suppress_warnings() as sup:
+ l = sup.record(warning_class)
+ yield
+ if not len(l) > 0:
+ name_str = " when calling %s" % name if name is not None else ""
+ raise AssertionError("No warning raised" + name_str)
+
+
+def assert_warns(warning_class, *args, **kwargs):
+ """
+ Fail unless the given callable throws the specified warning.
+
+ A warning of class warning_class should be thrown by the callable when
+ invoked with arguments args and keyword arguments kwargs.
+ If a different type of warning is thrown, it will not be caught.
+
+ If called with all arguments other than the warning class omitted, may be
+ used as a context manager:
+
+ with assert_warns(SomeWarning):
+ do_something()
+
+ The ability to be used as a context manager is new in NumPy v1.11.0.
+
+ .. versionadded:: 1.4.0
+
+ Parameters
+ ----------
+ warning_class : class
+ The class defining the warning that `func` is expected to throw.
+ func : callable
+ The callable to test.
+ \\*args : Arguments
+ Arguments passed to `func`.
+ \\*\\*kwargs : Kwargs
+ Keyword arguments passed to `func`.
+
+ Returns
+ -------
+ The value returned by `func`.
+
+ """
+ if not args:
+ return _assert_warns_context(warning_class)
+
+ func = args[0]
+ args = args[1:]
+ with _assert_warns_context(warning_class, name=func.__name__):
+ return func(*args, **kwargs)
+
+
+@contextlib.contextmanager
+def _assert_no_warnings_context(name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+ with warnings.catch_warnings(record=True) as l:
+ warnings.simplefilter('always')
+ yield
+ if len(l) > 0:
+ name_str = " when calling %s" % name if name is not None else ""
+ raise AssertionError("Got warnings%s: %s" % (name_str, l))
+
+
+def assert_no_warnings(*args, **kwargs):
+ """
+ Fail if the given callable produces any warnings.
+
+ If called with all arguments omitted, may be used as a context manager:
+
+ with assert_no_warnings():
+ do_something()
+
+ The ability to be used as a context manager is new in NumPy v1.11.0.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ func : callable
+ The callable to test.
+ \\*args : Arguments
+ Arguments passed to `func`.
+ \\*\\*kwargs : Kwargs
+ Keyword arguments passed to `func`.
+
+ Returns
+ -------
+ The value returned by `func`.
+
+ """
+ if not args:
+ return _assert_no_warnings_context()
+
+ func = args[0]
+ args = args[1:]
+ with _assert_no_warnings_context(name=func.__name__):
+ return func(*args, **kwargs)
+
+
+def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
+ """
+ Generator producing data with different alignment and offsets
+ to test SIMD vectorization.
+
+ Parameters
+ ----------
+ dtype : dtype
+ data type to produce
+ type : string
+ 'unary': create data for unary operations, creates one input
+ and one output array
+ 'binary': create data for binary operations, creates two input
+ arrays and one output array
+ max_size : integer
+ maximum size of data to produce
+
+ Returns
+ -------
+ if type is 'unary' yields one output array, one input array and a message
+ containing information on the data
+ if type is 'binary' yields one output array, two input arrays and a
+ message containing information on the data
+
+ """
+ ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
+ bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
+ for o in range(3):
+ for s in range(o + 2, max(o + 3, max_size)):
+ if type == 'unary':
+ inp = lambda: arange(s, dtype=dtype)[o:]
+ out = empty((s,), dtype=dtype)[o:]
+ yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
+ d = inp()
+ yield d, d, ufmt % (o, o, s, dtype, 'in place')
+ yield out[1:], inp()[:-1], ufmt % \
+ (o + 1, o, s - 1, dtype, 'out of place')
+ yield out[:-1], inp()[1:], ufmt % \
+ (o, o + 1, s - 1, dtype, 'out of place')
+ yield inp()[:-1], inp()[1:], ufmt % \
+ (o, o + 1, s - 1, dtype, 'aliased')
+ yield inp()[1:], inp()[:-1], ufmt % \
+ (o + 1, o, s - 1, dtype, 'aliased')
+ if type == 'binary':
+ inp1 = lambda: arange(s, dtype=dtype)[o:]
+ inp2 = lambda: arange(s, dtype=dtype)[o:]
+ out = empty((s,), dtype=dtype)[o:]
+ yield out, inp1(), inp2(), bfmt % \
+ (o, o, o, s, dtype, 'out of place')
+ d = inp1()
+ yield d, d, inp2(), bfmt % \
+ (o, o, o, s, dtype, 'in place1')
+ d = inp2()
+ yield d, inp1(), d, bfmt % \
+ (o, o, o, s, dtype, 'in place2')
+ yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+ (o + 1, o, o, s - 1, dtype, 'out of place')
+ yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+ (o, o + 1, o, s - 1, dtype, 'out of place')
+ yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+ (o, o, o + 1, s - 1, dtype, 'out of place')
+ yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+ (o + 1, o, o, s - 1, dtype, 'aliased')
+ yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+ (o, o + 1, o, s - 1, dtype, 'aliased')
+ yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+ (o, o, o + 1, s - 1, dtype, 'aliased')
+
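+ # Sketch of how the generator above is typically consumed (illustrative;
+ # ``np.add`` merely stands in for whatever ufunc is under test):
+ #
+ #     >>> import numpy as np
+ #     >>> for out, inp1, inp2, msg in _gen_alignment_data(dtype=np.float64,
+ #     ...                                                 type='binary',
+ #     ...                                                 max_size=8):
+ #     ...     np.add(inp1, inp2, out=out)  # exercise differently aligned views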
+
+class IgnoreException(Exception):
+ "Ignoring this exception due to disabled feature"
+ pass
+
+
+@contextlib.contextmanager
+def tempdir(*args, **kwargs):
+ """Context manager to provide a temporary test folder.
+
+ All arguments are passed as-is to the underlying tempfile.mkdtemp
+ function.
+
+ """
+ tmpdir = mkdtemp(*args, **kwargs)
+ try:
+ yield tmpdir
+ finally:
+ shutil.rmtree(tmpdir)
+
+
+@contextlib.contextmanager
+def temppath(*args, **kwargs):
+ """Context manager for temporary files.
+
+ Context manager that returns the path to a closed temporary file. Its
+ parameters are the same as for tempfile.mkstemp and are passed directly
+ to that function. The underlying file is removed when the context is
+ exited, so it should be closed at that time.
+
+ Windows does not allow a temporary file to be opened while it is
+ already open elsewhere, so the underlying file must be closed before
+ it can be opened again.
+
+ """
+ fd, path = mkstemp(*args, **kwargs)
+ os.close(fd)
+ try:
+ yield path
+ finally:
+ os.remove(path)
+
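+ # A minimal usage sketch for ``temppath`` (illustrative; ``tempdir`` is used
+ # analogously and cleans up the whole directory on exit):
+ #
+ #     >>> import numpy as np
+ #     >>> with temppath(suffix='.npy') as path:
+ #     ...     np.save(path, np.arange(3))  # underlying file starts out closed
+ #     ...     assert_array_equal(np.load(path), np.arange(3))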
+
+class clear_and_catch_warnings(warnings.catch_warnings):
+ """ Context manager that resets warning registry for catching warnings
+
+ Warnings can be slippery, because, whenever a warning is triggered, Python
+ adds a ``__warningregistry__`` member to the *calling* module. This makes
+ it impossible to retrigger the warning in this module, whatever you put in
+ the warnings filters. This context manager accepts a sequence of `modules`
+ as a keyword argument to its constructor and:
+
+ * stores and removes any ``__warningregistry__`` entries in given `modules`
+ on entry;
+ * resets ``__warningregistry__`` to its previous state on exit.
+
+ This makes it possible to trigger any warning afresh inside the context
+ manager without disturbing the state of warnings outside.
+
+ For compatibility with Python 3.0, please consider all arguments to be
+ keyword-only.
+
+ Parameters
+ ----------
+ record : bool, optional
+ Specifies whether warnings should be captured by a custom
+ implementation of ``warnings.showwarning()`` and be appended to a list
+ returned by the context manager. Otherwise None is returned by the
+ context manager. The objects appended to the list are arguments whose
+ attributes mirror the arguments to ``showwarning()``.
+ modules : sequence, optional
+ Sequence of modules for which to reset warnings registry on entry and
+ restore on exit. To work correctly, all 'ignore' filters should
+ filter by one of these modules.
+
+ Examples
+ --------
+ >>> import warnings
+ >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
+ ... warnings.simplefilter('always')
+ ... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
+ ... # do something that raises a warning but ignore those in
+ ... # np.core.fromnumeric
+ """
+ class_modules = ()
+
+ def __init__(self, record=False, modules=()):
+ self.modules = set(modules).union(self.class_modules)
+ self._warnreg_copies = {}
+ super(clear_and_catch_warnings, self).__init__(record=record)
+
+ def __enter__(self):
+ for mod in self.modules:
+ if hasattr(mod, '__warningregistry__'):
+ mod_reg = mod.__warningregistry__
+ self._warnreg_copies[mod] = mod_reg.copy()
+ mod_reg.clear()
+ return super(clear_and_catch_warnings, self).__enter__()
+
+ def __exit__(self, *exc_info):
+ super(clear_and_catch_warnings, self).__exit__(*exc_info)
+ for mod in self.modules:
+ if hasattr(mod, '__warningregistry__'):
+ mod.__warningregistry__.clear()
+ if mod in self._warnreg_copies:
+ mod.__warningregistry__.update(self._warnreg_copies[mod])
+
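+ # ``class_modules`` lets subclasses bake in modules whose registries should
+ # always be cleared; a sketch (the subclass name is hypothetical):
+ #
+ #     import numpy as np
+ #
+ #     class clear_and_catch_warnings_for_ma(clear_and_catch_warnings):
+ #         class_modules = (np.ma.core,)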
+
+class suppress_warnings(object):
+ """
+ Context manager and decorator doing much the same as
+ ``warnings.catch_warnings``.
+
+ However, it also provides a filter mechanism to work around
+ http://bugs.python.org/issue4180.
+
+ This bug causes Python before 3.4 to not reliably show warnings again
+ after they have been ignored once (even within catch_warnings). It
+ means that no "ignore" filter can be used easily, since following
+ tests might need to see the warning. Additionally it allows easier
+ specificity for testing warnings and can be nested.
+
+ Parameters
+ ----------
+ forwarding_rule : str, optional
+ One of "always", "once", "module", or "location". Analogous to
+ the usual warnings module filter mode, it is useful to reduce
+ noise mostly on the outermost level. Unsuppressed and unrecorded
+ warnings will be forwarded based on this rule. Defaults to "always".
+ "location" is equivalent to the warnings "default" rule: match by the
+ exact location the warning originated from.
+
+ Notes
+ -----
+ Filters added inside the context manager will be discarded again
+ when leaving it. Upon entering all filters defined outside a
+ context will be applied automatically.
+
+ When a recording filter is added, matching warnings are stored in the
+ ``log`` attribute as well as in the list returned by ``record``.
+
+ If filters are added and the ``module`` keyword is given, the
+ warning registry of this module will additionally be cleared when
+ applying it, entering the context, or exiting it. This could cause
+ warnings to appear a second time after leaving the context if they
+ were configured to be printed once (default) and were already
+ printed before the context was entered.
+
+ Nesting this context manager will work as expected when the
+ forwarding rule is "always" (default). Unfiltered and unrecorded
+ warnings will be passed out and be matched by the outer level.
+ On the outermost level they will be printed (or caught by another
+ warnings context). The forwarding rule argument can modify this
+ behaviour.
+
+ Like ``catch_warnings`` this context manager is not threadsafe.
+
+ Examples
+ --------
+ >>> with suppress_warnings() as sup:
+ ... sup.filter(DeprecationWarning, "Some text")
+ ... sup.filter(module=np.ma.core)
+ ... log = sup.record(FutureWarning, "Does this occur?")
+ ... command_giving_warnings()
+ ... # The FutureWarning was given once, the filtered warnings were
+ ... # ignored. All other warnings abide outside settings (may be
+ ... # printed/error)
+ ... assert_(len(log) == 1)
+ ... assert_(len(sup.log) == 1) # also stored in log attribute
+
+ Or as a decorator:
+
+ >>> sup = suppress_warnings()
+ >>> sup.filter(module=np.ma.core)  # module must match exactly
+ >>> @sup
+ ... def some_function():
+ ... # do something which causes a warning in np.ma.core
+ ... pass
+ """
+ def __init__(self, forwarding_rule="always"):
+ self._entered = False
+
+ # Suppressions are either instance or defined inside one with block:
+ self._suppressions = []
+
+ if forwarding_rule not in {"always", "module", "once", "location"}:
+ raise ValueError("unsupported forwarding rule.")
+ self._forwarding_rule = forwarding_rule
+
+ def _clear_registries(self):
+ if hasattr(warnings, "_filters_mutated"):
+ # clearing the registry should not be necessary on new pythons,
+ # instead the filters should be mutated.
+ warnings._filters_mutated()
+ return
+ # Simply clear the registry, this should normally be harmless,
+ # note that on new pythons it would be invalidated anyway.
+ for module in self._tmp_modules:
+ if hasattr(module, "__warningregistry__"):
+ module.__warningregistry__.clear()
+
+ def _filter(self, category=Warning, message="", module=None, record=False):
+ if record:
+ record = []  # the log in which to store warnings
+ else:
+ record = None
+ if self._entered:
+ if module is None:
+ warnings.filterwarnings(
+ "always", category=category, message=message)
+ else:
+ module_regex = module.__name__.replace('.', r'\.') + '$'
+ warnings.filterwarnings(
+ "always", category=category, message=message,
+ module=module_regex)
+ self._tmp_modules.add(module)
+ self._clear_registries()
+
+ self._tmp_suppressions.append(
+ (category, message, re.compile(message, re.I), module, record))
+ else:
+ self._suppressions.append(
+ (category, message, re.compile(message, re.I), module, record))
+
+ return record
+
+ def filter(self, category=Warning, message="", module=None):
+ """
+ Add a new suppressing filter or apply it if the state is entered.
+
+ Parameters
+ ----------
+ category : class, optional
+ Warning class to filter
+ message : string, optional
+ Regular expression matching the warning message.
+ module : module, optional
+ Module to filter for. Note that the module (and its file)
+ must match exactly and cannot be a submodule. This may make
+ it unreliable for external modules.
+
+ Notes
+ -----
+ When added within a context, filters are only added inside
+ the context and will be forgotten when the context is exited.
+ """
+ self._filter(category=category, message=message, module=module,
+ record=False)
+
+ def record(self, category=Warning, message="", module=None):
+ """
+ Append a new recording filter or apply it if the state is entered.
+
+ All warnings matching will be appended to the ``log`` attribute.
+
+ Parameters
+ ----------
+ category : class, optional
+ Warning class to filter
+ message : string, optional
+ Regular expression matching the warning message.
+ module : module, optional
+ Module to filter for. Note that the module (and its file)
+ must match exactly and cannot be a submodule. This may make
+ it unreliable for external modules.
+
+ Returns
+ -------
+ log : list
+ A list which will be filled with all matched warnings.
+
+ Notes
+ -----
+ When added within a context, filters are only added inside
+ the context and will be forgotten when the context is exited.
+ """
+ return self._filter(category=category, message=message, module=module,
+ record=True)
+
+ def __enter__(self):
+ if self._entered:
+ raise RuntimeError("cannot enter suppress_warnings twice.")
+
+ self._orig_show = warnings.showwarning
+ self._filters = warnings.filters
+ warnings.filters = self._filters[:]
+
+ self._entered = True
+ self._tmp_suppressions = []
+ self._tmp_modules = set()
+ self._forwarded = set()
+
+ self.log = [] # reset global log (no need to keep same list)
+
+ for cat, mess, _, mod, log in self._suppressions:
+ if log is not None:
+ del log[:] # clear the log
+ if mod is None:
+ warnings.filterwarnings(
+ "always", category=cat, message=mess)
+ else:
+ module_regex = mod.__name__.replace('.', r'\.') + '$'
+ warnings.filterwarnings(
+ "always", category=cat, message=mess,
+ module=module_regex)
+ self._tmp_modules.add(mod)
+ warnings.showwarning = self._showwarning
+ self._clear_registries()
+
+ return self
+
+ def __exit__(self, *exc_info):
+ warnings.showwarning = self._orig_show
+ warnings.filters = self._filters
+ self._clear_registries()
+ self._entered = False
+ del self._orig_show
+ del self._filters
+
+ def _showwarning(self, message, category, filename, lineno,
+ *args, **kwargs):
+ use_warnmsg = kwargs.pop("use_warnmsg", None)
+ for cat, _, pattern, mod, rec in (
+ self._suppressions + self._tmp_suppressions)[::-1]:
+ if (issubclass(category, cat) and
+ pattern.match(message.args[0]) is not None):
+ if mod is None:
+ # Message and category match, either recorded or ignored
+ if rec is not None:
+ msg = WarningMessage(message, category, filename,
+ lineno, **kwargs)
+ self.log.append(msg)
+ rec.append(msg)
+ return
+ # Use startswith, because warnings strips the c or o from
+ # .pyc/.pyo files.
+ elif mod.__file__.startswith(filename):
+ # The message and module (filename) match
+ if rec is not None:
+ msg = WarningMessage(message, category, filename,
+ lineno, **kwargs)
+ self.log.append(msg)
+ rec.append(msg)
+ return
+
+ # There is no filter in place, so pass to the outside handler
+ # unless we should only pass it once
+ if self._forwarding_rule == "always":
+ if use_warnmsg is None:
+ self._orig_show(message, category, filename, lineno,
+ *args, **kwargs)
+ else:
+ self._orig_showmsg(use_warnmsg)
+ return
+
+ if self._forwarding_rule == "once":
+ signature = (message.args, category)
+ elif self._forwarding_rule == "module":
+ signature = (message.args, category, filename)
+ elif self._forwarding_rule == "location":
+ signature = (message.args, category, filename, lineno)
+
+ if signature in self._forwarded:
+ return
+ self._forwarded.add(signature)
+ if use_warnmsg is None:
+ self._orig_show(message, category, filename, lineno, *args,
+ **kwargs)
+ else:
+ self._orig_showmsg(use_warnmsg)
+
+ def __call__(self, func):
+ """
+ Function decorator to apply certain suppressions to a whole
+ function.
+ """
+ @wraps(func)
+ def new_func(*args, **kwargs):
+ with self:
+ return func(*args, **kwargs)
+
+ return new_func
+
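+ # Nesting sketch: with the default "always" forwarding rule, warnings that
+ # the inner manager neither filters nor records propagate to the outer one
+ # (illustrative):
+ #
+ #     with suppress_warnings() as outer:
+ #         outer_log = outer.record(UserWarning)
+ #         with suppress_warnings() as inner:
+ #             inner.filter(DeprecationWarning)  # swallowed here
+ #             warnings.warn("seen by outer", UserWarning)
+ #         assert len(outer_log) == 1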
+
+@contextlib.contextmanager
+def _assert_no_gc_cycles_context(name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+
+ # not meaningful to test if there is no refcounting
+ if not HAS_REFCOUNT:
+ return
+
+ assert_(gc.isenabled())
+ gc.disable()
+ gc_debug = gc.get_debug()
+ try:
+ for i in range(100):
+ if gc.collect() == 0:
+ break
+ else:
+ raise RuntimeError(
+ "Unable to fully collect garbage - perhaps a __del__ method is "
+ "creating more reference cycles?")
+
+ gc.set_debug(gc.DEBUG_SAVEALL)
+ yield
+ # gc.collect returns the number of unreachable objects in cycles that
+ # were found -- we are checking that no cycles were created in the context
+ n_objects_in_cycles = gc.collect()
+ objects_in_cycles = gc.garbage[:]
+ finally:
+ del gc.garbage[:]
+ gc.set_debug(gc_debug)
+ gc.enable()
+
+ if n_objects_in_cycles:
+ name_str = " when calling %s" % name if name is not None else ""
+ raise AssertionError(
+ "Reference cycles were found{}: {} objects were collected, "
+ "of which {} are shown below:{}"
+ .format(
+ name_str,
+ n_objects_in_cycles,
+ len(objects_in_cycles),
+ ''.join(
+ "\n {} object with id={}:\n {}".format(
+ type(o).__name__,
+ id(o),
+ pprint.pformat(o).replace('\n', '\n ')
+ ) for o in objects_in_cycles
+ )
+ )
+ )
+
+
+def assert_no_gc_cycles(*args, **kwargs):
+ """
+ Fail if the given callable produces any reference cycles.
+
+ If called with all arguments omitted, may be used as a context manager:
+
+ with assert_no_gc_cycles():
+ do_something()
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ func : callable
+ The callable to test.
+ \\*args : Arguments
+ Arguments passed to `func`.
+ \\*\\*kwargs : Kwargs
+ Keyword arguments passed to `func`.
+
+ Returns
+ -------
+ Nothing. The result is deliberately discarded to ensure that all cycles
+ are found.
+
+ """
+ if not args:
+ return _assert_no_gc_cycles_context()
+
+ func = args[0]
+ args = args[1:]
+ with _assert_no_gc_cycles_context(name=func.__name__):
+ func(*args, **kwargs)
set of tools
"""
-import os
-from .nose_tools.decorators import *
+from __future__ import division, absolute_import, print_function
+import warnings
+
+# 2018-04-04, numpy 1.15.0
+warnings.warn("Importing from numpy.testing.decorators is deprecated, "
+ "import from numpy.testing instead.",
+ DeprecationWarning, stacklevel=2)
+
+from ._private.decorators import *
+++ /dev/null
-"""
-Decorators for labeling and modifying behavior of test objects.
-
-Decorators that merely return a modified version of the original
-function object are straightforward. Decorators that return a new
-function object need to use
-::
-
- nose.tools.make_decorator(original_function)(decorator)
-
-in returning the decorator, in order to preserve meta-data such as
-function name, setup and teardown functions and so on - see
-``nose.tools`` for more information.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-try:
- # Accessing collections abstract classes from collections
- # has been deprecated since Python 3.3
- import collections.abc as collections_abc
-except ImportError:
- import collections as collections_abc
-
-from .utils import SkipTest, assert_warns, HAS_REFCOUNT
-
-__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated',
- 'parametrize', '_needs_refcount',]
-
-
-def slow(t):
- """
- Label a test as 'slow'.
-
- The exact definition of a slow test is obviously both subjective and
- hardware-dependent, but in general any individual test that requires more
- than a second or two should be labeled as slow (the whole suite consists of
- thousands of tests, so even a second is significant).
-
- Parameters
- ----------
- t : callable
- The test to label as slow.
-
- Returns
- -------
- t : callable
- The decorated test `t`.
-
- Examples
- --------
- The `numpy.testing` module includes ``import decorators as dec``.
- A test can be decorated as slow like this::
-
- from numpy.testing import *
-
- @dec.slow
- def test_big(self):
- print('Big, slow test')
-
- """
-
- t.slow = True
- return t
-
-def setastest(tf=True):
- """
- Signals to nose that this function is or is not a test.
-
- Parameters
- ----------
- tf : bool
- If True, specifies that the decorated callable is a test.
- If False, specifies that the decorated callable is not a test.
- Default is True.
-
- Notes
- -----
- This decorator can't use the nose namespace, because it can be
- called from a non-test module. See also ``istest`` and ``nottest`` in
- ``nose.tools``.
-
- Examples
- --------
- `setastest` can be used in the following way::
-
- from numpy.testing import dec
-
- @dec.setastest(False)
- def func_with_test_in_name(arg1, arg2):
- pass
-
- """
- def set_test(t):
- t.__test__ = tf
- return t
- return set_test
-
-def skipif(skip_condition, msg=None):
- """
- Make function raise SkipTest exception if a given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- skip_condition : bool or callable
- Flag to determine whether to skip the decorated test.
- msg : str, optional
- Message to give on raising a SkipTest exception. Default is None.
-
- Returns
- -------
- decorator : function
- Decorator which, when applied to a function, causes SkipTest
- to be raised when `skip_condition` is True, and the function
- to be called normally otherwise.
-
- Notes
- -----
- The decorator itself is decorated with the ``nose.tools.make_decorator``
- function in order to transmit function name, and various other metadata.
-
- """
-
- def skip_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
-
- # Allow for both boolean or callable skip conditions.
- if isinstance(skip_condition, collections_abc.Callable):
- skip_val = lambda: skip_condition()
- else:
- skip_val = lambda: skip_condition
-
- def get_msg(func,msg=None):
- """Skip message with information about function being skipped."""
- if msg is None:
- out = 'Test skipped due to test condition'
- else:
- out = msg
-
- return "Skipping test: %s: %s" % (func.__name__, out)
-
- # We need to define *two* skippers because Python doesn't allow both
- # return with value and yield inside the same function.
- def skipper_func(*args, **kwargs):
- """Skipper for normal test functions."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- return f(*args, **kwargs)
-
- def skipper_gen(*args, **kwargs):
- """Skipper for test generators."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- for x in f(*args, **kwargs):
- yield x
-
- # Choose the right skipper to use when building the actual decorator.
- if nose.util.isgenerator(f):
- skipper = skipper_gen
- else:
- skipper = skipper_func
-
- return nose.tools.make_decorator(f)(skipper)
-
- return skip_decorator
-
-
-def knownfailureif(fail_condition, msg=None):
- """
- Make function raise KnownFailureException if a given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- fail_condition : bool or callable
- Flag to determine whether to mark the decorated test as a known
- failure (if True) or not (if False).
- msg : str, optional
- Message to give on raising a KnownFailureException exception.
- Default is None.
-
- Returns
- -------
- decorator : function
- Decorator, which, when applied to a function, causes
- KnownFailureException to be raised when `fail_condition` is True,
- and the function to be called normally otherwise.
-
- Notes
- -----
- The decorator itself is decorated with the ``nose.tools.make_decorator``
- function in order to transmit function name, and various other metadata.
-
- """
- if msg is None:
- msg = 'Test skipped due to known failure'
-
- # Allow for both boolean or callable known failure conditions.
- if isinstance(fail_condition, collections_abc.Callable):
- fail_val = lambda: fail_condition()
- else:
- fail_val = lambda: fail_condition
-
- def knownfail_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
- from .noseclasses import KnownFailureException
-
- def knownfailer(*args, **kwargs):
- if fail_val():
- raise KnownFailureException(msg)
- else:
- return f(*args, **kwargs)
- return nose.tools.make_decorator(f)(knownfailer)
-
- return knownfail_decorator
-
-def deprecated(conditional=True):
- """
- Filter deprecation warnings while running the test suite.
-
- This decorator can be used to filter DeprecationWarnings, to avoid
- printing them during the test suite run, while checking that the test
- actually raises a DeprecationWarning.
-
- Parameters
- ----------
- conditional : bool or callable, optional
- Flag to determine whether to mark test as deprecated or not. If the
- condition is a callable, it is used at runtime to dynamically make the
- decision. Default is True.
-
- Returns
- -------
- decorator : function
- The `deprecated` decorator itself.
-
- Notes
- -----
- .. versionadded:: 1.4.0
-
- """
- def deprecate_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
-
- def _deprecated_imp(*args, **kwargs):
- # Run the function and check that it raises a DeprecationWarning
- with assert_warns(DeprecationWarning):
- f(*args, **kwargs)
-
- if isinstance(conditional, collections_abc.Callable):
- cond = conditional()
- else:
- cond = conditional
- if cond:
- return nose.tools.make_decorator(f)(_deprecated_imp)
- else:
- return f
- return deprecate_decorator
-
-
-def parametrize(vars, input):
- """
- Pytest compatibility helper. This implements the simplest level of
- pytest.mark.parametrize for use in nose as an aid in making the transition
- to pytest. It achieves that by adding a dummy var parameter and ignoring
- the doc_func parameter of the base class. It does not support variable
- substitution by name, nor does it support nesting or classes. See the
- pytest documentation for usage.
-
- .. versionadded:: 1.14.0
-
- """
- from .parameterized import parameterized
-
- return parameterized(input)
-
-_needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
+++ /dev/null
-# These classes implement a doctest runner plugin for nose, a "known failure"
-# error class, and a customized TestProgram for NumPy.
-
-# Because this module imports nose directly, it should not
-# be used except by nosetester.py to avoid a general NumPy
-# dependency on nose.
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import doctest
-import inspect
-
-import numpy
-import nose
-from nose.plugins import doctests as npd
-from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
-from nose.plugins.base import Plugin
-from nose.util import src
-from .nosetester import get_package_name
-from .utils import KnownFailureException, KnownFailureTest
-
-
-# Some of the classes in this module begin with 'Numpy' to clearly distinguish
-# them from the plethora of very similar names from nose/unittest/doctest
-
-#-----------------------------------------------------------------------------
-# Modified version of the one in the stdlib, that fixes a python bug (doctests
-# not found in extension modules, http://bugs.python.org/issue3158)
-class NumpyDocTestFinder(doctest.DocTestFinder):
-
- def _from_module(self, module, object):
- """
- Return true if the given object is defined in the given
- module.
- """
- if module is None:
- return True
- elif inspect.isfunction(object):
- return module.__dict__ is object.__globals__
- elif inspect.isbuiltin(object):
- return module.__name__ == object.__module__
- elif inspect.isclass(object):
- return module.__name__ == object.__module__
- elif inspect.ismethod(object):
- # This one may be a bug in cython that fails to correctly set the
- # __module__ attribute of methods, but since the same error is easy
- # to make by extension code writers, having this safety in place
- # isn't such a bad idea
- return module.__name__ == object.__self__.__class__.__module__
- elif inspect.getmodule(object) is not None:
- return module is inspect.getmodule(object)
- elif hasattr(object, '__module__'):
- return module.__name__ == object.__module__
- elif isinstance(object, property):
- return True # [XX] no way to be sure.
- else:
- raise ValueError("object must be a class or function")
-
- def _find(self, tests, obj, name, module, source_lines, globs, seen):
- """
- Find tests for the given object and any contained objects, and
- add them to `tests`.
- """
-
- doctest.DocTestFinder._find(self, tests, obj, name, module,
- source_lines, globs, seen)
-
- # Below we re-run pieces of the above method with manual modifications,
- # because the original code is buggy and fails to correctly identify
- # doctests in extension modules.
-
- # Local shorthands
- from inspect import (
- isroutine, isclass, ismodule, isfunction, ismethod
- )
-
- # Look for tests in a module's contained objects.
- if ismodule(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- valname1 = '%s.%s' % (name, valname)
- if ( (isroutine(val) or isclass(val))
- and self._from_module(module, val)):
-
- self._find(tests, val, valname1, module, source_lines,
- globs, seen)
-
- # Look for tests in a class's contained objects.
- if isclass(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- # Special handling for staticmethod/classmethod.
- if isinstance(val, staticmethod):
- val = getattr(obj, valname)
- if isinstance(val, classmethod):
- val = getattr(obj, valname).__func__
-
- # Recurse to methods, properties, and nested classes.
- if ((isfunction(val) or isclass(val) or
- ismethod(val) or isinstance(val, property)) and
- self._from_module(module, val)):
- valname = '%s.%s' % (name, valname)
- self._find(tests, val, valname, module, source_lines,
- globs, seen)
-
-
-# second-chance checker; if the default comparison doesn't
-# pass, then see if the expected output string contains flags that
-# tell us to ignore the output
-class NumpyOutputChecker(doctest.OutputChecker):
- def check_output(self, want, got, optionflags):
- ret = doctest.OutputChecker.check_output(self, want, got,
- optionflags)
- if not ret:
- if "#random" in want:
- return True
-
- # it would be useful to normalize endianness so that
- # bigendian machines don't fail all the tests (and there are
- # actually some bigendian examples in the doctests). Let's try
- # making them all little endian
- got = got.replace("'>", "'<")
- want = want.replace("'>", "'<")
-
- # try to normalize out 32 and 64 bit default int sizes
- for sz in [4, 8]:
- got = got.replace("'<i%d'" % sz, "int")
- want = want.replace("'<i%d'" % sz, "int")
-
- ret = doctest.OutputChecker.check_output(self, want,
- got, optionflags)
-
- return ret
-
-
-# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
-# its constructor that blocks non-default arguments from being passed
-# down into doctest.DocTestCase
-class NumpyDocTestCase(npd.DocTestCase):
- def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
- checker=None, obj=None, result_var='_'):
- self._result_var = result_var
- self._nose_obj = obj
- doctest.DocTestCase.__init__(self, test,
- optionflags=optionflags,
- setUp=setUp, tearDown=tearDown,
- checker=checker)
-
-
-print_state = numpy.get_printoptions()
-
-class NumpyDoctest(npd.Doctest):
- name = 'numpydoctest' # call nosetests with --with-numpydoctest
- score = 1000 # load late, after doctest builtin
-
- # always use whitespace and ellipsis options for doctests
- doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
-
- # files that should be ignored for doctests
- doctest_ignore = ['generate_numpy_api.py',
- 'setup.py']
-
- # Custom classes; class variables to allow subclassing
- doctest_case_class = NumpyDocTestCase
- out_check_class = NumpyOutputChecker
- test_finder_class = NumpyDocTestFinder
-
- # Don't use the standard doctest option handler; hard-code the option values
- def options(self, parser, env=os.environ):
- Plugin.options(self, parser, env)
- # Test doctests in 'test' files / directories. Standard plugin default
- # is False
- self.doctest_tests = True
- # Variable name; if defined, doctest results stored in this variable in
- # the top-level namespace. None is the standard default
- self.doctest_result_var = None
-
- def configure(self, options, config):
- # parent method sets enabled flag from command line --with-numpydoctest
- Plugin.configure(self, options, config)
- self.finder = self.test_finder_class()
- self.parser = doctest.DocTestParser()
- if self.enabled:
- # Pull standard doctest out of plugin list; there's no reason to run
- # both. In practice the Unplugger plugin above would cover us when
- # run from a standard numpy.test() call; this is just in case
- # someone wants to run our plugin outside the numpy.test() machinery
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != 'doctest']
-
- def set_test_context(self, test):
- """ Configure `test` object to set test context
-
- We set the numpy / scipy standard doctest namespace
-
- Parameters
- ----------
- test : test object
- with ``globs`` dictionary defining namespace
-
- Returns
- -------
- None
-
- Notes
- -----
- `test` object modified in place
- """
- # set the namespace for tests
- pkg_name = get_package_name(os.path.dirname(test.filename))
-
- # Each doctest should execute in an environment equivalent to
- # starting Python and executing "import numpy as np", and,
- # for SciPy packages, an additional import of the local
- # package (so that scipy.linalg.basic.py's doctests have an
- # implicit "from scipy import linalg" as well.
- #
- # Note: __file__ allows the doctest in NoseTester to run
- # without producing an error
- test.globs = {'__builtins__':__builtins__,
- '__file__':'__main__',
- '__name__':'__main__',
- 'np':numpy}
- # add appropriate scipy import for SciPy tests
- if 'scipy' in pkg_name:
- p = pkg_name.split('.')
- p2 = p[-1]
- test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
-
- # Override test loading to customize test context (with set_test_context
- # method), set standard docstring options, and install our own test output
- # checker
- def loadTestsFromModule(self, module):
- if not self.matches(module.__name__):
- npd.log.debug("Doctest doesn't want module %s", module)
- return
- try:
- tests = self.finder.find(module)
- except AttributeError:
- # nose allows module.__test__ = False; doctest does not and
- # throws AttributeError
- return
- if not tests:
- return
- tests.sort()
- module_file = src(module.__file__)
- for test in tests:
- if not test.examples:
- continue
- if not test.filename:
- test.filename = module_file
- # Set test namespace; test altered in place
- self.set_test_context(test)
- yield self.doctest_case_class(test,
- optionflags=self.doctest_optflags,
- checker=self.out_check_class(),
- result_var=self.doctest_result_var)
-
- # Add an afterContext method to nose.plugins.doctests.Doctest in order
- # to restore print options to the original state after each doctest
- def afterContext(self):
- numpy.set_printoptions(**print_state)
-
- # Ignore NumPy-specific build files that shouldn't be searched for tests
- def wantFile(self, file):
- bn = os.path.basename(file)
- if bn in self.doctest_ignore:
- return False
- return npd.Doctest.wantFile(self, file)
-
-
-class Unplugger(object):
- """ Nose plugin to remove named plugin late in loading
-
- By default it removes the "doctest" plugin.
- """
- name = 'unplugger'
- enabled = True # always enabled
- score = 4000 # load late in order to be after builtins
-
- def __init__(self, to_unplug='doctest'):
- self.to_unplug = to_unplug
-
- def options(self, parser, env):
- pass
-
- def configure(self, options, config):
- # Pull named plugin out of plugins list
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != self.to_unplug]
-
-
-class KnownFailurePlugin(ErrorClassPlugin):
- '''Plugin that installs a KNOWNFAIL error class for the
- KnownFailureException. When KnownFailureException is raised,
- the exception will be logged in the knownfail attribute of the
- result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
- exception will not be counted as an error or failure.'''
- enabled = True
- knownfail = ErrorClass(KnownFailureException,
- label='KNOWNFAIL',
- isfailure=False)
-
- def options(self, parser, env=os.environ):
- env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
- parser.add_option('--no-knownfail', action='store_true',
- dest='noKnownFail', default=env.get(env_opt, False),
- help='Disable special handling of KnownFailure '
- 'exceptions')
-
- def configure(self, options, conf):
- if not self.can_configure:
- return
- self.conf = conf
- disable = getattr(options, 'noKnownFail', False)
- if disable:
- self.enabled = False
-
-KnownFailure = KnownFailurePlugin # backwards compat
-
-
-class FPUModeCheckPlugin(Plugin):
- """
- Plugin that checks the FPU mode before and after each test,
- raising failures if the test changed the mode.
- """
-
- def prepareTestCase(self, test):
- from numpy.core._multiarray_tests import get_fpu_mode
-
- def run(result):
- old_mode = get_fpu_mode()
- test.test(result)
- new_mode = get_fpu_mode()
-
- if old_mode != new_mode:
- try:
- raise AssertionError(
- "FPU mode changed from {0:#x} to {1:#x} during the "
- "test".format(old_mode, new_mode))
- except AssertionError:
- result.addFailure(test, sys.exc_info())
-
- return run
-
-
-# Class allows us to save the results of the tests in runTests - see runTests
-# method docstring for details
-class NumpyTestProgram(nose.core.TestProgram):
- def runTests(self):
- """Run Tests. Returns true on success, false on failure, and
- sets self.success to the same value.
-
- Because nose currently discards the test result object, but we need
- to return it to the user, override TestProgram.runTests to retain
- the result
- """
- if self.testRunner is None:
- self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
- verbosity=self.config.verbosity,
- config=self.config)
- plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
- if plug_runner is not None:
- self.testRunner = plug_runner
- self.result = self.testRunner.run(self.test)
- self.success = self.result.wasSuccessful()
- return self.success
+++ /dev/null
-"""
-Nose test running.
-
-This module implements ``test()`` and ``bench()`` functions for NumPy modules.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import warnings
-from numpy.compat import basestring
-import numpy as np
-
-from .utils import import_nose, suppress_warnings
-
-
-__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
- '_numpy_tester', 'get_package_name', 'import_nose',
- 'suppress_warnings']
-
-
-def get_package_name(filepath):
- """
- Given a path where a package is installed, determine its name.
-
- Parameters
- ----------
- filepath : str
- Path to a file. If the determination fails, "numpy" is returned.
-
- Examples
- --------
- >>> np.testing.nosetester.get_package_name('nonsense')
- 'numpy'
-
- """
-
- fullpath = filepath[:]
- pkg_name = []
- while 'site-packages' in filepath or 'dist-packages' in filepath:
- filepath, p2 = os.path.split(filepath)
- if p2 in ('site-packages', 'dist-packages'):
- break
- pkg_name.append(p2)
-
- # if package name determination failed, just default to numpy/scipy
- if not pkg_name:
- if 'scipy' in fullpath:
- return 'scipy'
- else:
- return 'numpy'
-
- # otherwise, reverse to get correct order and return
- pkg_name.reverse()
-
- # don't include the outer egg directory
- if pkg_name[0].endswith('.egg'):
- pkg_name.pop(0)
-
- return '.'.join(pkg_name)
-
-
-def run_module_suite(file_to_run=None, argv=None):
- """
- Run a test module.
-
- Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
- the command line
-
- Parameters
- ----------
- file_to_run : str, optional
- Path to test module, or None.
- By default, run the module from which this function is called.
- argv : list of strings
- Arguments to be passed to the nose test runner. ``argv[0]`` is
- ignored. All command line arguments accepted by ``nosetests``
- will work. If it is the default value None, sys.argv is used.
-
- .. versionadded:: 1.9.0
-
- Examples
- --------
- Adding the following::
-
- if __name__ == "__main__":
- run_module_suite(argv=sys.argv)
-
- at the end of a test module will run the tests when that module is
- called in the python interpreter.
-
- Alternatively, calling::
-
- >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
-
- from an interpreter will run all the test routines in 'test_matlib.py'.
- """
- if file_to_run is None:
- f = sys._getframe(1)
- file_to_run = f.f_locals.get('__file__', None)
- if file_to_run is None:
- raise AssertionError
-
- if argv is None:
- argv = sys.argv + [file_to_run]
- else:
- argv = argv + [file_to_run]
-
- nose = import_nose()
- from .noseclasses import KnownFailurePlugin
- nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
-
-
-class NoseTester(object):
- """
- Nose test runner.
-
- This class is made available as numpy.testing.Tester, and a test function
- is typically added to a package's __init__.py like so::
-
- from numpy.testing import Tester
- test = Tester().test
-
- Calling this test function finds and runs all tests associated with the
- package and all its sub-packages.
-
- Attributes
- ----------
- package_path : str
- Full path to the package to test.
- package_name : str
- Name of the package to test.
-
- Parameters
- ----------
- package : module, str or None, optional
- The package to test. If a string, this should be the full path to
- the package. If None (default), `package` is set to the module from
- which `NoseTester` is initialized.
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- Default is "release".
- depth : int, optional
- If `package` is None, then this can be used to initialize from the
- module of the caller of (the caller of (...)) the code that
- initializes `NoseTester`. Default of 0 means the module of the
- immediate caller; higher values are useful for utility routines that
- want to initialize `NoseTester` objects on behalf of other code.
-
- """
- def __init__(self, package=None, raise_warnings="release", depth=0,
- check_fpu_mode=False):
- # Back-compat: 'None' used to mean either "release" or "develop"
- # depending on whether this was a release or develop version of
- # numpy. Those semantics were fine for testing numpy, but not so
- # helpful for downstream projects like scipy that use
- # numpy.testing. (They want to set this based on whether *they* are a
- # release or develop version, not whether numpy is.) So we continue to
- # accept 'None' for back-compat, but it's now just an alias for the
- # default "release".
- if raise_warnings is None:
- raise_warnings = "release"
-
- package_name = None
- if package is None:
- f = sys._getframe(1 + depth)
- package_path = f.f_locals.get('__file__', None)
- if package_path is None:
- raise AssertionError
- package_path = os.path.dirname(package_path)
- package_name = f.f_locals.get('__name__', None)
- elif isinstance(package, type(os)):
- package_path = os.path.dirname(package.__file__)
- package_name = getattr(package, '__name__', None)
- else:
- package_path = str(package)
-
- self.package_path = package_path
-
- # Find the package name under test; this name is used to limit coverage
- # reporting (if enabled).
- if package_name is None:
- package_name = get_package_name(package_path)
- self.package_name = package_name
-
- # Set to "release" in constructor in maintenance branches.
- self.raise_warnings = raise_warnings
-
- # Whether to check for FPU mode changes
- self.check_fpu_mode = check_fpu_mode
-
- def _test_argv(self, label, verbose, extra_argv):
- ''' Generate argv for nosetest command
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- see ``test`` docstring
- verbose : int, optional
- Verbosity value for test outputs, in the range 1-10. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- argv : list
- command line arguments that will be passed to nose
- '''
- argv = [__file__, self.package_path, '-s']
- if label and label != 'full':
- if not isinstance(label, basestring):
- raise TypeError('Selection label should be a string')
- if label == 'fast':
- label = 'not slow'
- argv += ['-A', label]
- argv += ['--verbosity', str(verbose)]
-
- # When installing with setuptools, and also in some other cases, the
- # test_*.py files end up marked +x executable. Nose, by default, does
- # not run files marked with +x as they might be scripts. However, in
- # our case nose only looks for test_*.py files under the package
- # directory, which should be safe.
- argv += ['--exe']
-
- if extra_argv:
- argv += extra_argv
- return argv
-
- def _show_system_info(self):
- nose = import_nose()
-
- import numpy
- print("NumPy version %s" % numpy.__version__)
- relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
- print("NumPy relaxed strides checking option:", relaxed_strides)
- npdir = os.path.dirname(numpy.__file__)
- print("NumPy is installed in %s" % npdir)
-
- if 'scipy' in self.package_name:
- import scipy
- print("SciPy version %s" % scipy.__version__)
- spdir = os.path.dirname(scipy.__file__)
- print("SciPy is installed in %s" % spdir)
-
- pyversion = sys.version.replace('\n', '')
- print("Python version %s" % pyversion)
- print("nose version %d.%d.%d" % nose.__versioninfo__)
-
- def _get_custom_doctester(self):
- """ Return instantiated plugin for doctests
-
- Allows subclassing of this class to override doctester
-
- A return value of None means use the nose builtin doctest plugin
- """
- from .noseclasses import NumpyDoctest
- return NumpyDoctest()
-
- def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, timer=False):
- """
- Run tests for module using nose.
-
- This method does the heavy lifting for the `test` method. It takes all
- the same arguments, for details see `test`.
-
- See Also
- --------
- test
-
- """
- # fail with nice error message if nose is not present
- import_nose()
- # compile argv
- argv = self._test_argv(label, verbose, extra_argv)
- # our way of doing coverage
- if coverage:
- argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
- '--cover-tests', '--cover-erase']
-
- if timer:
- if timer is True:
- argv += ['--with-timer']
- elif isinstance(timer, int):
- argv += ['--with-timer', '--timer-top-n', str(timer)]
-
- # construct list of plugins
- import nose.plugins.builtin
- from nose.plugins import EntryPointPluginManager
- from .noseclasses import (KnownFailurePlugin, Unplugger,
- FPUModeCheckPlugin)
- plugins = [KnownFailurePlugin()]
- plugins += [p() for p in nose.plugins.builtin.plugins]
- if self.check_fpu_mode:
- plugins += [FPUModeCheckPlugin()]
- argv += ["--with-fpumodecheckplugin"]
- try:
- # External plugins (like nose-timer)
- entrypoint_manager = EntryPointPluginManager()
- entrypoint_manager.loadPlugins()
- plugins += [p for p in entrypoint_manager.plugins]
- except ImportError:
- # Relies on pkg_resources, not a hard dependency
- pass
-
- # add doctesting if required
- doctest_argv = '--with-doctest' in argv
- if doctests == False and doctest_argv:
- doctests = True
- plug = self._get_custom_doctester()
- if plug is None:
- # use standard doctesting
- if doctests and not doctest_argv:
- argv += ['--with-doctest']
- else: # custom doctesting
- if doctest_argv: # in fact the unplugger would take care of this
- argv.remove('--with-doctest')
- plugins += [Unplugger('doctest'), plug]
- if doctests:
- argv += ['--with-' + plug.name]
- return argv, plugins
-
- def test(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, raise_warnings=None,
- timer=False):
- """
- Run tests for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the tests to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow tests as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
- * attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Verbosity value for test outputs, in the range 1-10. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
- doctests : bool, optional
- If True, run doctests in module. Default is False.
- coverage : bool, optional
- If True, report coverage of NumPy code. Default is False.
- (This requires the `coverage module:
- <http://nedbatchelder.com/code/modules/coverage.html>`_).
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- The default is to use the class initialization value.
- timer : bool or int, optional
- Timing of individual tests with ``nose-timer`` (which needs to be
- installed). If True, time tests and report on all of them.
- If an integer (say ``N``), report timing results for ``N`` slowest
- tests.
-
- Returns
- -------
- result : object
- Returns the result of running the tests as a
- ``nose.result.TextTestResult`` object.
-
- Notes
- -----
- Each NumPy module exposes `test` in its namespace to run all tests for it.
- For example, to run all tests for numpy.lib:
-
- >>> np.lib.test() #doctest: +SKIP
-
- Examples
- --------
- >>> result = np.lib.test() #doctest: +SKIP
- Running unit tests for numpy.lib
- ...
- Ran 976 tests in 3.933s
-
- OK
-
- >>> result.errors #doctest: +SKIP
- []
- >>> result.knownfail #doctest: +SKIP
- []
- """
-
- # cap verbosity at 3 because nose becomes *very* verbose beyond that
- verbose = min(verbose, 3)
-
- from . import utils
- utils.verbose = verbose
-
- argv, plugins = self.prepare_test_args(
- label, verbose, extra_argv, doctests, coverage, timer)
-
- if doctests:
- print("Running unit tests and doctests for %s" % self.package_name)
- else:
- print("Running unit tests for %s" % self.package_name)
-
- self._show_system_info()
-
- # reset doctest state on every run
- import doctest
- doctest.master = None
-
- if raise_warnings is None:
- raise_warnings = self.raise_warnings
-
- _warn_opts = dict(develop=(Warning,),
- release=())
- if isinstance(raise_warnings, basestring):
- raise_warnings = _warn_opts[raise_warnings]
-
- with suppress_warnings("location") as sup:
- # Reset the warning filters to the default state,
- # so that running the tests is more repeatable.
- warnings.resetwarnings()
- # Set all warnings to 'warn', this is because the default 'once'
- # has the bad property of possibly shadowing later warnings.
- warnings.filterwarnings('always')
- # Force the requested warnings to raise
- for warningtype in raise_warnings:
- warnings.filterwarnings('error', category=warningtype)
- # Filter out annoying import messages.
- sup.filter(message='Not importing directory')
- sup.filter(message="numpy.dtype size changed")
- sup.filter(message="numpy.ufunc size changed")
- sup.filter(category=np.ModuleDeprecationWarning)
- # Filter out boolean '-' deprecation messages. This allows
- # older versions of scipy to test without a flood of messages.
- sup.filter(message=".*boolean negative.*")
- sup.filter(message=".*boolean subtract.*")
- # Filter out distutils cpu warnings (could be localized to
- # distutils tests). ASV has problems with top level import,
- # so fetch module for suppression here.
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- from ...distutils import cpuinfo
- sup.filter(category=UserWarning, module=cpuinfo)
- # See #7949: Filter out deprecation warnings due to the -3 flag to
- # python 2
- if sys.version_info.major == 2 and sys.py3kwarning:
- # This is very specific, so using the fragile module filter
- # is fine
- import threading
- sup.filter(DeprecationWarning,
- r"sys\.exc_clear\(\) not supported in 3\.x",
- module=threading)
- sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
- sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
- sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
- # Filter out some deprecation warnings inside nose 1.3.7 when run
- # on python 3.5b2. See
- # https://github.com/nose-devs/nose/issues/929
- # Note: it is hard to filter based on module for sup (lineno could
- # be implemented).
- warnings.filterwarnings("ignore", message=".*getargspec.*",
- category=DeprecationWarning,
- module=r"nose\.")
-
- from .noseclasses import NumpyTestProgram
-
- t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
-
- return t.result
-
- def bench(self, label='fast', verbose=1, extra_argv=None):
- """
- Run benchmarks for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the benchmarks to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow benchmarks as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
-            * attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- success : bool
- Returns True if running the benchmarks works, False if an error
- occurred.
-
- Notes
- -----
- Benchmarks are like tests, but have names starting with "bench" instead
- of "test", and can be found under the "benchmarks" sub-directory of the
- module.
-
- Each NumPy module exposes `bench` in its namespace to run all benchmarks
- for it.
-
- Examples
- --------
- >>> success = np.lib.bench() #doctest: +SKIP
- Running benchmarks for numpy.lib
- ...
- using 562341 items:
- unique:
- 0.11
- unique1d:
- 0.11
- ratio: 1.0
- nUnique: 56230 == 56230
- ...
- OK
-
- >>> success #doctest: +SKIP
- True
-
- """
-
- print("Running benchmarks for %s" % self.package_name)
- self._show_system_info()
-
- argv = self._test_argv(label, verbose, extra_argv)
- argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
-
- # import nose or make informative error
- nose = import_nose()
-
- # get plugin to disable doctests
- from .noseclasses import Unplugger
- add_plugins = [Unplugger('doctest')]
-
- return nose.run(argv=argv, addplugins=add_plugins)
-
-
-def _numpy_tester():
- if hasattr(np, "__version__") and ".dev0" in np.__version__:
- mode = "develop"
- else:
- mode = "release"
- return NoseTester(raise_warnings=mode, depth=1,
- check_fpu_mode=True)
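
For reference, a minimal sketch of the develop/release selection that
`_numpy_tester` performs above; it assumes only that numpy exposes
`__version__` and that development builds carry a ".dev0" tag:

    import numpy as np

    # Development builds of numpy carry ".dev0" in their version string;
    # those runs raise on every warning, release runs raise on none.
    mode = "develop" if ".dev0" in getattr(np, "__version__", "") else "release"
    raise_warnings = {"develop": (Warning,), "release": ()}[mode]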
+++ /dev/null
-"""
-tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
-
-Unless stated otherwise in the source files, all code is copyright 2010 David
-Wolever <david@wolever.net>. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-The views and conclusions contained in the software and documentation are those
-of the authors and should not be interpreted as representing official policies,
-either expressed or implied, of David Wolever.
-
-"""
-import re
-import sys
-import inspect
-import warnings
-from functools import wraps
-from types import MethodType as MethodType
-from collections import namedtuple
-
-try:
- from collections import OrderedDict as MaybeOrderedDict
-except ImportError:
- MaybeOrderedDict = dict
-
-from unittest import TestCase
-
-PY3 = sys.version_info[0] == 3
-PY2 = sys.version_info[0] == 2
-
-
-if PY3:
- # Python 3 doesn't have an InstanceType, so just use a dummy type.
- class InstanceType():
- pass
- lzip = lambda *a: list(zip(*a))
- text_type = str
- string_types = str,
- bytes_type = bytes
- def make_method(func, instance, type):
- if instance is None:
- return func
- return MethodType(func, instance)
-else:
- from types import InstanceType
- lzip = zip
- text_type = unicode
- bytes_type = str
- string_types = basestring,
- def make_method(func, instance, type):
- return MethodType(func, instance, type)
-
-_param = namedtuple("param", "args kwargs")
-
-class param(_param):
- """ Represents a single parameter to a test case.
-
- For example::
-
- >>> p = param("foo", bar=16)
- >>> p
- param("foo", bar=16)
- >>> p.args
- ('foo', )
- >>> p.kwargs
- {'bar': 16}
-
- Intended to be used as an argument to ``@parameterized``::
-
- @parameterized([
- param("foo", bar=16),
- ])
- def test_stuff(foo, bar=16):
- pass
- """
-
-    def __new__(cls, *args, **kwargs):
- return _param.__new__(cls, args, kwargs)
-
- @classmethod
- def explicit(cls, args=None, kwargs=None):
- """ Creates a ``param`` by explicitly specifying ``args`` and
- ``kwargs``::
-
- >>> param.explicit([1,2,3])
- param(*(1, 2, 3))
- >>> param.explicit(kwargs={"foo": 42})
- param(*(), **{"foo": "42"})
- """
- args = args or ()
- kwargs = kwargs or {}
- return cls(*args, **kwargs)
-
- @classmethod
- def from_decorator(cls, args):
- """ Returns an instance of ``param()`` for ``@parameterized`` argument
- ``args``::
-
- >>> param.from_decorator((42, ))
- param(args=(42, ), kwargs={})
- >>> param.from_decorator("foo")
- param(args=("foo", ), kwargs={})
- """
- if isinstance(args, param):
- return args
- elif isinstance(args, string_types):
- args = (args, )
- try:
- return cls(*args)
- except TypeError as e:
- if "after * must be" not in str(e):
- raise
- raise TypeError(
- "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
- %(args, args),
- )
-
- def __repr__(self):
- return "param(*%r, **%r)" %self
-
-
-class QuietOrderedDict(MaybeOrderedDict):
- """ When OrderedDict is available, use it to make sure that the kwargs in
- doc strings are consistently ordered. """
- __str__ = dict.__str__
- __repr__ = dict.__repr__
-
-
-def parameterized_argument_value_pairs(func, p):
- """Return tuples of parameterized arguments and their values.
-
- This is useful if you are writing your own doc_func
- function and need to know the values for each parameter name::
-
- >>> def func(a, foo=None, bar=42, **kwargs): pass
- >>> p = param(1, foo=7, extra=99)
- >>> parameterized_argument_value_pairs(func, p)
- [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
-
- If the function's first argument is named ``self`` then it will be
- ignored::
-
- >>> def func(self, a): pass
- >>> p = param(1)
- >>> parameterized_argument_value_pairs(func, p)
- [("a", 1)]
-
- Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
-
- >>> def func(foo, *args): pass
- >>> p = param(1)
- >>> parameterized_argument_value_pairs(func, p)
- [("foo", 1)]
- >>> p = param(1, 16)
- >>> parameterized_argument_value_pairs(func, p)
- [("foo", 1), ("*args", (16, ))]
- """
- argspec = inspect.getargspec(func)
- arg_offset = 1 if argspec.args[:1] == ["self"] else 0
-
- named_args = argspec.args[arg_offset:]
-
- result = lzip(named_args, p.args)
- named_args = argspec.args[len(result) + arg_offset:]
- varargs = p.args[len(result):]
-
- result.extend([
- (name, p.kwargs.get(name, default))
- for (name, default)
- in zip(named_args, argspec.defaults or [])
- ])
-
- seen_arg_names = set([ n for (n, _) in result ])
- keywords = QuietOrderedDict(sorted([
- (name, p.kwargs[name])
- for name in p.kwargs
- if name not in seen_arg_names
- ]))
-
- if varargs:
- result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))
-
- if keywords:
- result.append(("**%s" %(argspec.keywords, ), keywords))
-
- return result
-
-def short_repr(x, n=64):
- """ A shortened repr of ``x`` which is guaranteed to be ``unicode``::
-
- >>> short_repr("foo")
- u"foo"
- >>> short_repr("123456789", n=4)
- u"12...89"
- """
-
- x_repr = repr(x)
- if isinstance(x_repr, bytes_type):
- try:
- x_repr = text_type(x_repr, "utf-8")
- except UnicodeDecodeError:
- x_repr = text_type(x_repr, "latin1")
- if len(x_repr) > n:
- x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
- return x_repr
-
-def default_doc_func(func, num, p):
- if func.__doc__ is None:
- return None
-
- all_args_with_values = parameterized_argument_value_pairs(func, p)
-
- # Assumes that the function passed is a bound method.
- descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values]
-
- # The documentation might be a multiline string, so split it
- # and just work with the first string, ignoring the period
- # at the end if there is one.
- first, nl, rest = func.__doc__.lstrip().partition("\n")
- suffix = ""
- if first.endswith("."):
- suffix = "."
- first = first[:-1]
- args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
- return "".join([first.rstrip(), args, suffix, nl, rest])
-
-def default_name_func(func, num, p):
- base_name = func.__name__
- name_suffix = "_%s" %(num, )
- if len(p.args) > 0 and isinstance(p.args[0], string_types):
- name_suffix += "_" + parameterized.to_safe_name(p.args[0])
- return base_name + name_suffix
-
-
-# force nose for numpy purposes.
-_test_runner_override = 'nose'
-_test_runner_guess = False
-_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
-_test_runner_aliases = {
- "_pytest": "pytest",
-}
-
-def set_test_runner(name):
- global _test_runner_override
- if name not in _test_runners:
- raise TypeError(
- "Invalid test runner: %r (must be one of: %s)"
- %(name, ", ".join(_test_runners)),
- )
- _test_runner_override = name
-
-def detect_runner():
- """ Guess which test runner we're using by traversing the stack and looking
- for the first matching module. This *should* be reasonably safe, as
-        it's done during test discovery where the test runner should be the
- stack frame immediately outside. """
- if _test_runner_override is not None:
- return _test_runner_override
- global _test_runner_guess
- if _test_runner_guess is False:
- stack = inspect.stack()
- for record in reversed(stack):
- frame = record[0]
- module = frame.f_globals.get("__name__").partition(".")[0]
- if module in _test_runner_aliases:
- module = _test_runner_aliases[module]
- if module in _test_runners:
- _test_runner_guess = module
- break
- if record[1].endswith("python2.6/unittest.py"):
- _test_runner_guess = "unittest"
- break
- else:
- _test_runner_guess = None
- return _test_runner_guess
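
The stack walk above can be illustrated with a small self-contained sketch;
`guess_runner` and its `known` tuple are hypothetical stand-ins for
`detect_runner` and the `_test_runners` set:

    import inspect

    def guess_runner(known=("unittest", "nose", "nose2", "pytest")):
        # Walk inwards from the outermost frame and report the first
        # top-level module name that matches a known test runner.
        for record in reversed(inspect.stack()):
            module = (record[0].f_globals.get("__name__") or "").partition(".")[0]
            if module in known:
                return module
        return None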
-
-class parameterized(object):
- """ Parameterize a test case::
-
- class TestInt(object):
- @parameterized([
- ("A", 10),
- ("F", 15),
- param("10", 42, base=42)
- ])
- def test_int(self, input, expected, base=16):
- actual = int(input, base=base)
- assert_equal(actual, expected)
-
- @parameterized([
- (2, 3, 5)
- (3, 5, 8),
- ])
- def test_add(a, b, expected):
- assert_equal(a + b, expected)
- """
-
- def __init__(self, input, doc_func=None):
- self.get_input = self.input_as_callable(input)
- self.doc_func = doc_func or default_doc_func
-
- def __call__(self, test_func):
- self.assert_not_in_testcase_subclass()
-
- @wraps(test_func)
- def wrapper(test_self=None):
- test_cls = test_self and type(test_self)
- if test_self is not None:
- if issubclass(test_cls, InstanceType):
- raise TypeError((
- "@parameterized can't be used with old-style classes, but "
- "%r has an old-style class. Consider using a new-style "
- "class, or '@parameterized.expand' "
- "(see http://stackoverflow.com/q/54867/71522 for more "
- "information on old-style classes)."
- ) %(test_self, ))
-
- original_doc = wrapper.__doc__
- for num, args in enumerate(wrapper.parameterized_input):
- p = param.from_decorator(args)
- unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
- try:
- wrapper.__doc__ = nose_tuple[0].__doc__
- # Nose uses `getattr(instance, test_func.__name__)` to get
- # a method bound to the test instance (as opposed to a
- # method bound to the instance of the class created when
- # tests were being enumerated). Set a value here to make
- # sure nose can get the correct test method.
- if test_self is not None:
- setattr(test_cls, test_func.__name__, unbound_func)
- yield nose_tuple
- finally:
- if test_self is not None:
- delattr(test_cls, test_func.__name__)
- wrapper.__doc__ = original_doc
- wrapper.parameterized_input = self.get_input()
- wrapper.parameterized_func = test_func
- test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )
- return wrapper
-
- def param_as_nose_tuple(self, test_self, func, num, p):
- nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
- nose_func.__doc__ = self.doc_func(func, num, p)
- # Track the unbound function because we need to setattr the unbound
- # function onto the class for nose to work (see comments above), and
- # Python 3 doesn't let us pull the function out of a bound method.
- unbound_func = nose_func
- if test_self is not None:
- # Under nose on Py2 we need to return an unbound method to make
- # sure that the `self` in the method is properly shared with the
- # `self` used in `setUp` and `tearDown`. But only there. Everyone
- # else needs a bound method.
- func_self = (
- None if PY2 and detect_runner() == "nose" else
- test_self
- )
- nose_func = make_method(nose_func, func_self, type(test_self))
- return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
-
- def assert_not_in_testcase_subclass(self):
- parent_classes = self._terrible_magic_get_defining_classes()
- if any(issubclass(cls, TestCase) for cls in parent_classes):
- raise Exception("Warning: '@parameterized' tests won't work "
- "inside subclasses of 'TestCase' - use "
- "'@parameterized.expand' instead.")
-
- def _terrible_magic_get_defining_classes(self):
- """ Returns the set of parent classes of the class currently being defined.
- Will likely only work if called from the ``parameterized`` decorator.
- This function is entirely @brandon_rhodes's fault, as he suggested
- the implementation: http://stackoverflow.com/a/8793684/71522
- """
- stack = inspect.stack()
- if len(stack) <= 4:
- return []
- frame = stack[4]
- code_context = frame[4] and frame[4][0].strip()
- if not (code_context and code_context.startswith("class ")):
- return []
- _, _, parents = code_context.partition("(")
- parents, _, _ = parents.partition(")")
- return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
-
- @classmethod
- def input_as_callable(cls, input):
- if callable(input):
- return lambda: cls.check_input_values(input())
- input_values = cls.check_input_values(input)
- return lambda: input_values
-
- @classmethod
- def check_input_values(cls, input_values):
- # Explicitly convert non-list inputs to a list so that:
- # 1. A helpful exception will be raised if they aren't iterable, and
- # 2. Generators are unwrapped exactly once (otherwise `nosetests
- # --processes=n` has issues; see:
- # https://github.com/wolever/nose-parameterized/pull/31)
- if not isinstance(input_values, list):
- input_values = list(input_values)
- return [ param.from_decorator(p) for p in input_values ]
-
- @classmethod
- def expand(cls, input, name_func=None, doc_func=None, **legacy):
- """ A "brute force" method of parameterizing test cases. Creates new
- test cases and injects them into the namespace that the wrapped
- function is being defined in. Useful for parameterizing tests in
-            subclasses of 'TestCase', where Nose test generators don't work.
-
- >>> @parameterized.expand([("foo", 1, 2)])
- ... def test_add1(name, input, expected):
- ... actual = add1(input)
- ... assert_equal(actual, expected)
- ...
- >>> locals()
- ... 'test_add1_foo_0': <function ...> ...
- >>>
- """
-
- if "testcase_func_name" in legacy:
- warnings.warn("testcase_func_name= is deprecated; use name_func=",
- DeprecationWarning, stacklevel=2)
- if not name_func:
- name_func = legacy["testcase_func_name"]
-
- if "testcase_func_doc" in legacy:
- warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
- DeprecationWarning, stacklevel=2)
- if not doc_func:
- doc_func = legacy["testcase_func_doc"]
-
- doc_func = doc_func or default_doc_func
- name_func = name_func or default_name_func
-
- def parameterized_expand_wrapper(f, instance=None):
- stack = inspect.stack()
- frame = stack[1]
- frame_locals = frame[0].f_locals
-
- parameters = cls.input_as_callable(input)()
- for num, p in enumerate(parameters):
- name = name_func(f, num, p)
- frame_locals[name] = cls.param_as_standalone_func(p, f, name)
- frame_locals[name].__doc__ = doc_func(f, num, p)
-
- f.__test__ = False
- return parameterized_expand_wrapper
-
- @classmethod
- def param_as_standalone_func(cls, p, func, name):
- @wraps(func)
- def standalone_func(*a):
- return func(*(a + p.args), **p.kwargs)
- standalone_func.__name__ = name
-
- # place_as is used by py.test to determine what source file should be
- # used for this test.
- standalone_func.place_as = func
-
- # Remove __wrapped__ because py.test will try to look at __wrapped__
- # to determine which parameters should be used with this test case,
- # and obviously we don't need it to do any parameterization.
- try:
- del standalone_func.__wrapped__
- except AttributeError:
- pass
- return standalone_func
-
- @classmethod
- def to_safe_name(cls, s):
- return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
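
A short sketch of how `default_name_func` and `to_safe_name` above compose
the generated test names; `name_for` is a hypothetical helper used only for
illustration:

    import re

    def to_safe_name(s):
        return str(re.sub("[^a-zA-Z0-9_]+", "_", s))

    def name_for(func_name, num, first_arg=None):
        # Mirror default_name_func: an index suffix, plus a sanitized
        # first argument when that argument is a string.
        suffix = "_%s" % (num,)
        if isinstance(first_arg, str):
            suffix += "_" + to_safe_name(first_arg)
        return func_name + suffix

    assert name_for("test_add", 0, "foo bar") == "test_add_0_foo_bar"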
+++ /dev/null
-"""
-Utility functions to facilitate testing.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import re
-import operator
-import warnings
-from functools import partial, wraps
-import shutil
-import contextlib
-from tempfile import mkdtemp, mkstemp
-from unittest.case import SkipTest
-
-from numpy.core import (
- float32, empty, arange, array_repr, ndarray, isnat, array)
-from numpy.lib.utils import deprecate
-
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
-__all__ = [
- 'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
- 'assert_array_equal', 'assert_array_less', 'assert_string_equal',
- 'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
- 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
- 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
- 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
- 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
- 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
- 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
- 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data',
- ]
-
-
-class KnownFailureException(Exception):
- '''Raise this exception to mark a test as a known failing test.'''
- pass
-
-
-KnownFailureTest = KnownFailureException # backwards compat
-verbose = 0
-
-IS_PYPY = '__pypy__' in sys.modules
-HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
-
-
-def import_nose():
- """ Import nose only when needed.
- """
- nose_is_good = True
- minimum_nose_version = (1, 0, 0)
- try:
- import nose
- except ImportError:
- nose_is_good = False
- else:
- if nose.__versioninfo__ < minimum_nose_version:
- nose_is_good = False
-
- if not nose_is_good:
- msg = ('Need nose >= %d.%d.%d for tests - see '
- 'http://nose.readthedocs.io' %
- minimum_nose_version)
- raise ImportError(msg)
-
- return nose
-
-
-def assert_(val, msg=''):
- """
- Assert that works in release mode.
- Accepts callable msg to allow deferring evaluation until failure.
-
- The Python built-in ``assert`` does not work when executing code in
- optimized mode (the ``-O`` flag) - no byte-code is generated for it.
-
- For documentation on usage, refer to the Python documentation.
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if not val:
- try:
- smsg = msg()
- except TypeError:
- smsg = msg
- raise AssertionError(smsg)
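
The callable-message path above defers building an expensive failure message
until the assertion actually fails, e.g.:

    from numpy.testing import assert_

    x = [1, 2, 3]
    # The lambda is only invoked if the condition is false.
    assert_(len(x) == 3, lambda: "unexpected length: %d" % len(x))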
-
-
-def gisnan(x):
- """like isnan, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isnan and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isnan
- st = isnan(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isnan not supported for this type")
- return st
-
-
-def gisfinite(x):
- """like isfinite, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isfinite and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isfinite, errstate
- with errstate(invalid='ignore'):
- st = isfinite(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isfinite not supported for this type")
- return st
-
-
-def gisinf(x):
- """like isinf, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isinf and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isinf, errstate
- with errstate(invalid='ignore'):
- st = isinf(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isinf not supported for this type")
- return st
-
-
-@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
- "Use numpy.random.rand instead.")
-def rand(*args):
- """Returns an array of random numbers with the given shape.
-
- This only uses the standard library, so it is useful for testing purposes.
- """
- import random
- from numpy.core import zeros, float64
- results = zeros(args, float64)
- f = results.flat
- for i in range(len(f)):
- f[i] = random.random()
- return results
-
-
-if os.name == 'nt':
- # Code "stolen" from enthought/debug/memusage.py
- def GetPerformanceAttributes(object, counter, instance=None,
- inum=-1, format=None, machine=None):
- # NOTE: Many counters require 2 samples to give accurate results,
- # including "% Processor Time" (as by definition, at any instant, a
- # thread's CPU usage is either 0 or 100). To read counters like this,
- # you should copy this function, but keep the counter open, and call
- # CollectQueryData() each time you need to know.
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
- # My older explanation for this was that the "AddCounter" process forced
- # the CPU to 100%, but the above makes more sense :)
- import win32pdh
- if format is None:
- format = win32pdh.PDH_FMT_LONG
- path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
- hq = win32pdh.OpenQuery()
- try:
- hc = win32pdh.AddCounter(hq, path)
- try:
- win32pdh.CollectQueryData(hq)
- type, val = win32pdh.GetFormattedCounterValue(hc, format)
- return val
- finally:
- win32pdh.RemoveCounter(hc)
- finally:
- win32pdh.CloseQuery(hq)
-
- def memusage(processName="python", instance=0):
- # from win32pdhutil, part of the win32all package
- import win32pdh
- return GetPerformanceAttributes("Process", "Virtual Bytes",
- processName, instance,
- win32pdh.PDH_FMT_LONG, None)
-elif sys.platform[:5] == 'linux':
-
- def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
- """
- Return virtual memory size in bytes of the running python.
-
- """
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[22])
- except Exception:
- return
-else:
- def memusage():
- """
- Return memory usage of running python. [Not implemented]
-
- """
- raise NotImplementedError
-
-
-if sys.platform[:5] == 'linux':
- def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
- _load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[13])
- except Exception:
- return int(100*(time.time()-_load_time[0]))
-else:
- # os.getpid is not in all platforms available.
- # Using time is safe but inaccurate, especially when process
- # was suspended or sleeping.
- def jiffies(_load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- return int(100*(time.time()-_load_time[0]))
-
-
-def build_err_msg(arrays, err_msg, header='Items are not equal:',
- verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
- msg = ['\n' + header]
- if err_msg:
- if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
- msg = [msg[0] + ' ' + err_msg]
- else:
- msg.append(err_msg)
- if verbose:
- for i, a in enumerate(arrays):
-
- if isinstance(a, ndarray):
- # precision argument is only needed if the objects are ndarrays
- r_func = partial(array_repr, precision=precision)
- else:
- r_func = repr
-
- try:
- r = r_func(a)
- except Exception as exc:
- r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
- if r.count('\n') > 3:
- r = '\n'.join(r.splitlines()[:3])
- r += '...'
- msg.append(' %s: %s' % (names[i], r))
- return '\n'.join(msg)
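
A quick illustration of the message `build_err_msg` assembles; the name is
exported via `__all__` above, so this import should hold for the version
being removed here:

    import numpy as np
    from numpy.testing import build_err_msg

    # Prints the header followed by "ACTUAL:" and "DESIRED:" reprs.
    msg = build_err_msg([np.array([1.0, 2.0]), np.array([1.0, 2.5])],
                        err_msg='', header='Items are not equal:')
    print(msg)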
-
-
-def assert_equal(actual, desired, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal.
-
- Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
- check that all elements of these objects are equal. An exception is raised
- at the first conflicting values.
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal.
-
- Examples
- --------
- >>> np.testing.assert_equal([4,5], [4,6])
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- item=1
- ACTUAL: 5
- DESIRED: 6
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if isinstance(desired, dict):
- if not isinstance(actual, dict):
- raise AssertionError(repr(type(actual)))
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k, i in desired.items():
- if k not in actual:
- raise AssertionError(repr(k))
- assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
- return
- if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k in range(len(desired)):
- assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
- return
- from numpy.core import ndarray, isscalar, signbit
- from numpy.lib import iscomplexobj, real, imag
- if isinstance(actual, ndarray) or isinstance(desired, ndarray):
- return assert_array_equal(actual, desired, err_msg, verbose)
- msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_equal(actualr, desiredr)
- assert_equal(actuali, desiredi)
- except AssertionError:
- raise AssertionError(msg)
-
- # isscalar test to check cases such as [np.nan] != np.nan
- if isscalar(desired) != isscalar(actual):
- raise AssertionError(msg)
-
- # Inf/nan/negative zero handling
- try:
- isdesnan = gisnan(desired)
- isactnan = gisnan(actual)
- if isdesnan and isactnan:
- return # both nan, so equal
-
- # handle signed zero specially for floats
- if desired == 0 and actual == 0:
- if not signbit(desired) == signbit(actual):
- raise AssertionError(msg)
-
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- try:
- isdesnat = isnat(desired)
- isactnat = isnat(actual)
- dtypes_match = array(desired).dtype.type == array(actual).dtype.type
- if isdesnat and isactnat:
- # If both are NaT (and have the same dtype -- datetime or
- # timedelta) they are considered equal.
- if dtypes_match:
- return
- else:
- raise AssertionError(msg)
-
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- try:
- # Explicitly use __eq__ for comparison, gh-2552
- if not (desired == actual):
- raise AssertionError(msg)
-
- except (DeprecationWarning, FutureWarning) as e:
- # this handles the case when the two types are not even comparable
- if 'elementwise == comparison' in e.args[0]:
- raise AssertionError(msg)
- else:
- raise
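
Two of the behaviors implemented above, shown directly: NaNs compare equal
to each other, while a scalar never equals a one-element container:

    import numpy as np
    from numpy.testing import assert_equal, assert_raises

    assert_equal(np.nan, np.nan)        # both NaN, treated as equal
    with assert_raises(AssertionError):
        assert_equal([np.nan], np.nan)  # isscalar mismatch fails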
-
-
-def print_assert_equal(test_string, actual, desired):
- """
- Test if two objects are equal, and print an error message if test fails.
-
- The test is performed with ``actual == desired``.
-
- Parameters
- ----------
- test_string : str
- The message supplied to AssertionError.
- actual : object
- The object to test for equality against `desired`.
- desired : object
- The expected result.
-
- Examples
- --------
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
- Traceback (most recent call last):
- ...
- AssertionError: Test XYZ of func xyz failed
- ACTUAL:
- [0, 1]
- DESIRED:
- [0, 2]
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import pprint
-
- if not (actual == desired):
- msg = StringIO()
- msg.write(test_string)
- msg.write(' failed\nACTUAL: \n')
- pprint.pprint(actual, msg)
- msg.write('DESIRED: \n')
- pprint.pprint(desired, msg)
- raise AssertionError(msg.getvalue())
-
-
-def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
- """
- Raises an AssertionError if two items are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
-    The test verifies that the elements of ``actual`` and ``desired`` satisfy:
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation in `assert_array_almost_equal` did up to rounding
- vagaries. An exception is raised at conflicting values. For ndarrays this
-    delegates to assert_array_almost_equal.
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- decimal : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> import numpy.testing as npt
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- ACTUAL: 2.3333333333333002
- DESIRED: 2.3333333399999998
-
- >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
- ... np.array([1.0,2.33333334]), decimal=9)
- ...
- <type 'exceptions.AssertionError'>:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333333])
- y: array([ 1. , 2.33333334])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import ndarray
- from numpy.lib import iscomplexobj, real, imag
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- def _build_err_msg():
- header = ('Arrays are not almost equal to %d decimals' % decimal)
- return build_err_msg([actual, desired], err_msg, verbose=verbose,
- header=header)
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_almost_equal(actualr, desiredr, decimal=decimal)
- assert_almost_equal(actuali, desiredi, decimal=decimal)
- except AssertionError:
- raise AssertionError(_build_err_msg())
-
- if isinstance(actual, (ndarray, tuple, list)) \
- or isinstance(desired, (ndarray, tuple, list)):
- return assert_array_almost_equal(actual, desired, decimal, err_msg)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(_build_err_msg())
- else:
- if not desired == actual:
- raise AssertionError(_build_err_msg())
- return
- except (NotImplementedError, TypeError):
- pass
- if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
- raise AssertionError(_build_err_msg())
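
A worked instance of the documented threshold, where the difference must be
strictly below ``1.5 * 10**(-decimal)``:

    from numpy.testing import assert_almost_equal, assert_raises

    assert_almost_equal(1.0, 1.0 + 1.4e-7, decimal=7)    # 1.4e-7 < 1.5e-7
    with assert_raises(AssertionError):
        assert_almost_equal(1.0, 1.0 + 1.6e-7, decimal=7)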
-
-
-def assert_approx_equal(actual, desired, significant=7, err_msg='', verbose=True):
- """
- Raises an AssertionError if two items are not equal up to significant
- digits.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- Given two numbers, check that they are approximately equal.
- Approximately equal is defined as the number of significant digits
- that agree.
-
- Parameters
- ----------
- actual : scalar
- The object to check.
- desired : scalar
- The expected object.
- significant : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
- significant=8)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
- significant=8)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal to 8 significant digits:
- ACTUAL: 1.234567e-021
- DESIRED: 1.2345672000000001e-021
-
-    The evaluated condition that raises the exception is
-
- >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
- True
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
-
- (actual, desired) = map(float, (actual, desired))
- if desired == actual:
- return
-    # Normalize the numbers to be in the range (-10.0, 10.0)
- # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
- with np.errstate(invalid='ignore'):
- scale = 0.5*(np.abs(desired) + np.abs(actual))
- scale = np.power(10, np.floor(np.log10(scale)))
- try:
- sc_desired = desired/scale
- except ZeroDivisionError:
- sc_desired = 0.0
- try:
- sc_actual = actual/scale
- except ZeroDivisionError:
- sc_actual = 0.0
- msg = build_err_msg([actual, desired], err_msg,
- header='Items are not equal to %d significant digits:' %
- significant,
- verbose=verbose)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(msg)
- else:
- if not desired == actual:
- raise AssertionError(msg)
- return
- except (TypeError, NotImplementedError):
- pass
- if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
- raise AssertionError(msg)
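
A worked instance of the significant-digits check above: both values scale
by 1e-21, so the normalized difference of about 1e-6 passes at 6 significant
digits but fails at 8:

    from numpy.testing import assert_approx_equal, assert_raises

    assert_approx_equal(1.234567e-21, 1.234568e-21, significant=6)
    with assert_raises(AssertionError):
        assert_approx_equal(1.234567e-21, 1.234568e-21, significant=8)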
-
-
-def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
- header='', precision=6, equal_nan=True,
- equal_inf=True):
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import array, isnan, isinf, any, inf
- x = array(x, copy=False, subok=True)
- y = array(y, copy=False, subok=True)
-
- def isnumber(x):
- return x.dtype.char in '?bhilqpBHILQPefdgFDG'
-
- def istime(x):
- return x.dtype.char in "Mm"
-
- def chk_same_position(x_id, y_id, hasval='nan'):
- """Handling nan/inf: check that x and y have the nan/inf at the same
- locations."""
- try:
- assert_array_equal(x_id, y_id)
- except AssertionError:
- msg = build_err_msg([x, y],
- err_msg + '\nx and y %s location mismatch:'
- % (hasval), verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- try:
- cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
- if not cond:
- msg = build_err_msg([x, y],
- err_msg
- + '\n(shapes %s, %s mismatch)' % (x.shape,
- y.shape),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- if isnumber(x) and isnumber(y):
- has_nan = has_inf = False
- if equal_nan:
- x_isnan, y_isnan = isnan(x), isnan(y)
- # Validate that NaNs are in the same place
- has_nan = any(x_isnan) or any(y_isnan)
- if has_nan:
- chk_same_position(x_isnan, y_isnan, hasval='nan')
-
- if equal_inf:
- x_isinf, y_isinf = isinf(x), isinf(y)
- # Validate that infinite values are in the same place
- has_inf = any(x_isinf) or any(y_isinf)
- if has_inf:
- # Check +inf and -inf separately, since they are different
- chk_same_position(x == +inf, y == +inf, hasval='+inf')
- chk_same_position(x == -inf, y == -inf, hasval='-inf')
-
- if has_nan and has_inf:
- x = x[~(x_isnan | x_isinf)]
- y = y[~(y_isnan | y_isinf)]
- elif has_nan:
- x = x[~x_isnan]
- y = y[~y_isnan]
- elif has_inf:
- x = x[~x_isinf]
- y = y[~y_isinf]
-
- # Only do the comparison if actual values are left
- if x.size == 0:
- return
-
- elif istime(x) and istime(y):
- # If one is datetime64 and the other timedelta64 there is no point
- if equal_nan and x.dtype.type == y.dtype.type:
- x_isnat, y_isnat = isnat(x), isnat(y)
-
- if any(x_isnat) or any(y_isnat):
- chk_same_position(x_isnat, y_isnat, hasval="NaT")
-
- if any(x_isnat) or any(y_isnat):
- x = x[~x_isnat]
- y = y[~y_isnat]
-
- val = comparison(x, y)
-
- if isinstance(val, bool):
- cond = val
- reduced = [0]
- else:
- reduced = val.ravel()
- cond = reduced.all()
- reduced = reduced.tolist()
- if not cond:
- match = 100-100.0*reduced.count(1)/len(reduced)
- msg = build_err_msg([x, y],
- err_msg
- + '\n(mismatch %s%%)' % (match,),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
- except ValueError:
- import traceback
- efmt = traceback.format_exc()
- header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
-
- msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise ValueError(msg)
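
`assert_array_compare` is the engine behind `assert_array_equal` and
`assert_array_less` below; any elementwise predicate can be plugged in. The
name is exported via `__all__` above, so this import should hold for the
version being removed here:

    import operator
    import numpy as np
    from numpy.testing import assert_array_compare

    # Passes: every element of x is <= the matching element of y.
    assert_array_compare(operator.__le__, np.array([1, 2]), np.array([1, 3]),
                         header='Arrays are not ordered by <=')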
-
-
-def assert_array_equal(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not equal.
-
- Given two array_like objects, check that the shape is equal and all
- elements of these objects are equal. An exception is raised at
- shape mismatch or conflicting values. In contrast to the standard usage
-    in numpy, NaNs are compared like numbers: no assertion is raised if
- both objects have NaNs in the same positions.
-
- The usual caution for verifying equality with floating point numbers is
- advised.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- The first assert does not raise an exception:
-
- >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
- ... [np.exp(0),2.33333, np.nan])
-
-    The assert fails due to numerical imprecision with floats:
-
- >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- AssertionError:
- Arrays are not equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 3.14159265, NaN])
- y: array([ 1. , 3.14159265, NaN])
-
- Use `assert_allclose` or one of the nulp (number of floating point values)
- functions for these cases instead:
-
- >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan],
- ... rtol=1e-10, atol=0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
- verbose=verbose, header='Arrays are not equal')
-
-
-def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- The test verifies identical shapes and that the elements of ``actual`` and
-    ``desired`` satisfy:
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation did up to rounding vagaries. An exception is raised
- at shape mismatch or conflicting values. In contrast to the standard usage
-    in numpy, NaNs are compared like numbers: no assertion is raised if both
- objects have NaNs in the same positions.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- decimal : int, optional
- Desired precision, default is 6.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
-    The first assert does not raise an exception:
-
- >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
- [1.0,2.333,np.nan])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33339,np.nan], decimal=5)
- ...
- <type 'exceptions.AssertionError'>:
- AssertionError:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33339, NaN])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33333, 5], decimal=5)
- <type 'exceptions.ValueError'>:
- ValueError:
- Arrays are not almost equal
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33333, 5. ])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import around, number, float_, result_type, array
- from numpy.core.numerictypes import issubdtype
- from numpy.core.fromnumeric import any as npany
-
- def compare(x, y):
- try:
- if npany(gisinf(x)) or npany( gisinf(y)):
- xinfid = gisinf(x)
- yinfid = gisinf(y)
- if not (xinfid == yinfid).all():
- return False
- # if one item, x and y is +- inf
- if x.size == y.size == 1:
- return x == y
- x = x[~xinfid]
- y = y[~yinfid]
- except (TypeError, NotImplementedError):
- pass
-
- # make sure y is an inexact type to avoid abs(MIN_INT); will cause
- # casting of x later.
- dtype = result_type(y, 1.)
- y = array(y, dtype=dtype, copy=False, subok=True)
- z = abs(x - y)
-
- if not issubdtype(z.dtype, number):
- z = z.astype(float_) # handle object arrays
-
- return z < 1.5 * 10.0**(-decimal)
-
- assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
- header=('Arrays are not almost equal to %d decimals' % decimal),
- precision=decimal)
-
-
-def assert_array_less(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not ordered by less
- than.
-
- Given two array_like objects, check that the shape is equal and all
- elements of the first object are strictly smaller than those of the
- second object. An exception is raised at shape mismatch or incorrectly
- ordered values. Shape mismatch does not raise if an object has zero
- dimension. In contrast to the standard usage in numpy, NaNs are
-    compared like numbers: no assertion is raised if both objects have
-    NaNs in the same positions.
-
- Parameters
- ----------
- x : array_like
- The smaller object to check.
- y : array_like
- The larger object to compare.
- err_msg : string
- The error message to be printed in case of failure.
- verbose : bool
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_array_equal: tests objects for equality
-    assert_array_almost_equal: tests objects for equality up to precision
-
- Examples
- --------
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 1., NaN])
- y: array([ 1., 2., NaN])
-
- >>> np.testing.assert_array_less([1.0, 4.0], 3)
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 4.])
- y: array(3)
-
- >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (shapes (3,), (1,) mismatch)
- x: array([ 1., 2., 3.])
- y: array([4])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
- verbose=verbose,
- header='Arrays are not less-ordered',
- equal_inf=False)
-
-
-def runstring(astr, dict):
- exec(astr, dict)
-
-
-def assert_string_equal(actual, desired):
- """
- Test if two strings are equal.
-
- If the given strings are equal, `assert_string_equal` does nothing.
- If they are not equal, an AssertionError is raised, and the diff
- between the strings is shown.
-
- Parameters
- ----------
- actual : str
- The string to test for equality against the expected string.
- desired : str
- The expected string.
-
- Examples
- --------
- >>> np.testing.assert_string_equal('abc', 'abc')
- >>> np.testing.assert_string_equal('abc', 'abcd')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ...
- AssertionError: Differences in strings:
- - abc+ abcd? +
-
- """
- # delay import of difflib to reduce startup time
- __tracebackhide__ = True # Hide traceback for py.test
- import difflib
-
- if not isinstance(actual, str):
- raise AssertionError(repr(type(actual)))
- if not isinstance(desired, str):
- raise AssertionError(repr(type(desired)))
- if re.match(r'\A'+desired+r'\Z', actual, re.M):
- return
-
- diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
- diff_list = []
- while diff:
- d1 = diff.pop(0)
- if d1.startswith(' '):
- continue
- if d1.startswith('- '):
- l = [d1]
- d2 = diff.pop(0)
- if d2.startswith('? '):
- l.append(d2)
- d2 = diff.pop(0)
- if not d2.startswith('+ '):
- raise AssertionError(repr(d2))
- l.append(d2)
- if diff:
- d3 = diff.pop(0)
- if d3.startswith('? '):
- l.append(d3)
- else:
- diff.insert(0, d3)
- if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
- continue
- diff_list.extend(l)
- continue
- raise AssertionError(repr(d1))
- if not diff_list:
- return
- msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
- if actual != desired:
- raise AssertionError(msg)
-
-
-def rundocs(filename=None, raise_on_error=True):
- """
- Run doctests found in the given file.
-
- By default `rundocs` raises an AssertionError on failure.
-
- Parameters
- ----------
- filename : str
- The path to the file for which the doctests are run.
- raise_on_error : bool
- Whether to raise an AssertionError when a doctest fails. Default is
- True.
-
- Notes
- -----
- The doctests can be run by the user/developer by adding the ``doctests``
- argument to the ``test()`` call. For example, to run all tests (including
- doctests) for `numpy.lib`:
-
- >>> np.lib.test(doctests=True) #doctest: +SKIP
- """
- from numpy.compat import npy_load_module
- import doctest
- if filename is None:
- f = sys._getframe(1)
- filename = f.f_globals['__file__']
- name = os.path.splitext(os.path.basename(filename))[0]
- m = npy_load_module(name, filename)
-
- tests = doctest.DocTestFinder().find(m)
- runner = doctest.DocTestRunner(verbose=False)
-
- msg = []
- if raise_on_error:
- out = lambda s: msg.append(s)
- else:
- out = None
-
- for test in tests:
- runner.run(test, out=out)
-
- if runner.failures > 0 and raise_on_error:
- raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
-
-
-def raises(*args):
- """Decorator to check for raised exceptions.
-
- The decorated test function must raise one of the passed exceptions to
- pass. If you want to test many assertions about exceptions in a single
- test, you may want to use `assert_raises` instead.
-
- .. warning::
- This decorator is nose specific, do not use it if you are using a
- different test framework.
-
- Parameters
- ----------
- args : exceptions
- The test passes if any of the passed exceptions is raised.
-
- Raises
- ------
- AssertionError
-
- Examples
- --------
-
- Usage::
-
- @raises(TypeError, ValueError)
- def test_raises_type_error():
- raise TypeError("This test passes")
-
- @raises(Exception)
- def test_that_fails_by_passing():
- pass
-
- """
- nose = import_nose()
- return nose.tools.raises(*args)
-
-#
-# assert_raises and assert_raises_regex are taken from unittest.
-#
-import unittest
-
-
-class _Dummy(unittest.TestCase):
- def nop(self):
- pass
-
-_d = _Dummy('nop')
-
-def assert_raises(*args, **kwargs):
- """
- assert_raises(exception_class, callable, *args, **kwargs)
- assert_raises(exception_class)
-
- Fail unless an exception of class exception_class is thrown
- by callable when invoked with arguments args and keyword
- arguments kwargs. If a different type of exception is
- thrown, it will not be caught, and the test case will be
- deemed to have suffered an error, exactly as for an
- unexpected exception.
-
- Alternatively, `assert_raises` can be used as a context manager:
-
- >>> from numpy.testing import assert_raises
- >>> with assert_raises(ZeroDivisionError):
- ... 1 / 0
-
- is equivalent to
-
- >>> def div(x, y):
- ... return x / y
- >>> assert_raises(ZeroDivisionError, div, 1, 0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
-    return _d.assertRaises(*args, **kwargs)
-
-
-def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
- """
- assert_raises_regex(exception_class, expected_regexp, callable, *args,
- **kwargs)
- assert_raises_regex(exception_class, expected_regexp)
-
- Fail unless an exception of class exception_class and with message that
- matches expected_regexp is thrown by callable when invoked with arguments
- args and keyword arguments kwargs.
-
- Alternatively, can be used as a context manager like `assert_raises`.
-
-    The name of this function adheres to the Python 3.2+ reference, but it
-    should work in all versions down to 2.6.
-
- Notes
- -----
- .. versionadded:: 1.9.0
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
-
- if sys.version_info.major >= 3:
- funcname = _d.assertRaisesRegex
- else:
- # Only present in Python 2.7, missing from unittest in 2.6
- funcname = _d.assertRaisesRegexp
-
- return funcname(exception_class, expected_regexp, *args, **kwargs)
-
-
-def decorate_methods(cls, decorator, testmatch=None):
- """
- Apply a decorator to all methods in a class matching a regular expression.
-
- The given decorator is applied to all public methods of `cls` that are
- matched by the regular expression `testmatch`
- (``testmatch.search(methodname)``). Methods that are private, i.e. start
- with an underscore, are ignored.
-
- Parameters
- ----------
- cls : class
- Class whose methods to decorate.
- decorator : function
- Decorator to apply to methods
- testmatch : compiled regexp or str, optional
- The regular expression. Default value is None, in which case the
- nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
- is used.
- If `testmatch` is a string, it is compiled to a regular expression
- first.
-
- """
- if testmatch is None:
- testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
- else:
- testmatch = re.compile(testmatch)
- cls_attr = cls.__dict__
-
- # delayed import to reduce startup time
- from inspect import isfunction
-
- methods = [_m for _m in cls_attr.values() if isfunction(_m)]
- for function in methods:
- try:
- if hasattr(function, 'compat_func_name'):
- funcname = function.compat_func_name
- else:
- funcname = function.__name__
- except AttributeError:
- # not a function
- continue
- if testmatch.search(funcname) and not funcname.startswith('_'):
- setattr(cls, funcname, decorator(function))
- return
-
-
-def measure(code_str, times=1, label=None):
- """
- Return elapsed time for executing code in the namespace of the caller.
-
- The supplied code string is compiled with the Python builtin ``compile``.
-    The precision of the timing is 10 milliseconds. If the code will execute
- fast on this timescale, it can be executed many times to get reasonable
- timing accuracy.
-
- Parameters
- ----------
- code_str : str
- The code to be timed.
- times : int, optional
- The number of times the code is executed. Default is 1. The code is
- only compiled once.
- label : str, optional
- A label to identify `code_str` with. This is passed into ``compile``
- as the second argument (for run-time error messages).
-
- Returns
- -------
- elapsed : float
- Total elapsed time in seconds for executing `code_str` `times` times.
-
- Examples
- --------
-    >>> times = 10
-    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
- ... times=times)
- >>> print("Time for a single execution : ", etime / times, "s")
- Time for a single execution : 0.005 s
-
- """
- frame = sys._getframe(1)
- locs, globs = frame.f_locals, frame.f_globals
-
- code = compile(code_str,
- 'Test name: %s ' % label,
- 'exec')
- i = 0
- elapsed = jiffies()
- while i < times:
- i += 1
- exec(code, globs, locs)
- elapsed = jiffies() - elapsed
- return 0.01*elapsed
-
-
-def _assert_valid_refcount(op):
- """
- Check that ufuncs don't mishandle refcount of object `1`.
- Used in a few regression tests.
- """
- if not HAS_REFCOUNT:
- return True
- import numpy as np
-
- b = np.arange(100*100).reshape(100, 100)
- c = b
- i = 1
-
- rc = sys.getrefcount(i)
- for j in range(15):
- d = op(b, c)
- assert_(sys.getrefcount(i) >= rc)
- del d # for pyflakes
-
-
-def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
- err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal up to desired
- tolerance.
-
- The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
- It compares the difference between `actual` and `desired` to
- ``atol + rtol * abs(desired)``.
-
- .. versionadded:: 1.5.0
-
- Parameters
- ----------
- actual : array_like
- Array obtained.
- desired : array_like
- Array desired.
- rtol : float, optional
- Relative tolerance.
- atol : float, optional
- Absolute tolerance.
-    equal_nan : bool, optional
- If True, NaNs will compare equal.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_array_almost_equal_nulp, assert_array_max_ulp
-
- Examples
- --------
- >>> x = [1e-5, 1e-3, 1e-1]
- >>> y = np.arccos(np.cos(x))
- >>> assert_allclose(x, y, rtol=1e-5, atol=0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
-
- def compare(x, y):
- return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
- equal_nan=equal_nan)
-
- actual, desired = np.asanyarray(actual), np.asanyarray(desired)
- header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
- assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
- verbose=verbose, header=header, equal_nan=equal_nan)
-
-
-def assert_array_almost_equal_nulp(x, y, nulp=1):
- """
- Compare two arrays relatively to their spacing.
-
- This is a relatively robust method to compare two arrays whose amplitude
- is variable.
-
- Parameters
- ----------
- x, y : array_like
- Input arrays.
- nulp : int, optional
- The maximum number of unit in the last place for tolerance (see Notes).
- Default is 1.
-
- Returns
- -------
- None
-
- Raises
- ------
- AssertionError
- If the spacing between `x` and `y` for one or more elements is larger
- than `nulp`.
-
- See Also
- --------
- assert_array_max_ulp : Check that all items of arrays differ in at most
- N Units in the Last Place.
- spacing : Return the distance between x and the nearest adjacent number.
-
- Notes
- -----
- An assertion is raised if the following condition is not met::
-
-        abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
-
- Examples
- --------
- >>> x = np.array([1., 1e-10, 1e-20])
- >>> eps = np.finfo(x.dtype).eps
- >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
-
- >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
- Traceback (most recent call last):
- ...
- AssertionError: X and Y are not equal to 1 ULP (max is 2)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
- ax = np.abs(x)
- ay = np.abs(y)
- ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
- if not np.all(np.abs(x-y) <= ref):
- if np.iscomplexobj(x) or np.iscomplexobj(y):
- msg = "X and Y are not equal to %d ULP" % nulp
- else:
- max_nulp = np.max(nulp_diff(x, y))
- msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
- raise AssertionError(msg)
-
-
-def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
- """
- Check that all items of arrays differ in at most N Units in the Last Place.
-
- Parameters
- ----------
- a, b : array_like
- Input arrays to be compared.
- maxulp : int, optional
- The maximum number of units in the last place that elements of `a` and
- `b` can differ. Default is 1.
- dtype : dtype, optional
- Data-type to convert `a` and `b` to if given. Default is None.
-
- Returns
- -------
- ret : ndarray
- Array containing number of representable floating point numbers between
- items in `a` and `b`.
-
- Raises
- ------
- AssertionError
- If one or more elements differ by more than `maxulp`.
-
- See Also
- --------
- assert_array_almost_equal_nulp : Compare two arrays relatively to their
- spacing.
-
- Examples
- --------
- >>> a = np.linspace(0., 1., 100)
- >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
- ret = nulp_diff(a, b, dtype)
- if not np.all(ret <= maxulp):
- raise AssertionError("Arrays are not almost equal up to %g ULP" %
- maxulp)
- return ret
-
-
-def nulp_diff(x, y, dtype=None):
- """For each item in x and y, return the number of representable floating
- points between them.
-
- Parameters
- ----------
- x : array_like
- first input array
- y : array_like
- second input array
- dtype : dtype, optional
- Data-type to convert `x` and `y` to if given. Default is None.
-
- Returns
- -------
- nulp : array_like
- number of representable floating point numbers between each item in x
- and y.
-
- Examples
- --------
-    # By definition, epsilon is the smallest number such that 1 + eps != 1,
-    # so there should be exactly one ULP between 1 and 1 + eps
-    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
- 1.0
- """
- import numpy as np
- if dtype:
- x = np.array(x, dtype=dtype)
- y = np.array(y, dtype=dtype)
- else:
- x = np.array(x)
- y = np.array(y)
-
- t = np.common_type(x, y)
- if np.iscomplexobj(x) or np.iscomplexobj(y):
- raise NotImplementedError("_nulp not implemented for complex array")
-
- x = np.array(x, dtype=t)
- y = np.array(y, dtype=t)
-
- if not x.shape == y.shape:
- raise ValueError("x and y do not have the same shape: %s - %s" %
- (x.shape, y.shape))
-
- def _diff(rx, ry, vdt):
- diff = np.array(rx-ry, dtype=vdt)
- return np.abs(diff)
-
- rx = integer_repr(x)
- ry = integer_repr(y)
- return _diff(rx, ry, t)
-
-
-def _integer_repr(x, vdt, comp):
-    # Reinterpret the binary representation of the float as sign-magnitude:
-    # take the two's-complement representation into account
- # See also
- # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
- rx = x.view(vdt)
- if not (rx.size == 1):
- rx[rx < 0] = comp - rx[rx < 0]
- else:
- if rx < 0:
- rx = comp - rx
-
- return rx
-
-
-def integer_repr(x):
- """Return the signed-magnitude interpretation of the binary representation of
- x."""
- import numpy as np
- if x.dtype == np.float16:
- return _integer_repr(x, np.int16, np.int16(-2**15))
- elif x.dtype == np.float32:
- return _integer_repr(x, np.int32, np.int32(-2**31))
- elif x.dtype == np.float64:
- return _integer_repr(x, np.int64, np.int64(-2**63))
- else:
- raise ValueError("Unsupported dtype %s" % x.dtype)
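
# A minimal sketch (not part of the original sources) of the sign-magnitude
# trick used by _integer_repr/integer_repr above: after the reinterpretation,
# the ULP distance between two floats is just the absolute difference of
# their integer representations. `ulp_distance` is a hypothetical helper
# written for this note and assumes float64 inputs.
import numpy as np

def ulp_distance(a, b):
    r = np.array([a, b], dtype=np.float64).view(np.int64)
    neg = r < 0
    # Same mapping as _integer_repr with comp = -2**63 for float64.
    r[neg] = np.int64(-2**63) - r[neg]
    return abs(int(r[0]) - int(r[1]))

# 1.0 and the next representable float are exactly one ULP apart:
assert ulp_distance(1.0, np.nextafter(1.0, 2.0)) == 1
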
-
-
-# The following two classes are copied from python 2.6 warnings module (context
-# manager)
-class WarningMessage(object):
-
- """
- Holds the result of a single showwarning() call.
-
- Deprecated in 1.8.0
-
- Notes
- -----
- `WarningMessage` is copied from the Python 2.6 warnings module,
- so it can be used in NumPy with older Python versions.
-
- """
-
- _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
- "line")
-
- def __init__(self, message, category, filename, lineno, file=None,
- line=None):
- local_values = locals()
- for attr in self._WARNING_DETAILS:
- setattr(self, attr, local_values[attr])
- if category:
- self._category_name = category.__name__
- else:
- self._category_name = None
-
- def __str__(self):
- return ("{message : %r, category : %r, filename : %r, lineno : %s, "
- "line : %r}" % (self.message, self._category_name,
- self.filename, self.lineno, self.line))
-
-
-class WarningManager(object):
- """
- A context manager that copies and restores the warnings filter upon
- exiting the context.
-
- The 'record' argument specifies whether warnings should be captured by a
- custom implementation of ``warnings.showwarning()`` and be appended to a
- list returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
-
- The 'module' argument is to specify an alternative module to the module
- named 'warnings' and imported under that name. This argument is only useful
- when testing the warnings module itself.
-
- Deprecated in 1.8.0
-
- Notes
- -----
- `WarningManager` is a copy of the ``catch_warnings`` context manager
- from the Python 2.6 warnings module, with slight modifications.
- It is copied so it can be used in NumPy with older Python versions.
-
- """
-
- def __init__(self, record=False, module=None):
- self._record = record
- if module is None:
- self._module = sys.modules['warnings']
- else:
- self._module = module
- self._entered = False
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("Cannot enter %r twice" % self)
- self._entered = True
- self._filters = self._module.filters
- self._module.filters = self._filters[:]
- self._showwarning = self._module.showwarning
- if self._record:
- log = []
-
- def showwarning(*args, **kwargs):
- log.append(WarningMessage(*args, **kwargs))
- self._module.showwarning = showwarning
- return log
- else:
- return None
-
-    def __exit__(self, *exc_info):
- if not self._entered:
- raise RuntimeError("Cannot exit %r without entering first" % self)
- self._module.filters = self._filters
- self._module.showwarning = self._showwarning
-
-
-@contextlib.contextmanager
-def _assert_warns_context(warning_class, name=None):
- __tracebackhide__ = True # Hide traceback for py.test
- with suppress_warnings() as sup:
- l = sup.record(warning_class)
- yield
- if not len(l) > 0:
- name_str = " when calling %s" % name if name is not None else ""
- raise AssertionError("No warning raised" + name_str)
-
-
-def assert_warns(warning_class, *args, **kwargs):
- """
- Fail unless the given callable throws the specified warning.
-
- A warning of class warning_class should be thrown by the callable when
- invoked with arguments args and keyword arguments kwargs.
- If a different type of warning is thrown, it will not be caught.
-
- If called with all arguments other than the warning class omitted, may be
- used as a context manager:
-
- with assert_warns(SomeWarning):
- do_something()
-
- The ability to be used as a context manager is new in NumPy v1.11.0.
-
- .. versionadded:: 1.4.0
-
- Parameters
- ----------
- warning_class : class
- The class defining the warning that `func` is expected to throw.
- func : callable
- The callable to test.
- \\*args : Arguments
- Arguments passed to `func`.
- \\*\\*kwargs : Kwargs
- Keyword arguments passed to `func`.
-
- Returns
- -------
- The value returned by `func`.
-
- """
- if not args:
- return _assert_warns_context(warning_class)
-
- func = args[0]
- args = args[1:]
- with _assert_warns_context(warning_class, name=func.__name__):
- return func(*args, **kwargs)
-
-
-@contextlib.contextmanager
-def _assert_no_warnings_context(name=None):
- __tracebackhide__ = True # Hide traceback for py.test
- with warnings.catch_warnings(record=True) as l:
- warnings.simplefilter('always')
- yield
- if len(l) > 0:
- name_str = " when calling %s" % name if name is not None else ""
- raise AssertionError("Got warnings%s: %s" % (name_str, l))
-
-
-def assert_no_warnings(*args, **kwargs):
- """
- Fail if the given callable produces any warnings.
-
- If called with all arguments omitted, may be used as a context manager:
-
- with assert_no_warnings():
- do_something()
-
- The ability to be used as a context manager is new in NumPy v1.11.0.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- func : callable
- The callable to test.
- \\*args : Arguments
- Arguments passed to `func`.
- \\*\\*kwargs : Kwargs
- Keyword arguments passed to `func`.
-
- Returns
- -------
- The value returned by `func`.
-
- """
- if not args:
- return _assert_no_warnings_context()
-
- func = args[0]
- args = args[1:]
- with _assert_no_warnings_context(name=func.__name__):
- return func(*args, **kwargs)
-
-
-def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
- """
- generator producing data with different alignment and offsets
- to test simd vectorization
-
- Parameters
- ----------
- dtype : dtype
- data type to produce
- type : string
-        'unary': create data for unary operations, creates one input
-        and one output array
-        'binary': create data for binary operations, creates two input
-        arrays and one output array
- max_size : integer
- maximum size of data to produce
-
- Returns
- -------
- if type is 'unary' yields one output, one input array and a message
- containing information on the data
-    if type is 'binary' yields one output array, two input arrays and a message
- containing information on the data
-
- """
- ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
- bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
- for o in range(3):
- for s in range(o + 2, max(o + 3, max_size)):
- if type == 'unary':
- inp = lambda: arange(s, dtype=dtype)[o:]
- out = empty((s,), dtype=dtype)[o:]
- yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
- d = inp()
- yield d, d, ufmt % (o, o, s, dtype, 'in place')
- yield out[1:], inp()[:-1], ufmt % \
- (o + 1, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp()[1:], ufmt % \
- (o, o + 1, s - 1, dtype, 'out of place')
- yield inp()[:-1], inp()[1:], ufmt % \
- (o, o + 1, s - 1, dtype, 'aliased')
- yield inp()[1:], inp()[:-1], ufmt % \
- (o + 1, o, s - 1, dtype, 'aliased')
- if type == 'binary':
- inp1 = lambda: arange(s, dtype=dtype)[o:]
- inp2 = lambda: arange(s, dtype=dtype)[o:]
- out = empty((s,), dtype=dtype)[o:]
- yield out, inp1(), inp2(), bfmt % \
- (o, o, o, s, dtype, 'out of place')
- d = inp1()
- yield d, d, inp2(), bfmt % \
- (o, o, o, s, dtype, 'in place1')
- d = inp2()
- yield d, inp1(), d, bfmt % \
- (o, o, o, s, dtype, 'in place2')
- yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
- (o + 1, o, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
- (o, o + 1, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
- (o, o, o + 1, s - 1, dtype, 'out of place')
- yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
- (o + 1, o, o, s - 1, dtype, 'aliased')
- yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
- (o, o + 1, o, s - 1, dtype, 'aliased')
- yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
- (o, o, o + 1, s - 1, dtype, 'aliased')
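
# A hedged usage sketch for _gen_alignment_data (assumptions: numpy is
# available, and _gen_alignment_data/assert_allclose are in scope as defined
# in this module; np.sqrt is just an arbitrary unary ufunc). Each yielded
# tuple provides differently offset or overlapping views, so a single loop
# exercises many SIMD code paths:
import numpy as np

def check_sqrt_alignment():
    for out, inp, msg in _gen_alignment_data(dtype=np.float32, type='unary'):
        expected = np.sqrt(inp.copy())  # reference result before any aliasing
        np.sqrt(inp, out=out)
        assert_allclose(out, expected, err_msg=msg)
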
-
-
-class IgnoreException(Exception):
- "Ignoring this exception due to disabled feature"
- pass
-
-
-@contextlib.contextmanager
-def tempdir(*args, **kwargs):
- """Context manager to provide a temporary test folder.
-
-    All arguments are passed as-is to the underlying tempfile.mkdtemp
- function.
-
- """
- tmpdir = mkdtemp(*args, **kwargs)
- try:
- yield tmpdir
- finally:
- shutil.rmtree(tmpdir)
-
-
-@contextlib.contextmanager
-def temppath(*args, **kwargs):
- """Context manager for temporary files.
-
- Context manager that returns the path to a closed temporary file. Its
- parameters are the same as for tempfile.mkstemp and are passed directly
- to that function. The underlying file is removed when the context is
- exited, so it should be closed at that time.
-
- Windows does not allow a temporary file to be opened if it is already
- open, so the underlying file must be closed after opening before it
- can be opened again.
-
- """
- fd, path = mkstemp(*args, **kwargs)
- os.close(fd)
- try:
- yield path
- finally:
- os.remove(path)
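
# A short sketch of the intended temppath pattern (assuming temppath is
# exported by numpy.testing; the '.npy' suffix and np.save are illustrative
# only). The file is closed between operations, which keeps Windows happy,
# and is removed when the context exits:
import numpy as np
from numpy.testing import temppath

def roundtrip_example():
    data = np.arange(3)
    with temppath(suffix='.npy') as path:
        np.save(path, data)  # np.save opens and closes the file itself
        assert (np.load(path) == data).all()
    # the temporary file no longer exists here
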
-
-
-class clear_and_catch_warnings(warnings.catch_warnings):
- """ Context manager that resets warning registry for catching warnings
-
- Warnings can be slippery, because, whenever a warning is triggered, Python
- adds a ``__warningregistry__`` member to the *calling* module. This makes
- it impossible to retrigger the warning in this module, whatever you put in
- the warnings filters. This context manager accepts a sequence of `modules`
- as a keyword argument to its constructor and:
-
- * stores and removes any ``__warningregistry__`` entries in given `modules`
- on entry;
- * resets ``__warningregistry__`` to its previous state on exit.
-
- This makes it possible to trigger any warning afresh inside the context
- manager without disturbing the state of warnings outside.
-
- For compatibility with Python 3.0, please consider all arguments to be
- keyword-only.
-
- Parameters
- ----------
- record : bool, optional
- Specifies whether warnings should be captured by a custom
- implementation of ``warnings.showwarning()`` and be appended to a list
- returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
- modules : sequence, optional
- Sequence of modules for which to reset warnings registry on entry and
- restore on exit. To work correctly, all 'ignore' filters should
- filter by one of these modules.
-
- Examples
- --------
- >>> import warnings
- >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
- ... warnings.simplefilter('always')
- ... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
- ... # do something that raises a warning but ignore those in
- ... # np.core.fromnumeric
- """
- class_modules = ()
-
- def __init__(self, record=False, modules=()):
- self.modules = set(modules).union(self.class_modules)
- self._warnreg_copies = {}
- super(clear_and_catch_warnings, self).__init__(record=record)
-
- def __enter__(self):
- for mod in self.modules:
- if hasattr(mod, '__warningregistry__'):
- mod_reg = mod.__warningregistry__
- self._warnreg_copies[mod] = mod_reg.copy()
- mod_reg.clear()
- return super(clear_and_catch_warnings, self).__enter__()
-
- def __exit__(self, *exc_info):
- super(clear_and_catch_warnings, self).__exit__(*exc_info)
- for mod in self.modules:
- if hasattr(mod, '__warningregistry__'):
- mod.__warningregistry__.clear()
- if mod in self._warnreg_copies:
- mod.__warningregistry__.update(self._warnreg_copies[mod])
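
# A sketch of the (otherwise undocumented) `class_modules` hook above: a
# subclass can pin the modules whose __warningregistry__ is always cleared,
# so call sites need not repeat the `modules` argument. The module chosen
# here is illustrative.
import warnings
import numpy as np

class clear_and_catch_fromnumeric(clear_and_catch_warnings):
    class_modules = (np.core.fromnumeric,)

with clear_and_catch_fromnumeric():
    warnings.simplefilter('always')
    # warnings originating in np.core.fromnumeric trigger afresh in here
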
-
-
-class suppress_warnings(object):
- """
- Context manager and decorator doing much the same as
- ``warnings.catch_warnings``.
-
- However, it also provides a filter mechanism to work around
- http://bugs.python.org/issue4180.
-
- This bug causes Python before 3.4 to not reliably show warnings again
- after they have been ignored once (even within catch_warnings). It
- means that no "ignore" filter can be used easily, since following
- tests might need to see the warning. Additionally it allows easier
- specificity for testing warnings and can be nested.
-
- Parameters
- ----------
- forwarding_rule : str, optional
- One of "always", "once", "module", or "location". Analogous to
- the usual warnings module filter mode, it is useful to reduce
-        noise mostly on the outermost level. Unsuppressed and unrecorded
-        warnings will be forwarded based on this rule. Defaults to "always".
-        "location" is equivalent to the warnings "default", matching by the
-        exact location the warning originated from.
-
- Notes
- -----
- Filters added inside the context manager will be discarded again
- when leaving it. Upon entering all filters defined outside a
- context will be applied automatically.
-
- When a recording filter is added, matching warnings are stored in the
- ``log`` attribute as well as in the list returned by ``record``.
-
- If filters are added and the ``module`` keyword is given, the
- warning registry of this module will additionally be cleared when
- applying it, entering the context, or exiting it. This could cause
- warnings to appear a second time after leaving the context if they
- were configured to be printed once (default) and were already
- printed before the context was entered.
-
- Nesting this context manager will work as expected when the
- forwarding rule is "always" (default). Unfiltered and unrecorded
- warnings will be passed out and be matched by the outer level.
-    On the outermost level they will be printed (or caught by another
- warnings context). The forwarding rule argument can modify this
- behaviour.
-
- Like ``catch_warnings`` this context manager is not threadsafe.
-
- Examples
- --------
- >>> with suppress_warnings() as sup:
- ... sup.filter(DeprecationWarning, "Some text")
- ... sup.filter(module=np.ma.core)
- ... log = sup.record(FutureWarning, "Does this occur?")
- ... command_giving_warnings()
- ... # The FutureWarning was given once, the filtered warnings were
- ... # ignored. All other warnings abide outside settings (may be
- ... # printed/error)
- ... assert_(len(log) == 1)
- ... assert_(len(sup.log) == 1) # also stored in log attribute
-
- Or as a decorator:
-
- >>> sup = suppress_warnings()
-    >>> sup.filter(module=np.ma.core)  # module must match exactly
-    >>> @sup
-    ... def some_function():
- ... # do something which causes a warning in np.ma.core
- ... pass
- """
- def __init__(self, forwarding_rule="always"):
- self._entered = False
-
-        # Suppressions are either instance-level or defined inside one with block:
- self._suppressions = []
-
- if forwarding_rule not in {"always", "module", "once", "location"}:
- raise ValueError("unsupported forwarding rule.")
- self._forwarding_rule = forwarding_rule
-
- def _clear_registries(self):
- if hasattr(warnings, "_filters_mutated"):
- # clearing the registry should not be necessary on new pythons,
- # instead the filters should be mutated.
- warnings._filters_mutated()
- return
- # Simply clear the registry, this should normally be harmless,
- # note that on new pythons it would be invalidated anyway.
- for module in self._tmp_modules:
- if hasattr(module, "__warningregistry__"):
- module.__warningregistry__.clear()
-
- def _filter(self, category=Warning, message="", module=None, record=False):
- if record:
- record = [] # The log where to store warnings
- else:
- record = None
- if self._entered:
- if module is None:
- warnings.filterwarnings(
- "always", category=category, message=message)
- else:
- module_regex = module.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=category, message=message,
- module=module_regex)
- self._tmp_modules.add(module)
- self._clear_registries()
-
- self._tmp_suppressions.append(
- (category, message, re.compile(message, re.I), module, record))
- else:
- self._suppressions.append(
- (category, message, re.compile(message, re.I), module, record))
-
- return record
-
- def filter(self, category=Warning, message="", module=None):
- """
- Add a new suppressing filter or apply it if the state is entered.
-
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
-
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- self._filter(category=category, message=message, module=module,
- record=False)
-
- def record(self, category=Warning, message="", module=None):
- """
- Append a new recording filter or apply it if the state is entered.
-
- All warnings matching will be appended to the ``log`` attribute.
-
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
-
- Returns
- -------
- log : list
- A list which will be filled with all matched warnings.
-
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- return self._filter(category=category, message=message, module=module,
- record=True)
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("cannot enter suppress_warnings twice.")
-
- self._orig_show = warnings.showwarning
- self._filters = warnings.filters
- warnings.filters = self._filters[:]
-
- self._entered = True
- self._tmp_suppressions = []
- self._tmp_modules = set()
- self._forwarded = set()
-
- self.log = [] # reset global log (no need to keep same list)
-
- for cat, mess, _, mod, log in self._suppressions:
- if log is not None:
- del log[:] # clear the log
- if mod is None:
- warnings.filterwarnings(
- "always", category=cat, message=mess)
- else:
- module_regex = mod.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=cat, message=mess,
- module=module_regex)
- self._tmp_modules.add(mod)
- warnings.showwarning = self._showwarning
- self._clear_registries()
-
- return self
-
- def __exit__(self, *exc_info):
- warnings.showwarning = self._orig_show
- warnings.filters = self._filters
- self._clear_registries()
- self._entered = False
- del self._orig_show
- del self._filters
-
- def _showwarning(self, message, category, filename, lineno,
- *args, **kwargs):
- use_warnmsg = kwargs.pop("use_warnmsg", None)
- for cat, _, pattern, mod, rec in (
- self._suppressions + self._tmp_suppressions)[::-1]:
- if (issubclass(category, cat) and
- pattern.match(message.args[0]) is not None):
- if mod is None:
- # Message and category match, either recorded or ignored
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
- # Use startswith, because warnings strips the c or o from
- # .pyc/.pyo files.
- elif mod.__file__.startswith(filename):
- # The message and module (filename) match
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
-
- # There is no filter in place, so pass to the outside handler
- # unless we should only pass it once
- if self._forwarding_rule == "always":
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno,
- *args, **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
- return
-
- if self._forwarding_rule == "once":
- signature = (message.args, category)
- elif self._forwarding_rule == "module":
- signature = (message.args, category, filename)
- elif self._forwarding_rule == "location":
- signature = (message.args, category, filename, lineno)
-
- if signature in self._forwarded:
- return
- self._forwarded.add(signature)
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno, *args,
- **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
-
- def __call__(self, func):
- """
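
# A brief illustration (not from the diff) of assert_raises_regex in both
# call forms; the regex is matched against str(exception) with re.search:
from numpy.testing import assert_raises_regex

assert_raises_regex(ValueError, 'invalid literal', int, 'not-a-number')

with assert_raises_regex(ZeroDivisionError, 'division'):
    1 / 0
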
- Function decorator to apply certain suppressions to a whole
- function.
- """
- @wraps(func)
- def new_func(*args, **kwargs):
- with self:
- return func(*args, **kwargs)
-
- return new_func
Back compatibility noseclasses module. It will import the appropriate
set of tools
"""
-from .nose_tools.noseclasses import *
\ No newline at end of file
+from __future__ import division, absolute_import, print_function
+
+import warnings
+
+# 2018-04-04, numpy 1.15.0
+warnings.warn("Importing from numpy.testing.noseclasses is deprecated, "
+ "import from numpy.testing instead",
+ DeprecationWarning, stacklevel=2)
+
+from ._private.noseclasses import *
set of tools
"""
-import os
-from .nose_tools.nosetester import *
+from __future__ import division, absolute_import, print_function
+
+import warnings
+
+# 2018-04-04, numpy 1.15.0
+warnings.warn("Importing from numpy.testing.nosetester is deprecated, "
+              "import from numpy.testing instead.",
+              DeprecationWarning, stacklevel=2)
+
+from ._private.nosetester import *
__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
'_numpy_tester', 'get_package_name', 'import_nose',
+++ /dev/null
-"""
-Compatibility shim for pytest compatibility with the nose decorators.
-
-Decorators for labeling and modifying behavior of test objects.
-
-Decorators that merely return a modified version of the original
-function object are straightforward.
-
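
# A usage sketch for decorate_methods; `timed` is a hypothetical decorator
# invented for this example. Only public methods matching the test pattern
# get wrapped:
import functools
import time

def timed(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        t0 = time.time()
        try:
            return f(*args, **kwargs)
        finally:
            print('%s took %.3fs' % (f.__name__, time.time() - t0))
    return wrapper

class TestThings(object):
    def test_fast(self):
        pass

    def _helper(self):  # private, left untouched
        pass

decorate_methods(TestThings, timed)  # wraps test_fast only
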
-Decorators that return a new function will not preserve meta-data such as
-function name, setup and teardown functions and so on.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-try:
-    # Accessing collections abstract classes from collections
- # has been deprecated since Python 3.3
- import collections.abc as collections_abc
-except ImportError:
- import collections as collections_abc
-
-from .utils import SkipTest, assert_warns, HAS_REFCOUNT
-
-__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated',
- 'parametrize', '_needs_refcount',]
-
-
-def slow(t):
- """
- Label a test as 'slow'.
-
- The exact definition of a slow test is obviously both subjective and
- hardware-dependent, but in general any individual test that requires more
-    than a second or two should be labeled as slow (the whole suite consists of
- thousands of tests, so even a second is significant).
-
- Parameters
- ----------
- t : callable
- The test to mark as slow.
-
- Returns
- -------
- t : callable
- The decorated test `t`.
-
- Examples
- --------
-    The `numpy.testing` module exposes ``decorators`` as ``dec``.
- A test can be decorated as slow like this::
-
- from numpy.testing import *
-
- @dec.slow
- def test_big(self):
- print('Big, slow test')
-
- """
- import pytest
-
- return pytest.mark.slow(t)
-
-
-def setastest(tf=True):
- """
- Signals to nose that this function is or is not a test.
-
- Parameters
- ----------
- tf : bool
- If True, specifies that the decorated callable is a test.
- If False, specifies that the decorated callable is not a test.
- Default is True.
-
- Examples
- --------
- `setastest` can be used in the following way::
-
- from numpy.testing.decorators import setastest
-
- @setastest(False)
- def func_with_test_in_name(arg1, arg2):
- pass
-
- """
- def set_test(t):
- t.__test__ = tf
- return t
- return set_test
-
-
-def skipif(skip_condition, msg=None):
- """
- Make function raise SkipTest exception if a given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- skip_condition : bool or callable
- Flag to determine whether to skip the decorated test.
- msg : str, optional
- Message to give on raising a SkipTest exception. Default is None.
-
- Returns
- -------
- decorator : function
- Decorator which, when applied to a function, causes SkipTest
- to be raised when `skip_condition` is True, and the function
- to be called normally otherwise.
-
- Notes
- -----
-    Undecorated functions are returned, which may lead to some lost
-    information. Note that this function differs from the pytest decorator
-    ``pytest.mark.skipif``. The latter marks test functions at import time and
-    the skip is handled during collection, hence it cannot be used for
-    non-test functions, nor does it handle callable conditions.
-
- """
- def skip_decorator(f):
- # Local import to avoid a hard pytest dependency and only incur the
- # import time overhead at actual test-time.
- import inspect
- import pytest
-
- if msg is None:
- out = 'Test skipped due to test condition'
- else:
- out = msg
-
- # Allow for both boolean or callable skip conditions.
- if isinstance(skip_condition, collections_abc.Callable):
- skip_val = lambda: skip_condition()
- else:
- skip_val = lambda: skip_condition
-
- # We need to define *two* skippers because Python doesn't allow both
- # return with value and yield inside the same function.
-        def get_msg(func, msg=None):
- """Skip message with information about function being skipped."""
- if msg is None:
- out = 'Test skipped due to test condition'
- else:
- out = msg
- return "Skipping test: %s: %s" % (func.__name__, out)
-
- def skipper_func(*args, **kwargs):
- """Skipper for normal test functions."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- return f(*args, **kwargs)
-
- def skipper_gen(*args, **kwargs):
- """Skipper for test generators."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- for x in f(*args, **kwargs):
- yield x
-
- # Choose the right skipper to use when building the actual decorator.
- if inspect.isgeneratorfunction(f):
- skipper = skipper_gen
- else:
- skipper = skipper_func
- return skipper
-
- return skip_decorator
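
# A usage sketch for skipif; a callable condition defers the (possibly
# costly) check until the test actually runs. scipy here is just an example
# of an optional dependency:
def _no_scipy():
    try:
        import scipy  # noqa: F401
        return False
    except ImportError:
        return True

@skipif(_no_scipy, msg='scipy is not installed')
def test_needs_scipy():
    import scipy.linalg
    assert scipy.linalg.det([[1.0]]) == 1.0
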
-
-
-def knownfailureif(fail_condition, msg=None):
- """
- Make function raise KnownFailureException exception if given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- fail_condition : bool or callable
- Flag to determine whether to mark the decorated test as a known
- failure (if True) or not (if False).
- msg : str, optional
- Message to give on raising a KnownFailureException exception.
- Default is None.
-
- Returns
- -------
- decorator : function
- Decorator, which, when applied to a function, causes
- KnownFailureException to be raised when `fail_condition` is True,
- and the function to be called normally otherwise.
-
- Notes
- -----
-    Unlike in the nose implementation, the decorator itself is not decorated
-    in the pytest case, so metadata of the wrapped function may be lost.
-
- """
- import pytest
- from .utils import KnownFailureException
-
- if msg is None:
- msg = 'Test skipped due to known failure'
-
- # Allow for both boolean or callable known failure conditions.
- if isinstance(fail_condition, collections_abc.Callable):
- fail_val = lambda: fail_condition()
- else:
- fail_val = lambda: fail_condition
-
- def knownfail_decorator(f):
-
- def knownfailer(*args, **kwargs):
- if fail_val():
- raise KnownFailureException(msg)
- return f(*args, **kwargs)
-
- return knownfailer
-
- return knownfail_decorator
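
# A sketch of knownfailureif: when the condition is true the test raises
# KnownFailureException up front (the body never runs); otherwise the test
# runs normally. The platform check is purely illustrative.
import sys

@knownfailureif(sys.platform == 'win32',
                msg='illustrative: known to fail on Windows')
def test_posix_behaviour():
    assert not sys.platform.startswith('win')
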
-
-
-def deprecated(conditional=True):
- """
- Filter deprecation warnings while running the test suite.
-
- This decorator can be used to filter DeprecationWarning's, to avoid
- printing them during the test suite run, while checking that the test
- actually raises a DeprecationWarning.
-
- Parameters
- ----------
- conditional : bool or callable, optional
- Flag to determine whether to mark test as deprecated or not. If the
- condition is a callable, it is used at runtime to dynamically make the
- decision. Default is True.
-
- Returns
- -------
- decorator : function
- The `deprecated` decorator itself.
-
- Notes
- -----
- .. versionadded:: 1.4.0
-
- """
- def deprecate_decorator(f):
-
- def _deprecated_imp(*args, **kwargs):
-            # Check that a DeprecationWarning is raised, and suppress it.
- with assert_warns(DeprecationWarning):
- f(*args, **kwargs)
-
- if isinstance(conditional, collections_abc.Callable):
- cond = conditional()
- else:
- cond = conditional
- if cond:
- return _deprecated_imp
- else:
- return f
- return deprecate_decorator
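
# A sketch of the deprecated() decorator: the wrapped test must itself raise
# a DeprecationWarning, which is asserted and suppressed in one step.
import warnings

@deprecated()
def test_old_api_warns():
    warnings.warn('old_api is deprecated', DeprecationWarning)
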
-
-
-def parametrize(vars, input):
- """
-    Pytest compatibility function. This implements the simplest level of
-    pytest.mark.parametrize for use in nose as an aid in making the transition
-    to pytest, by simply forwarding to pytest.mark.parametrize. It does not
-    support variable substitution by name, nor does it support nesting or
-    classes. See the pytest documentation for usage.
-
- """
- import pytest
-
- return pytest.mark.parametrize(vars, input)
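
# Since parametrize simply forwards to pytest.mark.parametrize, the familiar
# pytest spelling applies unchanged:
@parametrize('n,expected', [(1, 1), (2, 4), (3, 9)])
def test_square(n, expected):
    assert n ** 2 == expected
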
-
-
-_needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
+++ /dev/null
-# These classes implement a doctest runner plugin for nose, a "known failure"
-# error class, and a customized TestProgram for NumPy.
-
-# Because this module imports nose directly, it should not
-# be used except by nosetester.py to avoid a general NumPy
-# dependency on nose.
-from __future__ import division, absolute_import, print_function
-
-import os
-import doctest
-import inspect
-
-import numpy
-import pytest
-from .utils import KnownFailureException, SkipTest
-import _pytest.runner
-import _pytest.skipping
-
-
-class NpyPlugin(object):
-
- def pytest_runtest_makereport(self, call):
- if call.excinfo:
- if call.excinfo.errisinstance(KnownFailureException):
-                # let's substitute the excinfo with a pytest.xfail one
- call2 = call.__class__(
- lambda: _pytest.runner.skip(str(call.excinfo.value)),
- call.when)
- print()
- print()
- print(call.excinfo._getreprcrash())
- print()
- print(call.excinfo)
- print()
- print(call2.excinfo)
- print()
- call.excinfo = call2.excinfo
- if call.excinfo.errisinstance(SkipTest):
-                # let's substitute the excinfo with a pytest.skip one
- call2 = call.__class__(
- lambda: _pytest.runner.skip(str(call.excinfo.value)),
- call.when)
- call.excinfo = call2.excinfo
-
-
-if False:
- from nose.plugins import doctests as npd
- from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
- from nose.plugins.base import Plugin
- from nose.util import src
- from .nosetester import get_package_name
- # Some of the classes in this module begin with 'Numpy' to clearly distinguish
- # them from the plethora of very similar names from nose/unittest/doctest
-
- #-----------------------------------------------------------------------------
- # Modified version of the one in the stdlib, that fixes a python bug (doctests
- # not found in extension modules, http://bugs.python.org/issue3158)
- class NumpyDocTestFinder(doctest.DocTestFinder):
-
- def _from_module(self, module, object):
- """
- Return true if the given object is defined in the given
- module.
- """
- if module is None:
- return True
- elif inspect.isfunction(object):
- return module.__dict__ is object.__globals__
- elif inspect.isbuiltin(object):
- return module.__name__ == object.__module__
- elif inspect.isclass(object):
- return module.__name__ == object.__module__
- elif inspect.ismethod(object):
- # This one may be a bug in cython that fails to correctly set the
- # __module__ attribute of methods, but since the same error is easy
- # to make by extension code writers, having this safety in place
- # isn't such a bad idea
- return module.__name__ == object.__self__.__class__.__module__
- elif inspect.getmodule(object) is not None:
- return module is inspect.getmodule(object)
- elif hasattr(object, '__module__'):
- return module.__name__ == object.__module__
- elif isinstance(object, property):
-                return True  # [XX] no way to be sure.
- else:
- raise ValueError("object must be a class or function")
-
- def _find(self, tests, obj, name, module, source_lines, globs, seen):
- """
- Find tests for the given object and any contained objects, and
- add them to `tests`.
- """
-
- doctest.DocTestFinder._find(self, tests, obj, name, module,
- source_lines, globs, seen)
-
- # Below we re-run pieces of the above method with manual modifications,
- # because the original code is buggy and fails to correctly identify
- # doctests in extension modules.
-
- # Local shorthands
- from inspect import (
- isroutine, isclass, ismodule, isfunction, ismethod
- )
-
- # Look for tests in a module's contained objects.
- if ismodule(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- valname1 = '%s.%s' % (name, valname)
- if ( (isroutine(val) or isclass(val))
- and self._from_module(module, val)):
-
- self._find(tests, val, valname1, module, source_lines,
- globs, seen)
-
- # Look for tests in a class's contained objects.
- if isclass(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- # Special handling for staticmethod/classmethod.
- if isinstance(val, staticmethod):
- val = getattr(obj, valname)
- if isinstance(val, classmethod):
- val = getattr(obj, valname).__func__
-
- # Recurse to methods, properties, and nested classes.
- if ((isfunction(val) or isclass(val) or
- ismethod(val) or isinstance(val, property)) and
- self._from_module(module, val)):
- valname = '%s.%s' % (name, valname)
- self._find(tests, val, valname, module, source_lines,
- globs, seen)
-
-
- # second-chance checker; if the default comparison doesn't
- # pass, then see if the expected output string contains flags that
- # tell us to ignore the output
- class NumpyOutputChecker(doctest.OutputChecker):
- def check_output(self, want, got, optionflags):
- ret = doctest.OutputChecker.check_output(self, want, got,
- optionflags)
- if not ret:
- if "#random" in want:
- return True
-
- # it would be useful to normalize endianness so that
- # bigendian machines don't fail all the tests (and there are
- # actually some bigendian examples in the doctests). Let's try
- # making them all little endian
- got = got.replace("'>", "'<")
- want = want.replace("'>", "'<")
-
- # try to normalize out 32 and 64 bit default int sizes
- for sz in [4, 8]:
- got = got.replace("'<i%d'" % sz, "int")
- want = want.replace("'<i%d'" % sz, "int")
-
- ret = doctest.OutputChecker.check_output(self, want,
- got, optionflags)
-
- return ret
-
-
- # Subclass nose.plugins.doctests.DocTestCase to work around a bug in
- # its constructor that blocks non-default arguments from being passed
- # down into doctest.DocTestCase
- class NumpyDocTestCase(npd.DocTestCase):
- def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
- checker=None, obj=None, result_var='_'):
- self._result_var = result_var
- self._nose_obj = obj
- doctest.DocTestCase.__init__(self, test,
- optionflags=optionflags,
- setUp=setUp, tearDown=tearDown,
- checker=checker)
-
-
- print_state = numpy.get_printoptions()
-
- class NumpyDoctest(npd.Doctest):
- name = 'numpydoctest' # call nosetests with --with-numpydoctest
- score = 1000 # load late, after doctest builtin
-
- # always use whitespace and ellipsis options for doctests
- doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
-
- # files that should be ignored for doctests
- doctest_ignore = ['generate_numpy_api.py',
- 'setup.py']
-
- # Custom classes; class variables to allow subclassing
- doctest_case_class = NumpyDocTestCase
- out_check_class = NumpyOutputChecker
- test_finder_class = NumpyDocTestFinder
-
- # Don't use the standard doctest option handler; hard-code the option values
- def options(self, parser, env=os.environ):
- Plugin.options(self, parser, env)
- # Test doctests in 'test' files / directories. Standard plugin default
- # is False
- self.doctest_tests = True
- # Variable name; if defined, doctest results stored in this variable in
- # the top-level namespace. None is the standard default
- self.doctest_result_var = None
-
- def configure(self, options, config):
- # parent method sets enabled flag from command line --with-numpydoctest
- Plugin.configure(self, options, config)
- self.finder = self.test_finder_class()
- self.parser = doctest.DocTestParser()
- if self.enabled:
- # Pull standard doctest out of plugin list; there's no reason to run
- # both. In practice the Unplugger plugin above would cover us when
- # run from a standard numpy.test() call; this is just in case
- # someone wants to run our plugin outside the numpy.test() machinery
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != 'doctest']
-
- def set_test_context(self, test):
- """ Configure `test` object to set test context
-
- We set the numpy / scipy standard doctest namespace
-
- Parameters
- ----------
- test : test object
- with ``globs`` dictionary defining namespace
-
- Returns
- -------
- None
-
- Notes
- -----
- `test` object modified in place
- """
- # set the namespace for tests
- pkg_name = get_package_name(os.path.dirname(test.filename))
-
- # Each doctest should execute in an environment equivalent to
- # starting Python and executing "import numpy as np", and,
- # for SciPy packages, an additional import of the local
- # package (so that scipy.linalg.basic.py's doctests have an
-            # implicit "from scipy import linalg" as well).
- #
- # Note: __file__ allows the doctest in NoseTester to run
- # without producing an error
- test.globs = {'__builtins__':__builtins__,
- '__file__':'__main__',
- '__name__':'__main__',
- 'np':numpy}
- # add appropriate scipy import for SciPy tests
- if 'scipy' in pkg_name:
- p = pkg_name.split('.')
- p2 = p[-1]
- test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
-
- # Override test loading to customize test context (with set_test_context
- # method), set standard docstring options, and install our own test output
- # checker
- def loadTestsFromModule(self, module):
- if not self.matches(module.__name__):
- npd.log.debug("Doctest doesn't want module %s", module)
- return
- try:
- tests = self.finder.find(module)
- except AttributeError:
- # nose allows module.__test__ = False; doctest does not and
- # throws AttributeError
- return
- if not tests:
- return
- tests.sort()
- module_file = src(module.__file__)
- for test in tests:
- if not test.examples:
- continue
- if not test.filename:
- test.filename = module_file
- # Set test namespace; test altered in place
- self.set_test_context(test)
- yield self.doctest_case_class(test,
- optionflags=self.doctest_optflags,
- checker=self.out_check_class(),
- result_var=self.doctest_result_var)
-
- # Add an afterContext method to nose.plugins.doctests.Doctest in order
- # to restore print options to the original state after each doctest
- def afterContext(self):
- numpy.set_printoptions(**print_state)
-
- # Ignore NumPy-specific build files that shouldn't be searched for tests
- def wantFile(self, file):
- bn = os.path.basename(file)
- if bn in self.doctest_ignore:
- return False
- return npd.Doctest.wantFile(self, file)
-
-
- class Unplugger(object):
- """ Nose plugin to remove named plugin late in loading
-
- By default it removes the "doctest" plugin.
- """
- name = 'unplugger'
- enabled = True # always enabled
- score = 4000 # load late in order to be after builtins
-
- def __init__(self, to_unplug='doctest'):
- self.to_unplug = to_unplug
-
- def options(self, parser, env):
- pass
-
- def configure(self, options, config):
- # Pull named plugin out of plugins list
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != self.to_unplug]
-
-
-
- # Class allows us to save the results of the tests in runTests - see runTests
- # method docstring for details
- class NumpyTestProgram(nose.core.TestProgram):
- def runTests(self):
- """Run Tests. Returns true on success, false on failure, and
- sets self.success to the same value.
-
- Because nose currently discards the test result object, but we need
- to return it to the user, override TestProgram.runTests to retain
- the result
- """
- if self.testRunner is None:
- self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
- verbosity=self.config.verbosity,
- config=self.config)
- plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
- if plug_runner is not None:
- self.testRunner = plug_runner
- self.result = self.testRunner.run(self.test)
- self.success = self.result.wasSuccessful()
- return self.success
-
+++ /dev/null
-"""
-Nose test running.
-
-This module implements ``test()`` and ``bench()`` functions for NumPy modules.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import warnings
-from numpy.compat import basestring
-import numpy as np
-
-from .utils import import_nose, suppress_warnings
-
-
-__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
- '_numpy_tester', 'get_package_name', 'import_nose',
- 'suppress_warnings']
-
-
-def get_package_name(filepath):
- """
- Given a path where a package is installed, determine its name.
-
- Parameters
- ----------
- filepath : str
- Path to a file. If the determination fails, "numpy" is returned.
-
- Examples
- --------
- >>> np.testing.nosetester.get_package_name('nonsense')
- 'numpy'
-
- """
-
- fullpath = filepath[:]
- pkg_name = []
- while 'site-packages' in filepath or 'dist-packages' in filepath:
- filepath, p2 = os.path.split(filepath)
- if p2 in ('site-packages', 'dist-packages'):
- break
- pkg_name.append(p2)
-
- # if package name determination failed, just default to numpy/scipy
- if not pkg_name:
- if 'scipy' in fullpath:
- return 'scipy'
- else:
- return 'numpy'
-
- # otherwise, reverse to get correct order and return
- pkg_name.reverse()
-
- # don't include the outer egg directory
- if pkg_name[0].endswith('.egg'):
- pkg_name.pop(0)
-
- return '.'.join(pkg_name)
-
-
-def run_module_suite(file_to_run=None, argv=None):
- """
- Run a test module.
-
-    Historically equivalent to calling ``$ nosetests <argv> <file_to_run>``
-    from the command line; this version runs pytest rather than nose.
-
- Parameters
- ----------
- file_to_run : str, optional
- Path to test module, or None.
- By default, run the module from which this function is called.
- argv : list of strings
- Arguments to be passed to the pytest runner. ``argv[0]`` is
- ignored. All command line arguments accepted by ``pytest``
- will work. If it is the default value None, sys.argv is used.
-
- .. versionadded:: 1.14.0
-
- Examples
- --------
- Adding the following::
-
-        if __name__ == "__main__":
- run_module_suite(argv=sys.argv)
-
- at the end of a test module will run the tests when that module is
- called in the python interpreter.
-
- Alternatively, calling::
-
- >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
-
-    from an interpreter will run all the test routines in 'test_matlib.py'.
- """
- import pytest
- if file_to_run is None:
- f = sys._getframe(1)
- file_to_run = f.f_locals.get('__file__', None)
- if file_to_run is None:
- raise AssertionError
-
- if argv is None:
- argv = sys.argv[1:] + [file_to_run]
- else:
- argv = argv + [file_to_run]
-
- pytest.main(argv)
-
-if False:
- # disable run_module_suite and NoseTester
- # until later
- class NoseTester(object):
- """
- Nose test runner.
-
- This class is made available as numpy.testing.Tester, and a test function
- is typically added to a package's __init__.py like so::
-
- from numpy.testing import Tester
- test = Tester().test
-
- Calling this test function finds and runs all tests associated with the
- package and all its sub-packages.
-
- Attributes
- ----------
- package_path : str
- Full path to the package to test.
- package_name : str
- Name of the package to test.
-
- Parameters
- ----------
- package : module, str or None, optional
- The package to test. If a string, this should be the full path to
- the package. If None (default), `package` is set to the module from
- which `NoseTester` is initialized.
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- Default is "release".
- depth : int, optional
- If `package` is None, then this can be used to initialize from the
- module of the caller of (the caller of (...)) the code that
- initializes `NoseTester`. Default of 0 means the module of the
- immediate caller; higher values are useful for utility routines that
- want to initialize `NoseTester` objects on behalf of other code.
-
- """
- def __init__(self, package=None, raise_warnings="release", depth=0):
- # Back-compat: 'None' used to mean either "release" or "develop"
- # depending on whether this was a release or develop version of
- # numpy. Those semantics were fine for testing numpy, but not so
- # helpful for downstream projects like scipy that use
- # numpy.testing. (They want to set this based on whether *they* are a
- # release or develop version, not whether numpy is.) So we continue to
- # accept 'None' for back-compat, but it's now just an alias for the
- # default "release".
- if raise_warnings is None:
- raise_warnings = "release"
-
- package_name = None
- if package is None:
- f = sys._getframe(1 + depth)
- package_path = f.f_locals.get('__file__', None)
- if package_path is None:
- raise AssertionError
- package_path = os.path.dirname(package_path)
- package_name = f.f_locals.get('__name__', None)
- elif isinstance(package, type(os)):
- package_path = os.path.dirname(package.__file__)
- package_name = getattr(package, '__name__', None)
- else:
- package_path = str(package)
-
- self.package_path = package_path
-
- # Find the package name under test; this name is used to limit coverage
- # reporting (if enabled).
- if package_name is None:
- package_name = get_package_name(package_path)
- self.package_name = package_name
-
- # Set to "release" in constructor in maintenance branches.
- self.raise_warnings = raise_warnings
-
- def _test_argv(self, label, verbose, extra_argv):
- ''' Generate argv for nosetests command
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- see ``test`` docstring
- verbose : int, optional
- Integer in range 1..3, bigger means more verbose.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- argv : list
- command line arguments that will be passed to nose
- '''
- argv = [__file__, self.package_path, '-s']
- if label and label != 'full':
- if not isinstance(label, basestring):
- raise TypeError('Selection label should be a string')
- if label == 'fast':
- label = 'not slow'
- argv += ['-A', label]
-
- argv += [['-q'], [], ['-v']][min(verbose - 1, 2)]
-
- # FIXME is this true of pytest
- # When installing with setuptools, and also in some other cases, the
- # test_*.py files end up marked +x executable. Nose, by default, does
- # not run files marked with +x as they might be scripts. However, in
- # our case nose only looks for test_*.py files under the package
- # directory, which should be safe.
- # argv += ['--exe']
- if extra_argv:
- argv += extra_argv
- return argv
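For reference, a minimal runnable sketch of the selection logic above; the package path is a placeholder, and the real helper also prepends ``__file__``:

    label, verbose = 'fast', 3
    argv = ['/path/to/numpy', '-s']          # placeholder for package_path
    if label and label != 'full':
        argv += ['-A', 'not slow' if label == 'fast' else label]
    argv += [['-q'], [], ['-v']][min(verbose - 1, 2)]
    print(argv)   # ['/path/to/numpy', '-s', '-A', 'not slow', '-v']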
-
- def _show_system_info(self):
- import pytest
- import numpy
-
- print("NumPy version %s" % numpy.__version__)
- relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
- print("NumPy relaxed strides checking option:", relaxed_strides)
- npdir = os.path.dirname(numpy.__file__)
- print("NumPy is installed in %s" % npdir)
-
- if 'scipy' in self.package_name:
- import scipy
- print("SciPy version %s" % scipy.__version__)
- spdir = os.path.dirname(scipy.__file__)
- print("SciPy is installed in %s" % spdir)
-
- pyversion = sys.version.replace('\n', '')
- print("Python version %s" % pyversion)
- print("pytest version %d.%d.%d" % pytest.__versioninfo__)
-
- def _get_custom_doctester(self):
- """ Return instantiated plugin for doctests
-
- Allows subclassing of this class to override doctester
-
- A return value of None means use the nose builtin doctest plugin
- """
- from .noseclasses import NumpyDoctest
- return NumpyDoctest()
-
- def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, timer=False):
- """
- Run tests for module using nose.
-
- This method does the heavy lifting for the `test` method. It takes all
- the same arguments, for details see `test`.
-
- See Also
- --------
- test
-
- """
- # fail with nice error message if nose is not present
- import_nose()
- # compile argv
- argv = self._test_argv(label, verbose, extra_argv)
- # our way of doing coverage
- if coverage:
- argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
- '--cover-tests', '--cover-erase']
-
- if timer:
- if timer is True:
- argv += ['--with-timer']
- elif isinstance(timer, int):
- argv += ['--with-timer', '--timer-top-n', str(timer)]
-
- # construct list of plugins
- import nose.plugins.builtin
- from nose.plugins import EntryPointPluginManager
- from .noseclasses import KnownFailurePlugin, Unplugger
- plugins = [KnownFailurePlugin()]
- plugins += [p() for p in nose.plugins.builtin.plugins]
- try:
- # External plugins (like nose-timer)
- entrypoint_manager = EntryPointPluginManager()
- entrypoint_manager.loadPlugins()
- plugins += [p for p in entrypoint_manager.plugins]
- except ImportError:
- # Relies on pkg_resources, not a hard dependency
- pass
-
- # add doctesting if required
- doctest_argv = '--with-doctest' in argv
- if doctests == False and doctest_argv:
- doctests = True
- plug = self._get_custom_doctester()
- if plug is None:
- # use standard doctesting
- if doctests and not doctest_argv:
- argv += ['--with-doctest']
- else: # custom doctesting
- if doctest_argv: # in fact the unplugger would take care of this
- argv.remove('--with-doctest')
- plugins += [Unplugger('doctest'), plug]
- if doctests:
- argv += ['--with-' + plug.name]
- return argv, plugins
-
- def test(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, raise_warnings=None,
- timer=False):
- """
- Run tests for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the tests to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow tests as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Verbosity value for test outputs, in the range 1..3. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
- doctests : bool, optional
- If True, run doctests in module. Default is False.
- coverage : bool, optional
- If True, report coverage of NumPy code. Default is False.
- (This requires the `coverage module
- <http://nedbatchelder.com/code/modules/coverage.html>`_).
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- The default is to use the class initialization value.
- timer : bool or int, optional
- Timing of individual tests with ``nose-timer`` (which needs to be
- installed). If True, time tests and report on all of them.
- If an integer (say ``N``), report timing results for ``N`` slowest
- tests.
-
- Returns
- -------
- result : object
- Returns the result of running the tests as a
- ``nose.result.TextTestResult`` object.
-
- Notes
- -----
- Each NumPy module exposes `test` in its namespace to run all tests for it.
- For example, to run all tests for numpy.lib:
-
- >>> np.lib.test() #doctest: +SKIP
-
- Examples
- --------
- >>> result = np.lib.test() #doctest: +SKIP
- Running unit tests for numpy.lib
- ...
- Ran 976 tests in 3.933s
-
- OK
-
- >>> result.errors #doctest: +SKIP
- []
- >>> result.knownfail #doctest: +SKIP
- []
- """
-
- # cap verbosity at 3 because nose becomes *very* verbose beyond that
- verbose = min(verbose, 3)
-
- from . import utils
- utils.verbose = verbose
-
- argv, plugins = self.prepare_test_args(
- label, verbose, extra_argv, doctests, coverage, timer)
-
- if doctests:
- print("Running unit tests and doctests for %s" % self.package_name)
- else:
- print("Running unit tests for %s" % self.package_name)
-
- self._show_system_info()
-
- # reset doctest state on every run
- import doctest
- doctest.master = None
-
- if raise_warnings is None:
- raise_warnings = self.raise_warnings
-
- _warn_opts = dict(develop=(Warning,),
- release=())
- if isinstance(raise_warnings, basestring):
- raise_warnings = _warn_opts[raise_warnings]
-
- with suppress_warnings("location") as sup:
- # Reset the warning filters to the default state,
- # so that running the tests is more repeatable.
- warnings.resetwarnings()
- # Set all warnings to 'always', because the default 'once'
- # has the bad property of possibly shadowing later warnings.
- warnings.filterwarnings('always')
- # Force the requested warnings to raise
- for warningtype in raise_warnings:
- warnings.filterwarnings('error', category=warningtype)
- # Filter out annoying import messages.
- sup.filter(message='Not importing directory')
- sup.filter(message="numpy.dtype size changed")
- sup.filter(message="numpy.ufunc size changed")
- sup.filter(category=np.ModuleDeprecationWarning)
- # Filter out boolean '-' deprecation messages. This allows
- # older versions of scipy to test without a flood of messages.
- sup.filter(message=".*boolean negative.*")
- sup.filter(message=".*boolean subtract.*")
- # Filter out distutils cpu warnings (could be localized to
- # distutils tests). ASV has problems with top level import,
- # so fetch module for suppression here.
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- from ...distutils import cpuinfo
- sup.filter(category=UserWarning, module=cpuinfo)
- # See #7949: Filter out deprecation warnings due to the -3 flag to
- # python 2
- if sys.version_info.major == 2 and sys.py3kwarning:
- # This is very specific, so using the fragile module filter
- # is fine
- import threading
- sup.filter(DeprecationWarning,
- r"sys\.exc_clear\(\) not supported in 3\.x",
- module=threading)
- sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
- sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
- sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
- # Filter out some deprecation warnings inside nose 1.3.7 when run
- # on python 3.5b2. See
- # https://github.com/nose-devs/nose/issues/929
- # Note: it is hard to filter based on module for sup (lineno could
- # be implemented).
- warnings.filterwarnings("ignore", message=".*getargspec.*",
- category=DeprecationWarning,
- module=r"nose\.")
-
- from .noseclasses import NumpyTestProgram
-
- t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
-
- return t.result
-
- def bench(self, label='fast', verbose=1, extra_argv=None):
- """
- Run benchmarks for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the benchmarks to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow benchmarks as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Integer in range 1..3, bigger means more verbose.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- success : bool
- Returns True if running the benchmarks works, False if an error
- occurred.
-
- Notes
- -----
- Benchmarks are like tests, but have names starting with "bench" instead
- of "test", and can be found under the "benchmarks" sub-directory of the
- module.
-
- Each NumPy module exposes `bench` in its namespace to run all benchmarks
- for it.
-
- Examples
- --------
- >>> success = np.lib.bench() #doctest: +SKIP
- Running benchmarks for numpy.lib
- ...
- using 562341 items:
- unique:
- 0.11
- unique1d:
- 0.11
- ratio: 1.0
- nUnique: 56230 == 56230
- ...
- OK
-
- >>> success #doctest: +SKIP
- True
-
- """
-
- print("Running benchmarks for %s" % self.package_name)
- self._show_system_info()
-
- argv = self._test_argv(label, verbose, extra_argv)
- argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
-
- # import nose or make informative error
- nose = import_nose()
-
- # get plugin to disable doctests
- from .noseclasses import Unplugger
- add_plugins = [Unplugger('doctest')]
-
- return nose.run(argv=argv, addplugins=add_plugins)
-else:
-
- class NoseTester(object):
- def __init__(self, package=None, raise_warnings="release", depth=0):
- pass
-
- def test(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, raise_warnings=None,
- timer=False):
- pass
-
- def bench(self, label='fast', verbose=1, extra_argv=None):
- pass
-
-
-def _numpy_tester():
- if hasattr(np, "__version__") and ".dev0" in np.__version__:
- mode = "develop"
- else:
- mode = "release"
- return NoseTester(raise_warnings=mode, depth=1)
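The version-string check above is the whole mode switch; a small sketch with illustrative version strings:

    def _select_mode(version):
        # mirrors _numpy_tester: development builds raise on all warnings
        return "develop" if ".dev0" in version else "release"

    assert _select_mode("1.15.0.dev0+abc123") == "develop"
    assert _select_mode("1.14.3") == "release"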
+++ /dev/null
-"""
-Utility function to facilitate testing.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import re
-import operator
-import warnings
-from functools import partial, wraps
-import shutil
-import contextlib
-from tempfile import mkdtemp, mkstemp
-
-from numpy.core import(
- float32, empty, arange, array_repr, ndarray, isnat, array)
-from numpy.lib.utils import deprecate
-
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
-__all__ = [
- 'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
- 'assert_array_equal', 'assert_array_less', 'assert_string_equal',
- 'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
- 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
- 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
- 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
- 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
- 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
- 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
- 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data',
- ]
-
-
-class KnownFailureException(Exception):
- """Raise this exception to mark a test as a known failing test.
-
- """
- def __new__(cls, *args, **kwargs):
- # import _pytest here to avoid hard dependency
- import _pytest
- return _pytest.skipping.xfail(*args, **kwargs)
-
-
-class SkipTest(Exception):
- """Raise this exception to mark a skipped test.
-
- """
- def __new__(cls, *args, **kwargs):
- # import _pytest here to avoid hard dependency
- import _pytest
- return _pytest.runner.Skipped(*args, **kwargs)
-
-
-class IgnoreException(Exception):
- """Ignoring this exception due to disabled feature
-
- This exception seems unused and can be removed.
-
- """
- pass
-
-
-KnownFailureTest = KnownFailureException # backwards compat
-
-verbose = 0
-
-IS_PYPY = '__pypy__' in sys.modules
-HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
-
-
-def import_nose():
- """ Not wanted for pytest, make it a dummy function
-
- """
- pass
-
-
-def assert_(val, msg=''):
- """
- Assert that works in release mode.
- Accepts callable msg to allow deferring evaluation until failure.
-
- The Python built-in ``assert`` does not work when executing code in
- optimized mode (the ``-O`` flag) - no byte-code is generated for it.
-
- For documentation on usage, refer to the Python documentation.
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if not val:
- try:
- smsg = msg()
- except TypeError:
- smsg = msg
- raise AssertionError(smsg)
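The callable-message form is the point of this helper: the message is only built when the assertion fails. A minimal sketch:

    from numpy.testing import assert_

    data = [1, 2, 3]
    # the lambda is invoked only on failure, so an expensive repr is not
    # computed on the success path
    assert_(len(data) == 3, msg=lambda: 'unexpected contents: %r' % (data,))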
-
-
-def gisnan(x):
- """like isnan, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isnan and other ufuncs sometimes return the NotImplemented singleton
- instead of raising an exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isnan
- st = isnan(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isnan not supported for this type")
- return st
-
-
-def gisfinite(x):
- """like isfinite, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isfinite and other ufuncs sometimes return the NotImplemented singleton
- instead of raising an exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isfinite, errstate
- with errstate(invalid='ignore'):
- st = isfinite(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isfinite not supported for this type")
- return st
-
-
-def gisinf(x):
- """like isinf, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isinf and other ufuncs sometimes return the NotImplemented singleton
- instead of raising an exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isinf, errstate
- with errstate(invalid='ignore'):
- st = isinf(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isinf not supported for this type")
- return st
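What the three wrappers guard against, as a standalone sketch (``gisnan_sketch`` is a hypothetical stand-in for the helpers above):

    import numpy as np

    def gisnan_sketch(x):
        # promote a NotImplemented return value to an explicit TypeError
        st = np.isnan(x)
        if st is NotImplemented:
            raise TypeError("isnan not supported for this type")
        return st

    print(gisnan_sketch(np.array([1.0, np.nan])))   # [False  True]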
-
-
-@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
- "Use numpy.random.rand instead.")
-def rand(*args):
- """Returns an array of random numbers with the given shape.
-
- This only uses the standard library, so it is useful for testing purposes.
- """
- import random
- from numpy.core import zeros, float64
- results = zeros(args, float64)
- f = results.flat
- for i in range(len(f)):
- f[i] = random.random()
- return results
-
-
-if os.name == 'nt':
- # Code "stolen" from enthought/debug/memusage.py
- def GetPerformanceAttributes(object, counter, instance=None,
- inum=-1, format=None, machine=None):
- # NOTE: Many counters require 2 samples to give accurate results,
- # including "% Processor Time" (as by definition, at any instant, a
- # thread's CPU usage is either 0 or 100). To read counters like this,
- # you should copy this function, but keep the counter open, and call
- # CollectQueryData() each time you need to know.
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
- # My older explanation for this was that the "AddCounter" process forced
- # the CPU to 100%, but the above makes more sense :)
- import win32pdh
- if format is None:
- format = win32pdh.PDH_FMT_LONG
- path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
- hq = win32pdh.OpenQuery()
- try:
- hc = win32pdh.AddCounter(hq, path)
- try:
- win32pdh.CollectQueryData(hq)
- type, val = win32pdh.GetFormattedCounterValue(hc, format)
- return val
- finally:
- win32pdh.RemoveCounter(hc)
- finally:
- win32pdh.CloseQuery(hq)
-
- def memusage(processName="python", instance=0):
- # from win32pdhutil, part of the win32all package
- import win32pdh
- return GetPerformanceAttributes("Process", "Virtual Bytes",
- processName, instance,
- win32pdh.PDH_FMT_LONG, None)
-elif sys.platform[:5] == 'linux':
-
- def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
- """
- Return virtual memory size in bytes of the running python.
-
- """
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[22])
- except Exception:
- return
-else:
- def memusage():
- """
- Return memory usage of running python. [Not implemented]
-
- """
- raise NotImplementedError
-
-
-if sys.platform[:5] == 'linux':
- def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
- _load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[13])
- except Exception:
- return int(100*(time.time()-_load_time[0]))
-else:
- # os.getpid is not available on all platforms.
- # Using time is safe but inaccurate, especially when process
- # was suspended or sleeping.
- def jiffies(_load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- return int(100*(time.time()-_load_time[0]))
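On Linux, both helpers parse /proc/<pid>/stat; a Linux-only sketch of the fields they pick out (0-based indices as in the code, see man 5 proc):

    with open('/proc/self/stat') as f:
        fields = f.readline().split(' ')
    utime = int(fields[13])    # jiffies spent in user mode  -> jiffies()
    vsize = int(fields[22])    # virtual memory size, bytes  -> memusage()
    print(utime, vsize)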
-
-
-def build_err_msg(arrays, err_msg, header='Items are not equal:',
- verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
- msg = ['\n' + header]
- if err_msg:
- if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
- msg = [msg[0] + ' ' + err_msg]
- else:
- msg.append(err_msg)
- if verbose:
- for i, a in enumerate(arrays):
-
- if isinstance(a, ndarray):
- # precision argument is only needed if the objects are ndarrays
- r_func = partial(array_repr, precision=precision)
- else:
- r_func = repr
-
- try:
- r = r_func(a)
- except Exception as exc:
- r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
- if r.count('\n') > 3:
- r = '\n'.join(r.splitlines()[:3])
- r += '...'
- msg.append(' %s: %s' % (names[i], r))
- return '\n'.join(msg)
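A sketch of the message this produces; the exact repr spacing depends on the numpy print options in effect:

    import numpy as np

    msg = build_err_msg([np.array([1., 2.]), np.array([1., 3.])], err_msg='')
    print(msg)
    # Items are not equal:
    #  ACTUAL: array([ 1.,  2.])
    #  DESIRED: array([ 1.,  3.])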
-
-
-def assert_equal(actual, desired, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal.
-
- Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
- check that all elements of these objects are equal. An exception is raised
- at the first conflicting value.
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal.
-
- Examples
- --------
- >>> np.testing.assert_equal([4,5], [4,6])
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- item=1
- ACTUAL: 5
- DESIRED: 6
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if isinstance(desired, dict):
- if not isinstance(actual, dict):
- raise AssertionError(repr(type(actual)))
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k in desired:
- if k not in actual:
- raise AssertionError(repr(k))
- assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
- return
- if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k in range(len(desired)):
- assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
- return
- from numpy.core import ndarray, isscalar, signbit
- from numpy.lib import iscomplexobj, real, imag
- if isinstance(actual, ndarray) or isinstance(desired, ndarray):
- return assert_array_equal(actual, desired, err_msg, verbose)
- msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_equal(actualr, desiredr)
- assert_equal(actuali, desiredi)
- except AssertionError:
- raise AssertionError(msg)
-
- # isscalar test to check cases such as [np.nan] != np.nan
- if isscalar(desired) != isscalar(actual):
- raise AssertionError(msg)
-
- # Inf/nan/negative zero handling
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- isdesnan = gisnan(desired)
- isactnan = gisnan(actual)
- if isdesnan or isactnan:
- if not (isdesnan and isactnan):
- raise AssertionError(msg)
- else:
- if not desired == actual:
- raise AssertionError(msg)
- return
- elif desired == 0 and actual == 0:
- if not signbit(desired) == signbit(actual):
- raise AssertionError(msg)
- # If TypeError or ValueError raised while using isnan and co, just handle
- # as before
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- try:
- # If both are NaT (and have the same dtype -- datetime or timedelta)
- # they are considered equal.
- if (isnat(desired) == isnat(actual) and
- array(desired).dtype.type == array(actual).dtype.type):
- return
- else:
- raise AssertionError(msg)
-
- # If TypeError or ValueError raised while using isnan and co, just handle
- # as before
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- # Explicitly use __eq__ for comparison, ticket #2552
- if not (desired == actual):
- raise AssertionError(msg)
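Two consequences of the special-casing above, as a sketch:

    import numpy as np
    from numpy.testing import assert_equal

    assert_equal(np.nan, np.nan)          # passes: both-NaN counts as equal
    assert_equal(complex(1, np.nan),
                 complex(1, np.nan))      # passes: real/imag compared separately
    # assert_equal(np.nan, [np.nan]) would raise, tripping the isscalar check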
-
-
-def print_assert_equal(test_string, actual, desired):
- """
- Test if two objects are equal, and print an error message if test fails.
-
- The test is performed with ``actual == desired``.
-
- Parameters
- ----------
- test_string : str
- The message supplied to AssertionError.
- actual : object
- The object to test for equality against `desired`.
- desired : object
- The expected result.
-
- Examples
- --------
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
- Traceback (most recent call last):
- ...
- AssertionError: Test XYZ of func xyz failed
- ACTUAL:
- [0, 1]
- DESIRED:
- [0, 2]
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import pprint
-
- if not (actual == desired):
- msg = StringIO()
- msg.write(test_string)
- msg.write(' failed\nACTUAL: \n')
- pprint.pprint(actual, msg)
- msg.write('DESIRED: \n')
- pprint.pprint(desired, msg)
- raise AssertionError(msg.getvalue())
-
-
-def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
- """
- Raises an AssertionError if two items are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- The test verifies that the elements of ``actual`` and ``desired`` satisfy:
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation in `assert_array_almost_equal` did up to rounding
- vagaries. An exception is raised at conflicting values. For ndarrays this
- delegates to assert_array_almost_equal.
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- decimal : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> import numpy.testing as npt
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- ACTUAL: 2.3333333333333002
- DESIRED: 2.3333333399999998
-
- >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
- ... np.array([1.0,2.33333334]), decimal=9)
- ...
- <type 'exceptions.AssertionError'>:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333333])
- y: array([ 1. , 2.33333334])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import ndarray
- from numpy.lib import iscomplexobj, real, imag
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- def _build_err_msg():
- header = ('Arrays are not almost equal to %d decimals' % decimal)
- return build_err_msg([actual, desired], err_msg, verbose=verbose,
- header=header)
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_almost_equal(actualr, desiredr, decimal=decimal)
- assert_almost_equal(actuali, desiredi, decimal=decimal)
- except AssertionError:
- raise AssertionError(_build_err_msg())
-
- if isinstance(actual, (ndarray, tuple, list)) \
- or isinstance(desired, (ndarray, tuple, list)):
- return assert_array_almost_equal(actual, desired, decimal, err_msg)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(_build_err_msg())
- else:
- if not desired == actual:
- raise AssertionError(_build_err_msg())
- return
- except (NotImplementedError, TypeError):
- pass
- if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
- raise AssertionError(_build_err_msg())
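A worked instance of the ``1.5 * 10**(-decimal)`` criterion, matching the docstring examples:

    diff = abs(2.33333334 - 2.3333333333333)   # ~6.7e-09
    print(diff < 1.5 * 10.0**(-7))             # True:  decimal=7 passes
    print(diff < 1.5 * 10.0**(-10))            # False: decimal=10 raises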
-
-
-def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
- """
- Raises an AssertionError if two items are not equal up to significant
- digits.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- Given two numbers, check that they are approximately equal.
- Approximately equal is defined as the number of significant digits
- that agree.
-
- Parameters
- ----------
- actual : scalar
- The object to check.
- desired : scalar
- The expected object.
- significant : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
- ... significant=8)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
- ... significant=8)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal to 8 significant digits:
- ACTUAL: 1.234567e-021
- DESIRED: 1.2345672000000001e-021
-
- the evaluated condition that raises the exception is
-
- >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
- True
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
-
- (actual, desired) = map(float, (actual, desired))
- if desired == actual:
- return
- # Normalize the numbers to the range (-10.0, 10.0)
- # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
- with np.errstate(invalid='ignore'):
- scale = 0.5*(np.abs(desired) + np.abs(actual))
- scale = np.power(10, np.floor(np.log10(scale)))
- try:
- sc_desired = desired/scale
- except ZeroDivisionError:
- sc_desired = 0.0
- try:
- sc_actual = actual/scale
- except ZeroDivisionError:
- sc_actual = 0.0
- msg = build_err_msg([actual, desired], err_msg,
- header='Items are not equal to %d significant digits:' %
- significant,
- verbose=verbose)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(msg)
- else:
- if not desired == actual:
- raise AssertionError(msg)
- return
- except (TypeError, NotImplementedError):
- pass
- if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
- raise AssertionError(msg)
-
-
-def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
- header='', precision=6, equal_nan=True,
- equal_inf=True):
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import array, isnan, isinf, any, inf
- x = array(x, copy=False, subok=True)
- y = array(y, copy=False, subok=True)
-
- def isnumber(x):
- return x.dtype.char in '?bhilqpBHILQPefdgFDG'
-
- def istime(x):
- return x.dtype.char in "Mm"
-
- def chk_same_position(x_id, y_id, hasval='nan'):
- """Handling nan/inf: check that x and y have the nan/inf at the same
- locations."""
- try:
- assert_array_equal(x_id, y_id)
- except AssertionError:
- msg = build_err_msg([x, y],
- err_msg + '\nx and y %s location mismatch:'
- % (hasval), verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- try:
- cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
- if not cond:
- msg = build_err_msg([x, y],
- err_msg
- + '\n(shapes %s, %s mismatch)' % (x.shape,
- y.shape),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- if isnumber(x) and isnumber(y):
- has_nan = has_inf = False
- if equal_nan:
- x_isnan, y_isnan = isnan(x), isnan(y)
- # Validate that NaNs are in the same place
- has_nan = any(x_isnan) or any(y_isnan)
- if has_nan:
- chk_same_position(x_isnan, y_isnan, hasval='nan')
-
- if equal_inf:
- x_isinf, y_isinf = isinf(x), isinf(y)
- # Validate that infinite values are in the same place
- has_inf = any(x_isinf) or any(y_isinf)
- if has_inf:
- # Check +inf and -inf separately, since they are different
- chk_same_position(x == +inf, y == +inf, hasval='+inf')
- chk_same_position(x == -inf, y == -inf, hasval='-inf')
-
- if has_nan and has_inf:
- x = x[~(x_isnan | x_isinf)]
- y = y[~(y_isnan | y_isinf)]
- elif has_nan:
- x = x[~x_isnan]
- y = y[~y_isnan]
- elif has_inf:
- x = x[~x_isinf]
- y = y[~y_isinf]
-
- # Only do the comparison if actual values are left
- if x.size == 0:
- return
-
- elif istime(x) and istime(y):
- # If one is datetime64 and the other timedelta64 there is no point
- if equal_nan and x.dtype.type == y.dtype.type:
- x_isnat, y_isnat = isnat(x), isnat(y)
-
- if any(x_isnat) or any(y_isnat):
- chk_same_position(x_isnat, y_isnat, hasval="NaT")
-
- if any(x_isnat) or any(y_isnat):
- x = x[~x_isnat]
- y = y[~y_isnat]
-
- val = comparison(x, y)
-
- if isinstance(val, bool):
- cond = val
- reduced = [0]
- else:
- reduced = val.ravel()
- cond = reduced.all()
- reduced = reduced.tolist()
- if not cond:
- match = 100-100.0*reduced.count(1)/len(reduced)
- msg = build_err_msg([x, y],
- err_msg
- + '\n(mismatch %s%%)' % (match,),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
- except ValueError:
- import traceback
- efmt = traceback.format_exc()
- header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
-
- msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise ValueError(msg)
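Every array assertion below is a thin wrapper over this helper; a custom ordering check can be assembled the same way (a hypothetical sketch, not part of the module):

    import operator
    import numpy as np

    def assert_array_less_equal(x, y, err_msg=''):
        # same pattern as assert_array_less further down
        assert_array_compare(operator.__le__, x, y, err_msg=err_msg,
                             header='Arrays are not ordered by <=',
                             equal_inf=False)

    assert_array_less_equal(np.array([1.0, 2.0]), np.array([1.0, 3.0]))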
-
-
-def assert_array_equal(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not equal.
-
- Given two array_like objects, check that the shape is equal and all
- elements of these objects are equal. An exception is raised at
- shape mismatch or conflicting values. In contrast to the standard usage
- in numpy, NaNs are compared like numbers; no assertion is raised if
- both objects have NaNs in the same positions.
-
- The usual caution for verifying equality with floating point numbers is
- advised.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- The first assert does not raise an exception:
-
- >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
- ... [np.exp(0),2.33333, np.nan])
-
- The assert fails because of numerical imprecision with floats:
-
- >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- AssertionError:
- Arrays are not equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 3.14159265, NaN])
- y: array([ 1. , 3.14159265, NaN])
-
- Use `assert_allclose` or one of the nulp (number of floating point values)
- functions for these cases instead:
-
- >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan],
- ... rtol=1e-10, atol=0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
- verbose=verbose, header='Arrays are not equal')
-
-
-def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- The test verifies identical shapes and that the elements of ``actual`` and
- ``desired`` satisfy:
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation did up to rounding vagaries. An exception is raised
- at shape mismatch or conflicting values. In contrast to the standard usage
- in numpy, NaNs are compared like numbers; no assertion is raised if both
- objects have NaNs in the same positions.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- decimal : int, optional
- Desired precision, default is 6.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- the first assert does not raise an exception
-
- >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
- ... [1.0,2.333,np.nan])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33339,np.nan], decimal=5)
- ...
- <type 'exceptions.AssertionError'>:
- AssertionError:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33339, NaN])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33333, 5], decimal=5)
- <type 'exceptions.ValueError'>:
- ValueError:
- Arrays are not almost equal
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33333, 5. ])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import around, number, float_, result_type, array
- from numpy.core.numerictypes import issubdtype
- from numpy.core.fromnumeric import any as npany
-
- def compare(x, y):
- try:
- if npany(gisinf(x)) or npany(gisinf(y)):
- xinfid = gisinf(x)
- yinfid = gisinf(y)
- if not (xinfid == yinfid).all():
- return False
- # if there is only one item, x and y are both +-inf here
- if x.size == y.size == 1:
- return x == y
- x = x[~xinfid]
- y = y[~yinfid]
- except (TypeError, NotImplementedError):
- pass
-
- # make sure y is an inexact type to avoid abs(MIN_INT); will cause
- # casting of x later.
- dtype = result_type(y, 1.)
- y = array(y, dtype=dtype, copy=False, subok=True)
- z = abs(x - y)
-
- if not issubdtype(z.dtype, number):
- z = z.astype(float_) # handle object arrays
-
- return z < 1.5 * 10.0**(-decimal)
-
- assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
- header=('Arrays are not almost equal to %d decimals' % decimal),
- precision=decimal)
-
-
-def assert_array_less(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not ordered by less
- than.
-
- Given two array_like objects, check that the shape is equal and all
- elements of the first object are strictly smaller than those of the
- second object. An exception is raised at shape mismatch or incorrectly
- ordered values. Shape mismatch does not raise if an object has zero
- dimension. In contrast to the standard usage in numpy, NaNs are
- compared; no assertion is raised if both objects have NaNs in the same
- positions.
-
- Parameters
- ----------
- x : array_like
- The smaller object to check.
- y : array_like
- The larger object to compare.
- err_msg : string
- The error message to be printed in case of failure.
- verbose : bool
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_array_equal: tests objects for equality
- assert_array_almost_equal: test objects for equality up to precision
-
- Examples
- --------
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 1., NaN])
- y: array([ 1., 2., NaN])
-
- >>> np.testing.assert_array_less([1.0, 4.0], 3)
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 4.])
- y: array(3)
-
- >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (shapes (3,), (1,) mismatch)
- x: array([ 1., 2., 3.])
- y: array([4])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
- verbose=verbose,
- header='Arrays are not less-ordered',
- equal_inf=False)
-
-
-def runstring(astr, dict):
- exec(astr, dict)
-
-
-def assert_string_equal(actual, desired):
- """
- Test if two strings are equal.
-
- If the given strings are equal, `assert_string_equal` does nothing.
- If they are not equal, an AssertionError is raised, and the diff
- between the strings is shown.
-
- Parameters
- ----------
- actual : str
- The string to test for equality against the expected string.
- desired : str
- The expected string.
-
- Examples
- --------
- >>> np.testing.assert_string_equal('abc', 'abc')
- >>> np.testing.assert_string_equal('abc', 'abcd')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ...
- AssertionError: Differences in strings:
- - abc+ abcd? +
-
- """
- # delay import of difflib to reduce startup time
- __tracebackhide__ = True # Hide traceback for py.test
- import difflib
-
- if not isinstance(actual, str):
- raise AssertionError(repr(type(actual)))
- if not isinstance(desired, str):
- raise AssertionError(repr(type(desired)))
- if re.match(r'\A'+desired+r'\Z', actual, re.M):
- return
-
- diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
- diff_list = []
- while diff:
- d1 = diff.pop(0)
- if d1.startswith(' '):
- continue
- if d1.startswith('- '):
- l = [d1]
- d2 = diff.pop(0)
- if d2.startswith('? '):
- l.append(d2)
- d2 = diff.pop(0)
- if not d2.startswith('+ '):
- raise AssertionError(repr(d2))
- l.append(d2)
- if diff:
- d3 = diff.pop(0)
- if d3.startswith('? '):
- l.append(d3)
- else:
- diff.insert(0, d3)
- if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
- continue
- diff_list.extend(l)
- continue
- raise AssertionError(repr(d1))
- if not diff_list:
- return
- msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
- if actual != desired:
- raise AssertionError(msg)
-
-
-def rundocs(filename=None, raise_on_error=True):
- """
- Run doctests found in the given file.
-
- By default `rundocs` raises an AssertionError on failure.
-
- Parameters
- ----------
- filename : str
- The path to the file for which the doctests are run.
- raise_on_error : bool
- Whether to raise an AssertionError when a doctest fails. Default is
- True.
-
- Notes
- -----
- The doctests can be run by the user/developer by adding the ``doctests``
- argument to the ``test()`` call. For example, to run all tests (including
- doctests) for `numpy.lib`:
-
- >>> np.lib.test(doctests=True) #doctest: +SKIP
- """
- from numpy.compat import npy_load_module
- import doctest
- if filename is None:
- f = sys._getframe(1)
- filename = f.f_globals['__file__']
- name = os.path.splitext(os.path.basename(filename))[0]
- m = npy_load_module(name, filename)
-
- tests = doctest.DocTestFinder().find(m)
- runner = doctest.DocTestRunner(verbose=False)
-
- msg = []
- if raise_on_error:
- out = lambda s: msg.append(s)
- else:
- out = None
-
- for test in tests:
- runner.run(test, out=out)
-
- if runner.failures > 0 and raise_on_error:
- raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
-
-
-def raises(*exceptions):
- """
- Decorator: the wrapped test passes only if calling it raises one of the
- given exceptions. (This really belongs in decorators.py.)
-
- """
-
- def raises_decorator(f):
-
- def raiser(*args, **kwargs):
- try:
- f(*args, **kwargs)
- except exceptions:
- return
- raise AssertionError()
-
- return raiser
-
-
- return raises_decorator
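Usage sketch: the decorated test passes only when one of the listed exceptions is raised:

    @raises(ValueError, TypeError)
    def test_bad_input():
        int('not a number')   # ValueError -> the test passes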
-
-
-def assert_raises(exception_class, fn=None, *args, **kwargs):
- """
- assert_raises(exception_class, callable, *args, **kwargs)
- assert_raises(exception_class)
-
- Fail unless an exception of class exception_class is thrown
- by callable when invoked with arguments args and keyword
- arguments kwargs. If a different type of exception is
- thrown, it will not be caught, and the test case will be
- deemed to have suffered an error, exactly as for an
- unexpected exception.
-
- Alternatively, `assert_raises` can be used as a context manager:
-
- >>> from numpy.testing import assert_raises
- >>> with assert_raises(ZeroDivisionError):
- ... 1 / 0
-
- is equivalent to
-
- >>> def div(x, y):
- ... return x / y
- >>> assert_raises(ZeroDivisionError, div, 1, 0)
-
- """
- import pytest
-
- __tracebackhide__ = True # Hide traceback for py.test
-
- if fn is not None:
- pytest.raises(exception_class, fn, *args,**kwargs)
- else:
- assert not kwargs
-
- return pytest.raises(exception_class)
-
-
-def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
- """
- assert_raises_regex(exception_class, expected_regexp, callable, *args,
- **kwargs)
- assert_raises_regex(exception_class, expected_regexp)
-
- Fail unless an exception of class exception_class and with message that
- matches expected_regexp is thrown by callable when invoked with arguments
- args and keyword arguments kwargs.
-
- Alternatively, can be used as a context manager like `assert_raises`.
-
- Name of this function adheres to Python 3.2+ reference, but should work in
- all versions down to 2.6.
-
- Notes
- -----
- .. versionadded:: 1.9.0
-
- """
- import unittest
-
- class Dummy(unittest.TestCase):
- def do_nothing(self):
- pass
-
- tmp = Dummy('do_nothing')
-
- __tracebackhide__ = True # Hide traceback for py.test
-
- if sys.version_info.major >= 3:
- funcname = tmp.assertRaisesRegex
- else:
- # Only present in Python 2.7, missing from unittest in 2.6
- funcname = tmp.assertRaisesRegexp
-
- return funcname(exception_class, expected_regexp, *args, **kwargs)
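A usage sketch of the context-manager form:

    from numpy.testing import assert_raises_regex

    with assert_raises_regex(ValueError, 'invalid literal'):
        int('abc')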
-
-
-def decorate_methods(cls, decorator, testmatch=None):
- """
- Apply a decorator to all methods in a class matching a regular expression.
-
- The given decorator is applied to all public methods of `cls` that are
- matched by the regular expression `testmatch`
- (``testmatch.search(methodname)``). Methods that are private, i.e. start
- with an underscore, are ignored.
-
- Parameters
- ----------
- cls : class
- Class whose methods to decorate.
- decorator : function
- Decorator to apply to methods
- testmatch : compiled regexp or str, optional
- The regular expression. Default value is None, in which case the
- nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
- is used.
- If `testmatch` is a string, it is compiled to a regular expression
- first.
-
- """
- if testmatch is None:
- testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
- else:
- testmatch = re.compile(testmatch)
- cls_attr = cls.__dict__
-
- # delayed import to reduce startup time
- from inspect import isfunction
-
- methods = [_m for _m in cls_attr.values() if isfunction(_m)]
- for function in methods:
- try:
- if hasattr(function, 'compat_func_name'):
- funcname = function.compat_func_name
- else:
- funcname = function.__name__
- except AttributeError:
- # not a function
- continue
- if testmatch.search(funcname) and not funcname.startswith('_'):
- setattr(cls, funcname, decorator(function))
- return
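A sketch of the intended use, with a hypothetical timing decorator applied to every test method of a class:

    import functools
    import time

    def timed(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            t0 = time.time()
            try:
                return f(*args, **kwargs)
            finally:
                print('%s took %.3fs' % (f.__name__, time.time() - t0))
        return wrapper

    class TestSomething(object):
        def test_one(self):
            pass

    decorate_methods(TestSomething, timed)   # wraps methods matching [Tt]est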
-
-
-def measure(code_str,times=1,label=None):
- """
- Return elapsed time for executing code in the namespace of the caller.
-
- The supplied code string is compiled with the Python builtin ``compile``.
- The precision of the timing is 10 milli-seconds. If the code will execute
- fast on this timescale, it can be executed many times to get reasonable
- timing accuracy.
-
- Parameters
- ----------
- code_str : str
- The code to be timed.
- times : int, optional
- The number of times the code is executed. Default is 1. The code is
- only compiled once.
- label : str, optional
- A label to identify `code_str` with. This is passed into ``compile``
- as the second argument (for run-time error messages).
-
- Returns
- -------
- elapsed : float
- Total elapsed time in seconds for executing `code_str` `times` times.
-
- Examples
- --------
- >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
- ... times=times)
- >>> print("Time for a single execution : ", etime / times, "s")
- Time for a single execution : 0.005 s
-
- """
- frame = sys._getframe(1)
- locs, globs = frame.f_locals, frame.f_globals
-
- code = compile(code_str,
- 'Test name: %s ' % label,
- 'exec')
- i = 0
- elapsed = jiffies()
- while i < times:
- i += 1
- exec(code, globs, locs)
- elapsed = jiffies() - elapsed
- return 0.01*elapsed
-
-
-def _assert_valid_refcount(op):
- """
- Check that ufuncs don't mishandle refcount of object `1`.
- Used in a few regression tests.
- """
- if not HAS_REFCOUNT:
- return True
- import numpy as np
-
- b = np.arange(100*100).reshape(100, 100)
- c = b
- i = 1
-
- rc = sys.getrefcount(i)
- for j in range(15):
- d = op(b, c)
- assert_(sys.getrefcount(i) >= rc)
- del d # for pyflakes
-
-
-def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
- err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal up to desired
- tolerance.
-
- The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
- It compares the difference between `actual` and `desired` to
- ``atol + rtol * abs(desired)``.
-
- .. versionadded:: 1.5.0
-
- Parameters
- ----------
- actual : array_like
- Array obtained.
- desired : array_like
- Array desired.
- rtol : float, optional
- Relative tolerance.
- atol : float, optional
- Absolute tolerance.
- equal_nan : bool, optional.
- If True, NaNs will compare equal.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_array_almost_equal_nulp, assert_array_max_ulp
-
- Examples
- --------
- >>> x = [1e-5, 1e-3, 1e-1]
- >>> y = np.arccos(np.cos(x))
- >>> assert_allclose(x, y, rtol=1e-5, atol=0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
-
- def compare(x, y):
- return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
- equal_nan=equal_nan)
-
- actual, desired = np.asanyarray(actual), np.asanyarray(desired)
- header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
- assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
- verbose=verbose, header=header, equal_nan=equal_nan)
-
-
-def assert_array_almost_equal_nulp(x, y, nulp=1):
- """
- Compare two arrays relatively to their spacing.
-
- This is a relatively robust method to compare two arrays whose amplitude
- is variable.
-
- Parameters
- ----------
- x, y : array_like
- Input arrays.
- nulp : int, optional
- The maximum number of unit in the last place for tolerance (see Notes).
- Default is 1.
-
- Returns
- -------
- None
-
- Raises
- ------
- AssertionError
- If the spacing between `x` and `y` for one or more elements is larger
- than `nulp`.
-
- See Also
- --------
- assert_array_max_ulp : Check that all items of arrays differ in at most
- N Units in the Last Place.
- spacing : Return the distance between x and the nearest adjacent number.
-
- Notes
- -----
- An assertion is raised if the following condition is not met::
-
- abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
-
- Examples
- --------
- >>> x = np.array([1., 1e-10, 1e-20])
- >>> eps = np.finfo(x.dtype).eps
- >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
-
- >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
- Traceback (most recent call last):
- ...
- AssertionError: X and Y are not equal to 1 ULP (max is 2)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
- ax = np.abs(x)
- ay = np.abs(y)
- ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
- if not np.all(np.abs(x-y) <= ref):
- if np.iscomplexobj(x) or np.iscomplexobj(y):
- msg = "X and Y are not equal to %d ULP" % nulp
- else:
- max_nulp = np.max(nulp_diff(x, y))
- msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
- raise AssertionError(msg)
-
-
-def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
- """
- Check that all items of arrays differ in at most N Units in the Last Place.
-
- Parameters
- ----------
- a, b : array_like
- Input arrays to be compared.
- maxulp : int, optional
- The maximum number of units in the last place that elements of `a` and
- `b` can differ. Default is 1.
- dtype : dtype, optional
- Data-type to convert `a` and `b` to if given. Default is None.
-
- Returns
- -------
- ret : ndarray
- Array containing number of representable floating point numbers between
- items in `a` and `b`.
-
- Raises
- ------
- AssertionError
- If one or more elements differ by more than `maxulp`.
-
- See Also
- --------
- assert_array_almost_equal_nulp : Compare two arrays relatively to their
- spacing.
-
- Examples
- --------
- >>> a = np.linspace(0., 1., 100)
- >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
- ret = nulp_diff(a, b, dtype)
- if not np.all(ret <= maxulp):
- raise AssertionError("Arrays are not almost equal up to %g ULP" %
- maxulp)
- return ret
-
-
-def nulp_diff(x, y, dtype=None):
- """For each item in x and y, return the number of representable floating
- points between them.
-
- Parameters
- ----------
- x : array_like
- first input array
- y : array_like
- second input array
- dtype : dtype, optional
- Data-type to convert `x` and `y` to if given. Default is None.
-
- Returns
- -------
- nulp : array_like
- number of representable floating point numbers between each item in x
- and y.
-
- Examples
- --------
- # By definition, epsilon is the smallest number such as 1 + eps != 1, so
- # there should be exactly one ULP between 1 and 1 + eps
- >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
- 1.0
- """
- import numpy as np
- if dtype:
- x = np.array(x, dtype=dtype)
- y = np.array(y, dtype=dtype)
- else:
- x = np.array(x)
- y = np.array(y)
-
- t = np.common_type(x, y)
- if np.iscomplexobj(x) or np.iscomplexobj(y):
- raise NotImplementedError("_nulp not implemented for complex array")
-
- x = np.array(x, dtype=t)
- y = np.array(y, dtype=t)
-
- if not x.shape == y.shape:
- raise ValueError("x and y do not have the same shape: %s - %s" %
- (x.shape, y.shape))
-
- def _diff(rx, ry, vdt):
- diff = np.array(rx-ry, dtype=vdt)
- return np.abs(diff)
-
- rx = integer_repr(x)
- ry = integer_repr(y)
- return _diff(rx, ry, t)
-
-
-def _integer_repr(x, vdt, comp):
- # Reinterpret binary representation of the float as sign-magnitude:
- # take the two's-complement representation into account
- # See also
- # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
- rx = x.view(vdt)
- if not (rx.size == 1):
- rx[rx < 0] = comp - rx[rx < 0]
- else:
- if rx < 0:
- rx = comp - rx
-
- return rx
-
-
-def integer_repr(x):
- """Return the signed-magnitude interpretation of the binary representation of
- x."""
- import numpy as np
- if x.dtype == np.float16:
- return _integer_repr(x, np.int16, np.int16(-2**15))
- elif x.dtype == np.float32:
- return _integer_repr(x, np.int32, np.int32(-2**31))
- elif x.dtype == np.float64:
- return _integer_repr(x, np.int64, np.int64(-2**63))
- else:
- raise ValueError("Unsupported dtype %s" % x.dtype)
-
-
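# A minimal sketch (assuming IEEE float64) of how the sign-magnitude
# reinterpretation above turns ULP distance into integer subtraction; both
# values are positive, so the two's-complement adjustment is a no-op:
import numpy as np

x = np.float64(1.0)
y = x + np.finfo(np.float64).eps      # exactly one ULP above 1.0
ix = np.array(x).view(np.int64)       # bit pattern as a signed integer
iy = np.array(y).view(np.int64)
assert int(iy - ix) == 1              # adjacent floats differ by exactly 1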
-# The following two classes are copied from python 2.6 warnings module (context
-# manager)
-class WarningMessage(object):
-
- """
- Holds the result of a single showwarning() call.
-
- Deprecated in 1.8.0
-
- Notes
- -----
- `WarningMessage` is copied from the Python 2.6 warnings module,
- so it can be used in NumPy with older Python versions.
-
- """
-
- _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
- "line")
-
- def __init__(self, message, category, filename, lineno, file=None,
- line=None):
- local_values = locals()
- for attr in self._WARNING_DETAILS:
- setattr(self, attr, local_values[attr])
- if category:
- self._category_name = category.__name__
- else:
- self._category_name = None
-
- def __str__(self):
- return ("{message : %r, category : %r, filename : %r, lineno : %s, "
- "line : %r}" % (self.message, self._category_name,
- self.filename, self.lineno, self.line))
-
-
-class WarningManager(object):
- """
- A context manager that copies and restores the warnings filter upon
- exiting the context.
-
- The 'record' argument specifies whether warnings should be captured by a
- custom implementation of ``warnings.showwarning()`` and be appended to a
- list returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
-
- The 'module' argument is to specify an alternative module to the module
- named 'warnings' and imported under that name. This argument is only useful
- when testing the warnings module itself.
-
- Deprecated in 1.8.0
-
- Notes
- -----
- `WarningManager` is a copy of the ``catch_warnings`` context manager
- from the Python 2.6 warnings module, with slight modifications.
- It is copied so it can be used in NumPy with older Python versions.
-
- """
-
- def __init__(self, record=False, module=None):
- self._record = record
- if module is None:
- self._module = sys.modules['warnings']
- else:
- self._module = module
- self._entered = False
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("Cannot enter %r twice" % self)
- self._entered = True
- self._filters = self._module.filters
- self._module.filters = self._filters[:]
- self._showwarning = self._module.showwarning
- if self._record:
- log = []
-
- def showwarning(*args, **kwargs):
- log.append(WarningMessage(*args, **kwargs))
- self._module.showwarning = showwarning
- return log
- else:
- return None
-
- def __exit__(self, *exc_info):
- if not self._entered:
- raise RuntimeError("Cannot exit %r without entering first" % self)
- self._module.filters = self._filters
- self._module.showwarning = self._showwarning
-
-
-@contextlib.contextmanager
-def _assert_warns_context(warning_class, name=None):
- __tracebackhide__ = True # Hide traceback for py.test
- with suppress_warnings() as sup:
- l = sup.record(warning_class)
- yield
- if not len(l) > 0:
- name_str = " when calling %s" % name if name is not None else ""
- raise AssertionError("No warning raised" + name_str)
-
-
-def assert_warns(warning_class, *args, **kwargs):
- """
- Fail unless the given callable throws the specified warning.
-
- A warning of class warning_class should be thrown by the callable when
- invoked with arguments args and keyword arguments kwargs.
- If a different type of warning is thrown, it will not be caught.
-
- If called with all arguments other than the warning class omitted, may be
- used as a context manager:
-
- with assert_warns(SomeWarning):
- do_something()
-
- The ability to be used as a context manager is new in NumPy v1.11.0.
-
- .. versionadded:: 1.4.0
-
- Parameters
- ----------
- warning_class : class
- The class defining the warning that `func` is expected to throw.
- func : callable
- The callable to test.
- \\*args : Arguments
- Arguments passed to `func`.
- \\*\\*kwargs : Kwargs
- Keyword arguments passed to `func`.
-
- Returns
- -------
- The value returned by `func`.
-
- """
- if not args:
- return _assert_warns_context(warning_class)
-
- func = args[0]
- args = args[1:]
- with _assert_warns_context(warning_class, name=func.__name__):
- return func(*args, **kwargs)
-
-
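# A minimal usage sketch for assert_warns, exercising both the callable form
# and the context-manager form described above:
import warnings
from numpy.testing import assert_warns

def old_api():
    warnings.warn("old_api is deprecated", DeprecationWarning)

assert_warns(DeprecationWarning, old_api)    # callable form
with assert_warns(DeprecationWarning):       # context-manager form
    old_api()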
-@contextlib.contextmanager
-def _assert_no_warnings_context(name=None):
- __tracebackhide__ = True # Hide traceback for py.test
- with warnings.catch_warnings(record=True) as l:
- warnings.simplefilter('always')
- yield
- if len(l) > 0:
- name_str = " when calling %s" % name if name is not None else ""
- raise AssertionError("Got warnings%s: %s" % (name_str, l))
-
-
-def assert_no_warnings(*args, **kwargs):
- """
- Fail if the given callable produces any warnings.
-
- If called with all arguments omitted, may be used as a context manager:
-
- with assert_no_warnings():
- do_something()
-
- The ability to be used as a context manager is new in NumPy v1.11.0.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- func : callable
- The callable to test.
- \\*args : Arguments
- Arguments passed to `func`.
- \\*\\*kwargs : Kwargs
- Keyword arguments passed to `func`.
-
- Returns
- -------
- The value returned by `func`.
-
- """
- if not args:
- return _assert_no_warnings_context()
-
- func = args[0]
- args = args[1:]
- with _assert_no_warnings_context(name=func.__name__):
- return func(*args, **kwargs)
-
-
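# The matching sketch for assert_no_warnings; the callable form also hands
# back the callable's return value:
from numpy.testing import assert_no_warnings

result = assert_no_warnings(sorted, [3, 1, 2])
assert result == [1, 2, 3]
with assert_no_warnings():
    sum(result)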
-def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
- """
- generator producing data with different alignment and offsets
- to test simd vectorization
-
- Parameters
- ----------
- dtype : dtype
- data type to produce
- type : string
- 'unary': create data for unary operations, creates one input
- and one output array
- 'binary': create data for binary operations, creates two input
- and one output array
- max_size : integer
- maximum size of data to produce
-
- Returns
- -------
- if type is 'unary' yields one output array, one input array and a
- message containing information on the data
- if type is 'binary' yields one output array, two input arrays and a
- message containing information on the data
-
- """
- ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
- bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
- for o in range(3):
- for s in range(o + 2, max(o + 3, max_size)):
- if type == 'unary':
- inp = lambda: arange(s, dtype=dtype)[o:]
- out = empty((s,), dtype=dtype)[o:]
- yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
- d = inp()
- yield d, d, ufmt % (o, o, s, dtype, 'in place')
- yield out[1:], inp()[:-1], ufmt % \
- (o + 1, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp()[1:], ufmt % \
- (o, o + 1, s - 1, dtype, 'out of place')
- yield inp()[:-1], inp()[1:], ufmt % \
- (o, o + 1, s - 1, dtype, 'aliased')
- yield inp()[1:], inp()[:-1], ufmt % \
- (o + 1, o, s - 1, dtype, 'aliased')
- if type == 'binary':
- inp1 = lambda: arange(s, dtype=dtype)[o:]
- inp2 = lambda: arange(s, dtype=dtype)[o:]
- out = empty((s,), dtype=dtype)[o:]
- yield out, inp1(), inp2(), bfmt % \
- (o, o, o, s, dtype, 'out of place')
- d = inp1()
- yield d, d, inp2(), bfmt % \
- (o, o, o, s, dtype, 'in place1')
- d = inp2()
- yield d, inp1(), d, bfmt % \
- (o, o, o, s, dtype, 'in place2')
- yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
- (o + 1, o, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
- (o, o + 1, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
- (o, o, o + 1, s - 1, dtype, 'out of place')
- yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
- (o + 1, o, o, s - 1, dtype, 'aliased')
- yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
- (o, o + 1, o, s - 1, dtype, 'aliased')
- yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
- (o, o, o + 1, s - 1, dtype, 'aliased')
-
-
-
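# A minimal sketch of consuming _gen_alignment_data for a binary ufunc; the
# yielded message is meant for failure reports:
import numpy as np
from numpy.testing import _gen_alignment_data

for out, inp1, inp2, msg in _gen_alignment_data(dtype=np.float32,
                                                type='binary', max_size=8):
    expected = inp1.copy() + inp2.copy()   # compute before any aliasing
    np.add(inp1, inp2, out=out)            # exercises offset/aliased paths
    np.testing.assert_array_equal(out, expected, err_msg=msg)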
-@contextlib.contextmanager
-def tempdir(*args, **kwargs):
- """Context manager to provide a temporary test folder.
-
- All arguments are passed as-is to the underlying tempfile.mkdtemp
- function.
-
- """
- tmpdir = mkdtemp(*args, **kwargs)
- try:
- yield tmpdir
- finally:
- shutil.rmtree(tmpdir)
-
-
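# A minimal usage sketch for tempdir (the file name is illustrative):
import os
from numpy.testing import tempdir

with tempdir() as folder:
    path = os.path.join(folder, 'example.txt')
    with open(path, 'w') as f:
        f.write('42')
# the folder and everything inside it has been removed here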
-@contextlib.contextmanager
-def temppath(*args, **kwargs):
- """Context manager for temporary files.
-
- Context manager that returns the path to a closed temporary file. Its
- parameters are the same as for tempfile.mkstemp and are passed directly
- to that function. The underlying file is removed when the context is
- exited, so it should be closed at that time.
-
- Windows does not allow a temporary file to be opened while it is
- already open, so the underlying file must be closed right after
- creation before it can be opened again.
-
- """
- fd, path = mkstemp(*args, **kwargs)
- os.close(fd)
- try:
- yield path
- finally:
- os.remove(path)
-
-
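# The counterpart sketch for temppath; the file already exists but is
# closed, so reopening it works on Windows as well:
import numpy as np
from numpy.testing import temppath

with temppath(suffix='.npy') as path:
    np.save(path, np.arange(3))
    assert np.load(path).sum() == 3
# the file has been removed here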
-class clear_and_catch_warnings(warnings.catch_warnings):
- """ Context manager that resets warning registry for catching warnings
-
- Warnings can be slippery, because, whenever a warning is triggered, Python
- adds a ``__warningregistry__`` member to the *calling* module. This makes
- it impossible to retrigger the warning in this module, whatever you put in
- the warnings filters. This context manager accepts a sequence of `modules`
- as a keyword argument to its constructor and:
-
- * stores and removes any ``__warningregistry__`` entries in given `modules`
- on entry;
- * resets ``__warningregistry__`` to its previous state on exit.
-
- This makes it possible to trigger any warning afresh inside the context
- manager without disturbing the state of warnings outside.
-
- For compatibility with Python 3.0, please consider all arguments to be
- keyword-only.
-
- Parameters
- ----------
- record : bool, optional
- Specifies whether warnings should be captured by a custom
- implementation of ``warnings.showwarning()`` and be appended to a list
- returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
- modules : sequence, optional
- Sequence of modules for which to reset warnings registry on entry and
- restore on exit. To work correctly, all 'ignore' filters should
- filter by one of these modules.
-
- Examples
- --------
- >>> import warnings
- >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
- ... warnings.simplefilter('always')
- ... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
- ... # do something that raises a warning but ignore those in
- ... # np.core.fromnumeric
- """
- class_modules = ()
-
- def __init__(self, record=False, modules=()):
- self.modules = set(modules).union(self.class_modules)
- self._warnreg_copies = {}
- super(clear_and_catch_warnings, self).__init__(record=record)
-
- def __enter__(self):
- for mod in self.modules:
- if hasattr(mod, '__warningregistry__'):
- mod_reg = mod.__warningregistry__
- self._warnreg_copies[mod] = mod_reg.copy()
- mod_reg.clear()
- return super(clear_and_catch_warnings, self).__enter__()
-
- def __exit__(self, *exc_info):
- super(clear_and_catch_warnings, self).__exit__(*exc_info)
- for mod in self.modules:
- if hasattr(mod, '__warningregistry__'):
- mod.__warningregistry__.clear()
- if mod in self._warnreg_copies:
- mod.__warningregistry__.update(self._warnreg_copies[mod])
-
-
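# A hedged sketch of the class_modules hook above: a subclass can pin the
# modules whose registries are always cleared (the module chosen here is
# illustrative):
import warnings
import numpy.core.fromnumeric as fromnumeric
from numpy.testing import clear_and_catch_warnings

class clear_fromnumeric_warnings(clear_and_catch_warnings):
    class_modules = (fromnumeric,)

with clear_fromnumeric_warnings():
    warnings.simplefilter('always')
    # warnings from fromnumeric can now be retriggered reliably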
-class suppress_warnings(object):
- """
- Context manager and decorator doing much the same as
- ``warnings.catch_warnings``.
-
- However, it also provides a filter mechanism to work around
- http://bugs.python.org/issue4180.
-
- This bug causes Python before 3.4 to not reliably show warnings again
- after they have been ignored once (even within catch_warnings). It
- means that no "ignore" filter can be used easily, since following
- tests might need to see the warning. Additionally it allows more
- fine-grained filtering when testing warnings, and can be nested.
-
- Parameters
- ----------
- forwarding_rule : str, optional
- One of "always", "once", "module", or "location". Analogous to
- the usual warnings module filter mode, it is useful to reduce
- noise mostly on the outmost level. Unsuppressed and unrecorded
- warnings will be forwarded based on this rule. Defaults to "always".
- "location" is equivalent to the warnings "default", match by exact
- location the warning warning originated from.
-
- Notes
- -----
- Filters added inside the context manager will be discarded again
- when leaving it. Upon entering all filters defined outside a
- context will be applied automatically.
-
- When a recording filter is added, matching warnings are stored in the
- ``log`` attribute as well as in the list returned by ``record``.
-
- If filters are added and the ``module`` keyword is given, the
- warning registry of this module will additionally be cleared when
- applying it, entering the context, or exiting it. This could cause
- warnings to appear a second time after leaving the context if they
- were configured to be printed once (default) and were already
- printed before the context was entered.
-
- Nesting this context manager will work as expected when the
- forwarding rule is "always" (default). Unfiltered and unrecorded
- warnings will be passed out and be matched by the outer level.
- On the outermost level they will be printed (or caught by another
- warnings context). The forwarding rule argument can modify this
- behaviour.
-
- Like ``catch_warnings`` this context manager is not threadsafe.
-
- Examples
- --------
- >>> with suppress_warnings() as sup:
- ... sup.filter(DeprecationWarning, "Some text")
- ... sup.filter(module=np.ma.core)
- ... log = sup.record(FutureWarning, "Does this occur?")
- ... command_giving_warnings()
- ... # The FutureWarning was given once, the filtered warnings were
- ... # ignored. All other warnings abide by the outside settings (they
- ... # may be printed or turned into errors).
- ... assert_(len(log) == 1)
- ... assert_(len(sup.log) == 1) # also stored in log attribute
-
- Or as a decorator:
-
- >>> sup = suppress_warnings()
- >>> sup.filter(module=np.ma.core)  # module must match exactly
- >>> @sup
- ... def some_function():
- ... # do something which causes a warning in np.ma.core
- ... pass
- """
- def __init__(self, forwarding_rule="always"):
- self._entered = False
-
- # Suppressions are either instance or defined inside one with block:
- self._suppressions = []
-
- if forwarding_rule not in {"always", "module", "once", "location"}:
- raise ValueError("unsupported forwarding rule.")
- self._forwarding_rule = forwarding_rule
-
- def _clear_registries(self):
- if hasattr(warnings, "_filters_mutated"):
- # clearing the registry should not be necessary on new pythons;
- # instead, the filters should be mutated.
- warnings._filters_mutated()
- return
- # Simply clear the registry; this should normally be harmless.
- # Note that on new pythons it would be invalidated anyway.
- for module in self._tmp_modules:
- if hasattr(module, "__warningregistry__"):
- module.__warningregistry__.clear()
-
- def _filter(self, category=Warning, message="", module=None, record=False):
- if record:
- record = []  # The log in which to store matched warnings
- else:
- record = None
- if self._entered:
- if module is None:
- warnings.filterwarnings(
- "always", category=category, message=message)
- else:
- module_regex = module.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=category, message=message,
- module=module_regex)
- self._tmp_modules.add(module)
- self._clear_registries()
-
- self._tmp_suppressions.append(
- (category, message, re.compile(message, re.I), module, record))
- else:
- self._suppressions.append(
- (category, message, re.compile(message, re.I), module, record))
-
- return record
-
- def filter(self, category=Warning, message="", module=None):
- """
- Add a new suppressing filter or apply it if the state is entered.
-
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
-
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- self._filter(category=category, message=message, module=module,
- record=False)
-
- def record(self, category=Warning, message="", module=None):
- """
- Append a new recording filter or apply it if the state is entered.
-
- All warnings matching will be appended to the ``log`` attribute.
-
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
-
- Returns
- -------
- log : list
- A list which will be filled with all matched warnings.
-
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- return self._filter(category=category, message=message, module=module,
- record=True)
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("cannot enter suppress_warnings twice.")
-
- self._orig_show = warnings.showwarning
- self._filters = warnings.filters
- warnings.filters = self._filters[:]
-
- self._entered = True
- self._tmp_suppressions = []
- self._tmp_modules = set()
- self._forwarded = set()
-
- self.log = [] # reset global log (no need to keep same list)
-
- for cat, mess, _, mod, log in self._suppressions:
- if log is not None:
- del log[:] # clear the log
- if mod is None:
- warnings.filterwarnings(
- "always", category=cat, message=mess)
- else:
- module_regex = mod.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=cat, message=mess,
- module=module_regex)
- self._tmp_modules.add(mod)
- warnings.showwarning = self._showwarning
- self._clear_registries()
-
- return self
-
- def __exit__(self, *exc_info):
- warnings.showwarning = self._orig_show
- warnings.filters = self._filters
- self._clear_registries()
- self._entered = False
- del self._orig_show
- del self._filters
-
- def _showwarning(self, message, category, filename, lineno,
- *args, **kwargs):
- use_warnmsg = kwargs.pop("use_warnmsg", None)
- for cat, _, pattern, mod, rec in (
- self._suppressions + self._tmp_suppressions)[::-1]:
- if (issubclass(category, cat) and
- pattern.match(message.args[0]) is not None):
- if mod is None:
- # Message and category match, either recorded or ignored
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
- # Use startswith, because warnings strips the c or o from
- # .pyc/.pyo files.
- elif mod.__file__.startswith(filename):
- # The message and module (filename) match
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
-
- # There is no filter in place, so pass to the outside handler
- # unless we should only pass it once
- if self._forwarding_rule == "always":
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno,
- *args, **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
- return
-
- if self._forwarding_rule == "once":
- signature = (message.args, category)
- elif self._forwarding_rule == "module":
- signature = (message.args, category, filename)
- elif self._forwarding_rule == "location":
- signature = (message.args, category, filename, lineno)
-
- if signature in self._forwarded:
- return
- self._forwarded.add(signature)
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno, *args,
- **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
-
- def __call__(self, func):
- """
- Function decorator to apply certain suppressions to a whole
- function.
- """
- @wraps(func)
- def new_func(*args, **kwargs):
- with self:
- return func(*args, **kwargs)
-
- return new_func
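# A minimal sketch of the "once" forwarding rule: unmatched warnings are
# forwarded to the outer level only once per distinct message and category
# (the warning text is illustrative):
import warnings
from numpy.testing import suppress_warnings

with suppress_warnings(forwarding_rule="once") as sup:
    sup.filter(DeprecationWarning)              # fully suppressed
    for _ in range(3):
        warnings.warn("noisy", UserWarning)     # forwarded a single time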
from numpy.distutils.misc_util import Configuration
config = Configuration('testing', parent_package, top_path)
- config.add_subpackage('nose_tools')
- config.add_subpackage('pytest_tools')
+ config.add_subpackage('_private')
config.add_data_dir('tests')
return config
from __future__ import division, absolute_import, print_function
import warnings
+import pytest
-from numpy.testing import (dec, assert_, assert_raises, run_module_suite,
- SkipTest, KnownFailureException)
+from numpy.testing import (
+ assert_, assert_raises, dec, SkipTest, KnownFailureException,
+ )
-def test_slow():
- @dec.slow
- def slow_func(x, y, z):
- pass
-
- assert_(slow_func.slow)
+try:
+ import nose
+except ImportError:
+ HAVE_NOSE = False
+else:
+ HAVE_NOSE = True
-def test_setastest():
- @dec.setastest()
- def f_default(a):
- pass
-
- @dec.setastest(True)
- def f_istest(a):
- pass
+@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose")
+class TestNoseDecorators(object):
+ # These tests are run in a class for simplicity while still
+ # getting a report on each one, whether skipped or successful.
- @dec.setastest(False)
- def f_isnottest(a):
+ class DidntSkipException(Exception):
pass
- assert_(f_default.__test__)
- assert_(f_istest.__test__)
- assert_(not f_isnottest.__test__)
-
-
-class DidntSkipException(Exception):
- pass
-
-def test_skip_functions_hardcoded():
- @dec.skipif(True)
- def f1(x):
- raise DidntSkipException
-
- try:
- f1('a')
- except DidntSkipException:
- raise Exception('Failed to skip')
- except SkipTest().__class__:
- pass
+ def test_slow(self):
+ import nose
+ @dec.slow
+ def slow_func(x, y, z):
+ pass
- @dec.skipif(False)
- def f2(x):
- raise DidntSkipException
-
- try:
- f2('a')
- except DidntSkipException:
- pass
- except SkipTest().__class__:
- raise Exception('Skipped when not expected to')
+ assert_(slow_func.slow)
+ def test_setastest(self):
+ @dec.setastest()
+ def f_default(a):
+ pass
-def test_skip_functions_callable():
- def skip_tester():
- return skip_flag == 'skip me!'
+ @dec.setastest(True)
+ def f_istest(a):
+ pass
- @dec.skipif(skip_tester)
- def f1(x):
- raise DidntSkipException
+ @dec.setastest(False)
+ def f_isnottest(a):
+ pass
- try:
- skip_flag = 'skip me!'
- f1('a')
- except DidntSkipException:
- raise Exception('Failed to skip')
- except SkipTest().__class__:
- pass
+ assert_(f_default.__test__)
+ assert_(f_istest.__test__)
+ assert_(not f_isnottest.__test__)
- @dec.skipif(skip_tester)
- def f2(x):
- raise DidntSkipException
- try:
- skip_flag = 'five is right out!'
- f2('a')
- except DidntSkipException:
- pass
- except SkipTest().__class__:
- raise Exception('Skipped when not expected to')
+ def test_skip_functions_hardcoded(self):
+ @dec.skipif(True)
+ def f1(x):
+ raise self.DidntSkipException
+ try:
+ f1('a')
+ except self.DidntSkipException:
+ raise Exception('Failed to skip')
+ except SkipTest().__class__:
+ pass
-def test_skip_generators_hardcoded():
- @dec.knownfailureif(True, "This test is known to fail")
- def g1(x):
- for i in range(x):
- yield i
+ @dec.skipif(False)
+ def f2(x):
+ raise self.DidntSkipException
- try:
- for j in g1(10):
+ try:
+ f2('a')
+ except self.DidntSkipException:
+ pass
+ except SkipTest().__class__:
+ raise Exception('Skipped when not expected to')
+
+ def test_skip_functions_callable(self):
+ def skip_tester():
+ return skip_flag == 'skip me!'
+
+ @dec.skipif(skip_tester)
+ def f1(x):
+ raise self.DidntSkipException
+
+ try:
+ skip_flag = 'skip me!'
+ f1('a')
+ except self.DidntSkipException:
+ raise Exception('Failed to skip')
+ except SkipTest().__class__:
pass
- except KnownFailureException().__class__:
- pass
- else:
- raise Exception('Failed to mark as known failure')
- @dec.knownfailureif(False, "This test is NOT known to fail")
- def g2(x):
- for i in range(x):
- yield i
- raise DidntSkipException('FAIL')
+ @dec.skipif(skip_tester)
+ def f2(x):
+ raise self.DidntSkipException
- try:
- for j in g2(10):
+ try:
+ skip_flag = 'five is right out!'
+ f2('a')
+ except self.DidntSkipException:
+ pass
+ except SkipTest().__class__:
+ raise Exception('Skipped when not expected to')
+
+ def test_skip_generators_hardcoded(self):
+ @dec.knownfailureif(True, "This test is known to fail")
+ def g1(x):
+ for i in range(x):
+ yield i
+
+ try:
+ for j in g1(10):
+ pass
+ except KnownFailureException().__class__:
+ pass
+ else:
+ raise Exception('Failed to mark as known failure')
+
+ @dec.knownfailureif(False, "This test is NOT known to fail")
+ def g2(x):
+ for i in range(x):
+ yield i
+ raise self.DidntSkipException('FAIL')
+
+ try:
+ for j in g2(10):
+ pass
+ except KnownFailureException().__class__:
+ raise Exception('Marked incorrectly as known failure')
+ except self.DidntSkipException:
pass
- except KnownFailureException().__class__:
- raise Exception('Marked incorrectly as known failure')
- except DidntSkipException:
- pass
-
-def test_skip_generators_callable():
- def skip_tester():
- return skip_flag == 'skip me!'
+ def test_skip_generators_callable(self):
+ def skip_tester():
+ return skip_flag == 'skip me!'
- @dec.knownfailureif(skip_tester, "This test is known to fail")
- def g1(x):
- for i in range(x):
- yield i
+ @dec.knownfailureif(skip_tester, "This test is known to fail")
+ def g1(x):
+ for i in range(x):
+ yield i
- try:
- skip_flag = 'skip me!'
- for j in g1(10):
+ try:
+ skip_flag = 'skip me!'
+ for j in g1(10):
+ pass
+ except KnownFailureException().__class__:
pass
- except KnownFailureException().__class__:
- pass
- else:
- raise Exception('Failed to mark as known failure')
-
- @dec.knownfailureif(skip_tester, "This test is NOT known to fail")
- def g2(x):
- for i in range(x):
- yield i
- raise DidntSkipException('FAIL')
-
- try:
- skip_flag = 'do not skip'
- for j in g2(10):
+ else:
+ raise Exception('Failed to mark as known failure')
+
+ @dec.knownfailureif(skip_tester, "This test is NOT known to fail")
+ def g2(x):
+ for i in range(x):
+ yield i
+ raise self.DidntSkipException('FAIL')
+
+ try:
+ skip_flag = 'do not skip'
+ for j in g2(10):
+ pass
+ except KnownFailureException().__class__:
+ raise Exception('Marked incorrectly as known failure')
+ except self.DidntSkipException:
pass
- except KnownFailureException().__class__:
- raise Exception('Marked incorrectly as known failure')
- except DidntSkipException:
- pass
+ def test_deprecated(self):
+ @dec.deprecated(True)
+ def non_deprecated_func():
+ pass
-def test_deprecated():
- @dec.deprecated(True)
- def non_deprecated_func():
- pass
-
- @dec.deprecated()
- def deprecated_func():
- import warnings
- warnings.warn("TEST: deprecated func", DeprecationWarning)
-
- @dec.deprecated()
- def deprecated_func2():
- import warnings
- warnings.warn("AHHHH")
- raise ValueError
-
- @dec.deprecated()
- def deprecated_func3():
- import warnings
- warnings.warn("AHHHH")
-
- # marked as deprecated, but does not raise DeprecationWarning
- assert_raises(AssertionError, non_deprecated_func)
- # should be silent
- deprecated_func()
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always") # do not propagate unrelated warnings
- # fails if deprecated decorator just disables test. See #1453.
- assert_raises(ValueError, deprecated_func2)
- # warning is not a DeprecationWarning
- assert_raises(AssertionError, deprecated_func3)
-
-
-@dec.parametrize('base, power, expected',
- [(1, 1, 1),
- (2, 1, 2),
- (2, 2, 4)])
-def test_parametrize(base, power, expected):
- assert_(base**power == expected)
-
-
-if __name__ == '__main__':
- run_module_suite()
+ @dec.deprecated()
+ def deprecated_func():
+ import warnings
+ warnings.warn("TEST: deprecated func", DeprecationWarning)
+
+ @dec.deprecated()
+ def deprecated_func2():
+ import warnings
+ warnings.warn("AHHHH")
+ raise ValueError
+
+ @dec.deprecated()
+ def deprecated_func3():
+ import warnings
+ warnings.warn("AHHHH")
+
+ # marked as deprecated, but does not raise DeprecationWarning
+ assert_raises(AssertionError, non_deprecated_func)
+ # should be silent
+ deprecated_func()
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("always") # do not propagate unrelated warnings
+ # fails if deprecated decorator just disables test. See #1453.
+ assert_raises(ValueError, deprecated_func2)
+ # warning is not a DeprecationWarning
+ assert_raises(AssertionError, deprecated_func3)
+
+ def test_parametrize(self):
+ # dec.parametrize assumes that it is being run by nose. Because
+ # we are running under pytest, we need to explicitly check the
+ # results.
+ @dec.parametrize('base, power, expected',
+ [(1, 1, 1),
+ (2, 1, 2),
+ (2, 2, 4)])
+ def check_parametrize(base, power, expected):
+ assert_(base**power == expected)
+
+ count = 0
+ for test in check_parametrize():
+ test[0](*test[1:])
+ count += 1
+ assert_(count == 3)
"""
from __future__ import division, absolute_import, print_function
+#FIXME: None of these tests is run, because 'check' is not a recognized
+# testing prefix.
+
# try the #random directive on the output line
def check_random_directive():
'''
import os
import itertools
import textwrap
+import pytest
+import weakref
import numpy as np
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, assert_array_less, build_err_msg,
- raises, assert_raises, assert_warns, assert_no_warnings,
- assert_allclose, assert_approx_equal,
- assert_array_almost_equal_nulp, assert_array_max_ulp,
- clear_and_catch_warnings, suppress_warnings, run_module_suite,
- assert_string_equal, assert_, tempdir, temppath,
+ assert_array_almost_equal, assert_array_less, build_err_msg, raises,
+ assert_raises, assert_warns, assert_no_warnings, assert_allclose,
+ assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
+ clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
+ tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
)
self._test_not_equal(c, b)
assert_equal(len(l), 1)
+ def test_masked_nan_inf(self):
+ # Regression test for gh-11121
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
+ b = np.array([3., np.nan, 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
+ b = np.array([np.inf, 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+
class TestBuildErrorMessage(object):
def test_error_message(self):
try:
- self._assert_func(np.array([1, 2]), np.matrix([1, 2]))
+ self._assert_func(np.array([1, 2]), np.array([[1, 2]]))
except AssertionError as e:
msg = str(e)
msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
(shapes (2,), (1, 2) mismatch)
x: array([1, 2])
- y: matrix([[1, 2]])""")
+ y: array([[1, 2]])""")
try:
assert_equal(msg, msg_reference)
except AssertionError:
self._assert_func(b, a)
self._assert_func(b, b)
- def test_matrix(self):
- # Matrix slicing keeps things 2-D, while array does not necessarily.
- # See gh-8452.
- m1 = np.matrix([[1., 2.]])
- m2 = np.matrix([[1., np.nan]])
- m3 = np.matrix([[1., -np.inf]])
- m4 = np.matrix([[np.nan, np.inf]])
- m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
- for m in m1, m2, m3, m4, m5:
- self._assert_func(m, m)
- a = np.array(m)
- self._assert_func(a, m)
- self._assert_func(m, a)
+ # Test fully masked as well (see gh-11123).
+ a = np.ma.MaskedArray(3.5, mask=True)
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.masked
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array([1., 2., 3.])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array(1.)
+ self._test_equal(a, b)
+ self._test_equal(b, a)
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
+ def __eq__(self, other):
+ return super(MyArray, self).__eq__(other).view(np.ndarray)
+
def __lt__(self, other):
return super(MyArray, self).__lt__(other).view(np.ndarray)
# remove anything that's not the array string
assert_equal(str(e).split('%)\n ')[1], b)
- def test_matrix(self):
- # Matrix slicing keeps things 2-D, while array does not necessarily.
- # See gh-8452.
- m1 = np.matrix([[1., 2.]])
- m2 = np.matrix([[1., np.nan]])
- m3 = np.matrix([[1., -np.inf]])
- m4 = np.matrix([[np.nan, np.inf]])
- m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
- for m in m1, m2, m3, m4, m5:
- self._assert_func(m, m)
- a = np.array(m)
- self._assert_func(a, m)
- self._assert_func(m, a)
-
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
+ def __eq__(self, other):
+ return super(MyArray, self).__eq__(other).view(np.ndarray)
+
def __lt__(self, other):
return super(MyArray, self).__lt__(other).view(np.ndarray)
self._assert_func(-ainf, x)
+@pytest.mark.skip(reason="The raises decorator depends on Nose")
class TestRaises(object):
def setup(self):
assert_equal(my_mod.__warningregistry__, {})
-if __name__ == '__main__':
- run_module_suite()
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestAssertNoGcCycles(object):
+ """ Test assert_no_gc_cycles """
+ def test_passes(self):
+ def no_cycle():
+ b = []
+ b.append([])
+ return b
+
+ with assert_no_gc_cycles():
+ no_cycle()
+
+ assert_no_gc_cycles(no_cycle)
+
+
+ def test_asserts(self):
+ def make_cycle():
+ a = []
+ a.append(a)
+ a.append(a)
+ return a
+
+ with assert_raises(AssertionError):
+ with assert_no_gc_cycles():
+ make_cycle()
+
+ with assert_raises(AssertionError):
+ assert_no_gc_cycles(make_cycle)
+
+
+ def test_fails(self):
+ """
+ Test that in cases where the garbage cannot be collected, we raise an
+ error, instead of hanging forever trying to clear it.
+ """
+
+ class ReferenceCycleInDel(object):
+ """
+ An object that not only contains a reference cycle, but creates new
+ cycles whenever it's garbage-collected and its __del__ runs
+ """
+ make_cycle = True
+
+ def __init__(self):
+ self.cycle = self
+
+ def __del__(self):
+ # break the current cycle so that `self` can be freed
+ self.cycle = None
+
+ if ReferenceCycleInDel.make_cycle:
+ # but create a new one so that the garbage collector has more
+ # work to do.
+ ReferenceCycleInDel()
+
+ try:
+ w = weakref.ref(ReferenceCycleInDel())
+ try:
+ with assert_raises(RuntimeError):
+ # this will be unable to get a baseline empty garbage
+ assert_no_gc_cycles(lambda: None)
+ except AssertionError:
+ # the above test is only necessary if the GC actually tried to free
+ # our object anyway, which python 2.7 does not.
+ if w() is not None:
+ pytest.skip("GC does not call __del__ on cyclic objects")
+ raise
+
+ finally:
+ # make sure that we stop creating reference cycles
+ ReferenceCycleInDel.make_cycle = False
set of tools
"""
-import os
+from __future__ import division, absolute_import, print_function
-from .nose_tools.utils import *
+import warnings
+
+# 2018-04-04, numpy 1.15.0
+warnings.warn("Importing from numpy.testing.utils is deprecated, "
+ "import from numpy.testing instead.",
+ ImportWarning, stacklevel=2)
+
+from ._private.utils import *
__all__ = [
'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
- '_assert_valid_refcount', '_gen_alignment_data',
+ '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles'
]
from __future__ import division, absolute_import, print_function
import sys
+import pytest
import numpy as np
-from numpy.ctypeslib import ndpointer, load_library
+from numpy.ctypeslib import ndpointer, load_library, as_array
from numpy.distutils.misc_util import get_shared_lib_extension
-from numpy.testing import run_module_suite, assert_, assert_raises, dec
+from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal
try:
cdll = None
except ImportError:
_HAS_CTYPE = False
+
+@pytest.mark.skipif(not _HAS_CTYPE,
+ reason="ctypes not available in this python")
+@pytest.mark.skipif(sys.platform == 'cygwin',
+ reason="Known to fail on cygwin")
class TestLoadLibrary(object):
- @dec.skipif(not _HAS_CTYPE,
- "ctypes not available on this python installation")
- @dec.knownfailureif(sys.platform ==
- 'cygwin', "This test is known to fail on cygwin")
def test_basic(self):
try:
# Should succeed
" (import error was: %s)" % str(e))
print(msg)
- @dec.skipif(not _HAS_CTYPE,
- "ctypes not available on this python installation")
- @dec.knownfailureif(sys.platform ==
- 'cygwin', "This test is known to fail on cygwin")
def test_basic2(self):
# Regression for #801: load_library with a full library name
# (including extension) does not work.
" (import error was: %s)" % str(e))
print(msg)
+
class TestNdpointer(object):
def test_dtype(self):
dt = np.intc
assert_(a1 == a2)
-if __name__ == "__main__":
- run_module_suite()
+@pytest.mark.skipif(not _HAS_CTYPE,
+ reason="ctypes not available on this python installation")
+class TestAsArray(object):
+ def test_array(self):
+ from ctypes import c_int
+
+ pair_t = c_int * 2
+ a = as_array(pair_t(1, 2))
+ assert_equal(a.shape, (2,))
+ assert_array_equal(a, np.array([1, 2]))
+ a = as_array((pair_t * 3)(pair_t(1, 2), pair_t(3, 4), pair_t(5, 6)))
+ assert_equal(a.shape, (3, 2))
+ assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]]))
+
+ def test_pointer(self):
+ from ctypes import c_int, cast, POINTER
+
+ p = cast((c_int * 10)(*range(10)), POINTER(c_int))
+
+ a = as_array(p, shape=(10,))
+ assert_equal(a.shape, (10,))
+ assert_array_equal(a, np.arange(10))
+
+ a = as_array(p, shape=(2, 5))
+ assert_equal(a.shape, (2, 5))
+ assert_array_equal(a, np.arange(10).reshape((2, 5)))
+
+ # shape argument is required
+ assert_raises(TypeError, as_array, p)
+
+ def test_struct_array_pointer(self):
+ from ctypes import c_int16, Structure, pointer
+
+ class Struct(Structure):
+ _fields_ = [('a', c_int16)]
+
+ Struct3 = 3 * Struct
+
+ c_array = (2 * Struct3)(
+ Struct3(Struct(a=1), Struct(a=2), Struct(a=3)),
+ Struct3(Struct(a=4), Struct(a=5), Struct(a=6))
+ )
+
+ expected = np.array([
+ [(1,), (2,), (3,)],
+ [(4,), (5,), (6,)],
+ ], dtype=[('a', np.int16)])
+
+ def check(x):
+ assert_equal(x.dtype, expected.dtype)
+ assert_equal(x, expected)
+
+ # all of these should be equivalent
+ check(as_array(c_array))
+ check(as_array(pointer(c_array), shape=()))
+ check(as_array(pointer(c_array[0]), shape=(2,)))
+ check(as_array(pointer(c_array[0][0]), shape=(2, 3)))
from __future__ import division, absolute_import, print_function
+# As we are testing matrices, we ignore its PendingDeprecationWarnings
+try:
+ import pytest
+ pytestmark = pytest.mark.filterwarnings(
+ 'ignore:the matrix subclass is not:PendingDeprecationWarning')
+except ImportError:
+ pass
+
import numpy as np
import numpy.matlib
-from numpy.testing import assert_array_equal, assert_, run_module_suite
+from numpy.testing import assert_array_equal, assert_
def test_empty():
x = numpy.matlib.empty((2,))
y = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
[0, 1, 2, 3, 0, 1, 2, 3]])
assert_array_equal(x, y)
-
-
-if __name__ == "__main__":
- run_module_suite()
import re
import numpy as np
-from numpy.testing import assert_, run_module_suite
+from numpy.testing import assert_
def test_valid_numpy_version():
res = re.match(version_pattern + dev_suffix, np.__version__)
assert_(res is not None, np.__version__)
-
-
-if __name__ == "__main__":
- run_module_suite()
import sys
import pickle
-from numpy.testing import assert_raises, assert_, assert_equal, run_module_suite
+from numpy.testing import assert_raises, assert_, assert_equal
if sys.version_info[:2] >= (3, 4):
from importlib import reload
import numpy as np
assert_equal(repr(np._NoValue), '<no value>')
assert_(pickle.loads(pickle.dumps(np._NoValue)) is np._NoValue)
-
-
-if __name__ == "__main__":
- run_module_suite()
"""
from __future__ import division, print_function, absolute_import
+import sys
import os
+import pytest
from os.path import join as pathjoin, isfile, dirname, basename
-import sys
from subprocess import Popen, PIPE
+
import numpy as np
from numpy.compat.py3k import basestring
-from nose.tools import assert_equal
-from numpy.testing import assert_, dec
+from numpy.testing import assert_, assert_equal
is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
return proc.returncode, stdout, stderr
-@dec.skipif(is_inplace)
+@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
def test_f2py():
# test that we can run f2py script
if sys.platform == 'win32':
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all.
"""
-
-
from __future__ import division, absolute_import, print_function
-
import sys
+import pytest
+
if sys.version_info >= (3, 4):
from pathlib import Path
import ast
import tokenize
import numpy
- from numpy.testing import run_module_suite, dec
class ParseCall(ast.NodeVisitor):
def __init__(self):
"{} on line {}".format(self.__filename, node.lineno))
- @dec.slow
+ @pytest.mark.slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(numpy.__file__).parent
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read())
FindFuncs(path).visit(tree)
-
-
- if __name__ == "__main__":
- run_module_suite()
#-----------------------------------
# Source of the release notes
-RELEASE_NOTES = 'doc/release/1.15.0-notes.rst'
+RELEASE_NOTES = 'doc/release/1.15.1-notes.rst'
# Start/end of the log (from git)
-LOG_START = 'maintenance/1.14.x'
-LOG_END = 'master'
+LOG_START = 'v1.15.0'
+LOG_END = 'maintenance/1.15.x'
#-------------------------------------------------------
return _compute_hash(idirs, hashlib.md5)
def compute_sha256(idirs):
- # better checksum so gpg signed README.txt containing the sums can be used
+ # better checksum so gpg signed README.rst containing the sums can be used
# to verify the binaries instead of signing all binaries
return _compute_hash(idirs, hashlib.sha256)
def write_release_task(options, filename='README'):
idirs = options.installers.installersdir
source = paver.path.path(RELEASE_NOTES)
- target = paver.path.path(filename)
+ target = paver.path.path(filename + '.rst')
if target.exists():
target.remove()
[pytest]
+addopts = -l
norecursedirs = doc tools numpy/linalg/lapack_lite numpy/core/code_generators
doctest_optionflags = NORMALIZE_WHITESPACE
-testpaths = numpy
+filterwarnings =
+ error
+# Filter out annoying import messages.
+ ignore:Not importing directory
+ ignore:numpy.dtype size changed
+ ignore:numpy.ufunc size changed
+# Ignore python2.7 -3 warnings
+ ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning
+ ignore:in 3\.x, __setslice__:DeprecationWarning
+ ignore:in 3\.x, __getslice__:DeprecationWarning
+ ignore:buffer\(\) not supported in 3\.x:DeprecationWarning
+ ignore:CObject type is not supported in 3\.x:DeprecationWarning
+ ignore:comparing unequal types not supported in 3\.x:DeprecationWarning
+ ignore:the commands module has been removed in Python 3\.0:DeprecationWarning
env =
PYTHONHASHSEED=0
-
-# addopts = --doctest-modules --ignore=numpy/f2py/__main__.py --ignore=numpy/core/cversions.py --ignore=numpy/ma/core.py --ignore=numpy/ma/version.py --ignore=numpy/testing/utils.py --ignore=numpy/testing/decorators.py
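# Roughly, the "error" entry in filterwarnings above corresponds to
# escalating warnings to exceptions at the interpreter level, as in this
# sketch:
#
#     import warnings
#     warnings.simplefilter("error")      # any unmatched warning raises
#     warnings.filterwarnings("ignore", message="Not importing directory")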
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
- $ python runtests.py -t {SAMPLE_TEST} -- {SAMPLE_NOSE_ARGUMENTS}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
- $ python runtests.py --timer 20
+ $ python runtests.py --durations 20
Run a debugger:
PROJECT_ROOT_FILES = ['numpy', 'LICENSE.txt', 'setup.py']
SAMPLE_TEST = "numpy/linalg/tests/test_linalg.py:test_byteorder_check"
SAMPLE_SUBMODULE = "linalg"
-SAMPLE_NOSE_ARGUMENTS = "--pdb"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
help="just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
+ #parser.add_argument("--refguide-check", action="store_true", default=False,
+ #help="Run refguide check (do not run regular tests.)")
parser.add_argument("--coverage", action="store_true", default=False,
help=("report coverage of project code. HTML output goes "
"under build/coverage"))
- parser.add_argument("--timer", action="store", default=0, type=int,
- help=("Time N slowest test"))
+ parser.add_argument("--durations", action="store", default=-1, type=int,
+ help=("Time N slowest tests, time all if 0, time none if < 0"))
parser.add_argument("--gcov", action="store_true", default=False,
help=("enable C code coverage via gcov (requires GCC). "
"gcov output goes to build/**/*.gc*"))
parser.add_argument("--bench", action="store_true",
help="Run benchmark suite instead of test suite")
parser.add_argument("--bench-compare", action="store", metavar="COMMIT",
- help=("Compare benchmark results to COMMIT. "
- "Note that you need to commit your changes first!"))
- parser.add_argument("--raise-warnings", default=None, type=str,
- choices=('develop', 'release'),
- help=("if 'develop', warnings are treated as errors; "
- "defaults to 'develop' in development versions."))
+ help=("Compare benchmark results of current HEAD to "
+ "BEFORE. Use an additional "
+ "--bench-compare=COMMIT to override HEAD with "
+ "COMMIT. Note that you need to commit your "
+ "changes first!"))
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
args = parser.parse_args(argv)
- if args.timer == 0:
- timer = False
- elif args.timer == -1:
- timer = True
- elif args.timer > 0:
- timer = int(args.timer)
- else:
- raise ValueError("--timer value should be an integer, -1 or >0")
- args.timer = timer
+ if args.durations < 0:
+ args.durations = -1
if args.bench_compare:
args.bench = True
fn = os.path.join(dst_dir, 'coverage_html.js')
if os.path.isdir(dst_dir) and os.path.isfile(fn):
shutil.rmtree(dst_dir)
- extra_argv += ['--cover-html',
- '--cover-html-dir='+dst_dir]
+ extra_argv += ['--cov-report=html:' + dst_dir]
if args.bench:
# Run ASV
if args.build_only:
sys.exit(0)
- elif args.submodule:
- modname = PROJECT_MODULE + '.' + args.submodule
- try:
- __import__(modname)
- test = sys.modules[modname].test
- except (ImportError, KeyError, AttributeError):
- print("Cannot run tests for %s" % modname)
- sys.exit(2)
- elif args.tests:
- def fix_test_path(x):
- # fix up test path
- p = x.split(':')
- p[0] = os.path.join(site_dir, p[0])
- return ':'.join(p)
-
- tests = [fix_test_path(x) for x in args.tests]
-
- def test(*a, **kw):
- extra_argv = kw.pop('extra_argv', ())
- extra_argv = extra_argv + tests[1:]
- kw['extra_argv'] = extra_argv
- import numpy as np
- from numpy.testing import Tester
- if kw["raise_warnings"] is None:
- if hasattr(np, "__version__") and ".dev0" in np.__version__:
- kw["raise_warnings"] = "develop"
- else:
- kw["raise_warnings"] = "release"
- return Tester(tests[0]).test(*a, **kw)
else:
__import__(PROJECT_MODULE)
test = sys.modules[PROJECT_MODULE].test
+ if args.submodule:
+ tests = [PROJECT_MODULE + "." + args.submodule]
+ elif args.tests:
+ tests = args.tests
+ else:
+ tests = None
+
+
# Run the tests under build/test
- try:
- shutil.rmtree(test_dir)
- except OSError:
- pass
- try:
- os.makedirs(test_dir)
- except OSError:
- pass
+
+ if not args.no_build:
+ test_dir = site_dir
+ else:
+ test_dir = os.path.join(ROOT_DIR, 'build', 'test')
+ if not os.path.isdir(test_dir):
+ os.makedirs(test_dir)
+
+ shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
+ os.path.join(test_dir, '.coveragerc'))
cwd = os.getcwd()
try:
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
- raise_warnings=args.raise_warnings,
coverage=args.coverage,
- timer=args.timer)
+ durations=args.durations,
+ tests=tests)
finally:
os.chdir(cwd)
- if result.wasSuccessful():
+ if isinstance(result, bool):
+ sys.exit(0 if result else 1)
+ elif result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
"""
+ import distutils.sysconfig
+
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
-
+ cvars = distutils.sysconfig.get_config_vars()
+ if 'gcc' in cvars.get('CC', ''):
+ # add flags used as werrors
+ warnings_as_errors = ' '.join([
+ # from tools/travis-test.sh
+ '-Werror=declaration-after-statement',
+ '-Werror=vla',
+ '-Werror=nonnull',
+ '-Werror=pointer-arith',
+ '-Wlogical-op',
+ # from sysconfig
+ '-Werror=unused-function',
+ ])
+ env['CFLAGS'] = warnings_as_errors + ' ' + env.get('CFLAGS', '')
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
- import distutils.sysconfig
- cvars = distutils.sysconfig.get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
+Programming Language :: Python :: 3.7
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
MAJOR = 1
MINOR = 15
-MICRO = 0
-ISRELEASED = False
+MICRO = 1
+ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
cmdclass={"sdist": sdist_checked},
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
zip_safe=False,
- entry_points={
- 'console_scripts': [
- 'f2py = numpy.f2py.__main__:main',
- 'conv-template = numpy.distutils.conv_template:main',
- 'from-template = numpy.distutils.from_template:main',
- ]
- },
)
if "--force" in sys.argv:
# mkl_libs = mkl_rt
# lapack_libs =
+# ACCELERATE
+# ----------
+# Accelerate/vecLib is an OSX framework providing BLAS and LAPACK implementations.
+#
+# [accelerate]
+# libraries = Accelerate, vecLib
+# #libraries = None
+
# UMFPACK
# -------
# The UMFPACK library is used in scikits.umfpack to factor large sparse matrices.
# Rules
#
def process_pyx(fromfile, tofile):
- try:
- from Cython.Compiler.Version import version as cython_version
- from distutils.version import LooseVersion
- if LooseVersion(cython_version) < LooseVersion('0.19'):
- raise Exception('Building %s requires Cython >= 0.19' % VENDOR)
-
- except ImportError:
- pass
-
flags = ['--fast-fail']
if tofile.endswith('.cxx'):
flags += ['--cplus']
try:
+ # try the cython in the installed python first (somewhat related to scipy/scipy#2397)
+ from Cython.Compiler.Version import version as cython_version
+ except ImportError:
+ # if that fails, use the one on the path, which might be the wrong version
try:
- r = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile])
- if r != 0:
- raise Exception('Cython failed')
+ # invoke the standalone 'cython' executable on the path
+ subprocess.check_call(
+ ['cython'] + flags + ["-o", tofile, fromfile])
except OSError:
- # There are ways of installing Cython that don't result in a cython
- # executable on the path, see gh-2397.
- r = subprocess.call([sys.executable, '-c',
- 'import sys; from Cython.Compiler.Main import '
- 'setuptools_main as main; sys.exit(main())'] + flags +
- ["-o", tofile, fromfile])
- if r != 0:
- raise Exception('Cython failed')
- except OSError:
- raise OSError('Cython needs to be installed')
+ raise OSError('Cython needs to be installed')
+ else:
+ # check the version, and invoke through python
+ from distutils.version import LooseVersion
+
+ # requiring the newest version on all pythons doesn't work, since
+ # we're relying on the cython version shipped by the distribution. Add
+ # versions as they become required for new python versions.
+ if sys.version_info[:2] < (3, 7):
+ required_version = LooseVersion('0.19')
+ else:
+ required_version = LooseVersion('0.28')
+
+ if LooseVersion(cython_version) < required_version:
+ raise RuntimeError('Building {} requires Cython >= {}'.format(
+ VENDOR, required_version))
+ subprocess.check_call(
+ [sys.executable, '-m', 'cython'] + flags + ["-o", tofile, fromfile])
+
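# A hedged usage sketch for process_pyx (the file names are illustrative):
#
#     process_pyx('mtrand/mtrand.pyx', 'mtrand/mtrand.c')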
def process_tempita_pyx(fromfile, tofile):
import npy_tempita as tempita
help="Run doctests in module")
parser.add_option("--coverage",
action="store_true", dest="coverage", default=False,
- help="report coverage of NumPy code (requires 'coverage' module")
+ help="report coverage of NumPy code (requires 'pytest-cov' module")
parser.add_option("-m", "--mode",
action="store", dest="mode", default="fast",
help="'fast', 'full', or something that could be "
- "passed to nosetests -A [default: %default]")
+ "passed to pytest [default: %default]")
(options, args) = parser.parse_args()
import numpy
doctests=options.doctests,
coverage=options.coverage)
-if result.wasSuccessful():
+if result:
sys.exit(0)
else:
sys.exit(1)
source venv/bin/activate
python -V
pip install --upgrade pip setuptools
-pip install nose pytz cython
+pip install nose pytz cython pytest
if [ -n "$USE_ASV" ]; then pip install asv; fi
popd
# install needed packages
sudo chroot $DIR bash -c "apt-get install -qq -y \
- libatlas-base-dev gfortran python-dev python-nose python-pip cython"
+ libatlas-base-dev gfortran python-dev python-nose python-pip cython \
+ python-pytest"
}
run_test()
"import os; import numpy; print(os.path.dirname(numpy.__file__))")
export PYTHONWARNINGS=default
if [ -n "$RUN_FULL_TESTS" ]; then
- $PYTHON ../tools/test-installed-numpy.py --mode=full
+ $PYTHON ../tools/test-installed-numpy.py -v --mode=full
else
- $PYTHON ../tools/test-installed-numpy.py
+ $PYTHON ../tools/test-installed-numpy.py -v
fi
if [ -n "$USE_ASV" ]; then
pushd ../benchmarks
# Move out of source directory to avoid finding local numpy
pushd dist
pip install --pre --no-index --upgrade --find-links=. numpy
- pip install nose
+ pip install nose pytest
popd
run_test
elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
# Move out of source directory to avoid finding local numpy
pushd dist
pip install numpy*
- pip install nose
+ pip install nose pytest
popd
run_test
elif [ -n "$USE_CHROOT" ] && [ $# -eq 0 ]; then